Merge branch 'master' of github.com:thomasabishop/computer_science

This commit is contained in:
thomasabishop 2022-08-01 10:21:50 +01:00
commit b48ebc345e
19 changed files with 401 additions and 77 deletions

1
.obsidian/app.json vendored Normal file
View file

@ -0,0 +1 @@
{}

1
.obsidian/appearance.json vendored Normal file
View file

@ -0,0 +1 @@
{}

19
.obsidian/core-plugins.json vendored Normal file
View file

@ -0,0 +1,19 @@
[
"file-explorer",
"global-search",
"switcher",
"graph",
"backlink",
"outgoing-link",
"tag-pane",
"page-preview",
"daily-notes",
"templates",
"note-composer",
"command-palette",
"editor-status",
"starred",
"outline",
"word-count",
"file-recovery"
]

22
.obsidian/graph.json vendored Normal file
View file

@ -0,0 +1,22 @@
{
"collapse-filter": true,
"search": "",
"showTags": false,
"showAttachments": false,
"hideUnresolved": false,
"showOrphans": true,
"collapse-color-groups": true,
"colorGroups": [],
"collapse-display": true,
"showArrow": false,
"textFadeMultiplier": 0,
"nodeSizeMultiplier": 1,
"lineSizeMultiplier": 1,
"collapse-forces": true,
"centerStrength": 0.518713248970312,
"repelStrength": 10,
"linkStrength": 1,
"linkDistance": 250,
"scale": 0.11660477277966168,
"close": false
}

1
.obsidian/hotkeys.json vendored Normal file
View file

@ -0,0 +1 @@
{}

139
.obsidian/workspace vendored Normal file
View file

@ -0,0 +1,139 @@
{
"main": {
"id": "26c1d5043e97a250",
"type": "split",
"children": [
{
"id": "3a07771d4c32b983",
"type": "leaf",
"state": {
"type": "markdown",
"state": {
"file": "Logic/Disjunction_Elimination.md",
"mode": "preview",
"source": false
}
}
}
],
"direction": "vertical"
},
"left": {
"id": "2e2d413b07672e46",
"type": "split",
"children": [
{
"id": "e8e9316b59517052",
"type": "tabs",
"children": [
{
"id": "f9904be6e8b54776",
"type": "leaf",
"state": {
"type": "file-explorer",
"state": {}
}
},
{
"id": "6ce6f22853a14337",
"type": "leaf",
"state": {
"type": "search",
"state": {
"query": "tag:#publication",
"matchingCase": false,
"explainSearch": false,
"collapseAll": false,
"extraContext": false,
"sortOrder": "alphabetical"
}
}
},
{
"id": "087697dc2054be3f",
"type": "leaf",
"state": {
"type": "starred",
"state": {}
}
}
],
"currentTab": 1
}
],
"direction": "horizontal",
"width": 300
},
"right": {
"id": "ec55042120b08d99",
"type": "split",
"children": [
{
"id": "afc0c1bab3e7a1d4",
"type": "tabs",
"children": [
{
"id": "0dfdc7a995d20deb",
"type": "leaf",
"state": {
"type": "backlink",
"state": {
"file": "Logic/Disjunction_Elimination.md",
"collapseAll": false,
"extraContext": false,
"sortOrder": "alphabetical",
"showSearch": false,
"searchQuery": "",
"backlinkCollapsed": false,
"unlinkedCollapsed": true
}
}
},
{
"id": "502a657839489cd7",
"type": "leaf",
"state": {
"type": "outgoing-link",
"state": {
"file": "Logic/Disjunction_Elimination.md",
"linksCollapsed": false,
"unlinkedCollapsed": true
}
}
},
{
"id": "f769a309ad6de62c",
"type": "leaf",
"state": {
"type": "tag",
"state": {
"sortOrder": "frequency",
"useHierarchy": true
}
}
},
{
"id": "bf315f300ad73f26",
"type": "leaf",
"state": {
"type": "outline",
"state": {
"file": "Logic/Disjunction_Elimination.md"
}
}
}
],
"currentTab": 2
}
],
"direction": "horizontal",
"width": 300
},
"active": "6ce6f22853a14337",
"lastOpenFiles": [
"Logic/Biconditional_Introduction.md",
"Logic/Conditional_Introduction.md",
"Algorithms/Algorithmic_complexity.md",
"Linguistics/Design_features_of_language.md"
]
}

View file

@ -5,7 +5,7 @@ tags:
- von-neumann
---
# Von Neumann CPU architecture
# CPU architecture
At the core of a computer sits the Central Processing Unit. This is what manages and executes all computation.

View file

@ -4,7 +4,7 @@ tags:
- Linux
---
# Basic model of a (*nix) operating system
# Basic model of a *nix operating system
We can abstract the Linux OS into three operational levels or tiers, from the bottom up:

View file

@ -17,17 +17,37 @@ The boot loader loads the kernel into memory from the disk and then starts the k
Like the kernel itself, the boot loader requires a driver in order to access the disk, but it can't use the same one as the kernel because at this point the kernel is not yet loaded into memory. So it relies on its own means of disk access: the routines provided by the BIOS or the UEFI firmware.
A boot loader's core functionality includes the ability to do the following:
* select from multiple kernels
* switch between sets of kernel parameters
* provide support for booting other operating systems
### BIOS and UEFI
BIOS and UEFI are both firmware that is installed directly on the motherboard of the computer. They are firmware because they are software that is permanent and programmed into read-only memory.
In the context of disks, their most crucial role is locating the operating system on the harddisk and loading it into memory so that the bootstrapping process can begin. However they are also responsible for the computer clock and the management of peripherals.
As we can see from the `fdisk` readout, the boot partition is an EFI System partition, the partition type associated with UEFI.
```bash
Device Start End Sectors Size Type
/dev/nvme0n1p1 2048 1001471 999424 488M EFI System
/dev/nvme0n1p2 1001472 59594751 58593280 27.9G Linux filesystem
/dev/nvme0n1p3 59594752 1000214527 940619776 448.5G Linux filesystem
```
Whilst UEFI is installed on the hardware, most of its configuration is stored in the EFI partition on the disk, whereas with BIOS everything is on the chip. This makes booting faster with UEFI.
Even though most modern computers use UEFI, it may still be referred to as the BIOS for the sake of user continuity, as on Windows. With Linux you have to set up your boot process explicitly, so the two are clearly distinguishable.
### GRUB
The de facto standard boot loader for Linux is GRUB: Grand Unified Boot Loader.
![](/img/grub.jpg)
You see the GRUB default menu when you first start a Linux machine. It will offer you various options for loading your installed OS or other OSs. If you press `e` on this screen you can view and edit specific boot parameters. Pressing `c` gives you access to the GRUB command-line interface, which can navigate the filesystems on your disks much like a shell, allowing for advanced configuration.
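By way of illustration, a hypothetical session at the GRUB prompt might look like this (the device names, kernel image and initramfs paths are illustrative; the `root=` parameter reuses the `nvme0n1p2` root partition from the `fdisk` output above):
```
grub> ls
(hd0) (hd0,gpt1) (hd0,gpt2)
grub> set root=(hd0,gpt2)
grub> linux /boot/vmlinuz-linux root=/dev/nvme0n1p2
grub> initrd /boot/initramfs-linux.img
grub> boot
```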
## The boot sequence

View file

@ -0,0 +1,82 @@
---
tags:
- Linux
- Operating_Systems
- disks
- disk-partition
- filesystems
---
# Filesystems
We cannot yet mount or interact with the partitions we have created. This is because we have not added a filesystem to each partition.
> A filesystem is a form of [database](/Databases/Basic_database_concepts.md); it supplies the structure to transform a simple block device into the sophisticated hierarchy of files and subdirectories that users can understand.
Linux recognises many types of filesystems. The native Linux filesystem is **ext4** (Fourth Extended Filesystem). Another common family is **FAT** (File Allocation Table); instances of this include _MSDOS_, _EXFAT_ and _FAT-32_. They originate from Microsoft systems.
## Creating a filesystem
Remember we have two partitions on our external drive: `sda1` and `sda2`. We are going to use the `mkfs` utility to create an EXT4 system on both.
```bash
mkfs -t ext4 /dev/sda1
mkfs -t ext4 /dev/sda2
```
## Mounting a filesystem
We can now mount our filesystems. When we mount, we must specify the following criteria with the request:
* The name of the device we want to mount.
* This will be the name of the partition. However the names (`sda` etc.) assigned by the OS can change. In these cases, and with GPT-based partitions, you can use the UUID instead.
* To see a list of devices and the corresponding filesystems and UUIDs on your system, you can use the **`blkid`** ('block id') program.
```
/dev/nvme0n1p3: UUID="c53577b5-92ef-4a0a-9a19-e488bfdfa39c" BLOCK_SIZE="4096" TYPE="ext4" PARTUUID="e152b9f4-7ce8-e74b-94db-2731c6fce53d"
/dev/nvme0n1p1: UUID="9920-636A" BLOCK_SIZE="512" TYPE="vfat" PARTUUID="50592521-d386-194a-a362-bc8562ed6c82"
/dev/nvme0n1p2: UUID="2ee6b834-0857-49dc-b8ba-a24d46d228ae" BLOCK_SIZE="4096" TYPE="ext4" PARTUUID="e08cc442-ef51-7b4f-9d55-e236c55c933c"
/dev/sda2: UUID="abac6e2e-e3bf-40d3-a5ba-317c53eb27ce" BLOCK_SIZE="4096" TYPE="ext4" PARTUUID="4ef1b0e8-3d5b-c940-a3b1-0f85cddeca42"
/dev/sda1: UUID="ba1e40c5-9b29-4309-a559-99bf8f68116f" BLOCK_SIZE="4096" TYPE="ext4" PARTUUID="b4983358-6036-df40-a1f8-793976f3dfb1"
```
* The filesystem type (optional)
* The **mount point**
* This is the place within the existing filesystem where you want to mount the partition.
* When you mount to a directory, that directory _becomes_ the disk you have mounted: you will not see the disk as a subdirectory within the mount point, you will just see the contents of the disk itself.
```bash
mkdir mountpoint                      # create a directory to act as the mount point
mount -t ext4 /dev/sda1 ./mountpoint  # mount the partition there
touch ./mountpoint/test.txt           # create a test file on the mounted filesystem
```
Our `sda1` partition is now mounted at `mountpoint`. We can go ahead and create files. If we now click on the `sda1` volume in the graphical file manager, we will see the new file we have created in `mountpoint`.
![](/img/mount-directory.png)
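Since device names such as `sda1` can change between boots, the same mount can also be performed using the UUID reported by `blkid` above:
```bash
mount UUID=ba1e40c5-9b29-4309-a559-99bf8f68116f ./mountpoint
```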
## fstab
In most cases you want your filesystem to mount automatically on boot and always to the same mount point. You can do this via the specialised `fstab` file on Linux systems within the `/etc/` directory.
This is my current `fstab`:
```
# <file system> <dir> <type> <options> <dump> <pass>
# /dev/nvme0n1p2
UUID=2ee6b834-0857-49dc-b8ba-a24d46d228ae / ext4 rw,relatime 0 1
# /dev/nvme0n1p3
UUID=c53577b5-92ef-4a0a-9a19-e488bfdfa39c /home ext4 rw,relatime 0 2
/swapfile none swap sw 0 0
```
It shows my root and home filesystems and my [swap](/Operating_Systems/Disks/Swap_space.md) file. Note that we use the UUID to name the partition rather than its name in `/dev/`. The order of the parameters is as follows:
- Device name or UUID
- The mount point
- The filesystem type (in the example there are two types: `ext4` and `swap`)
- Options
- Now largely irrelevant backup information (the `dump` field), set to `0`
- Filesystem integrity test order
- Runs the `fsck` ('filesystem check') command against each filesystem
- Put `1` against the root partition for this to be checked first
- Put `0` for no checks to take place
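For example, to have the external `sda1` partition mount automatically at boot, an entry along the following lines could be appended (the mount point `/mnt/external` is illustrative):
```
# /dev/sda1 (external drive)
UUID=ba1e40c5-9b29-4309-a559-99bf8f68116f /mnt/external ext4 defaults 0 2
```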

View file

@ -3,29 +3,17 @@ tags:
- Linux
- Operating_Systems
- disks
- harddisk
- devices
- disk-partions
---
# Disks
# Disk partitions
A disk is a mass storage [device](./Devices.md) which we can write to and read from.
A disk is divided up into [partitions](/Operating_Systems/Disks/Partitions.md) which are subsections of the overall disk. The kernel presents each partition as a [block device](/Operating_Systems/Devices.md#Devices.md) as it would with an entire disk.
## SCSI
* Small Computer System Interface, responsible for handling disk access on most Linux systems.
* It is a protocol that allows communication between printers, scanners and other peripherals in addition to harddisks.
## Disk schematic
The following diagram represents the basic anatomy of a disk device.
The disk dedicates a small part of its contents to a **partition table**: this defines the different partitions that comprise the total disk space.
![](/img/harddisk.png)
* A disk is divided up into **partitions** which are subsections of the overall disk. The kernel presents each partition as a [block device](./Devices.md#Devices.md) as it would with an entire disk.
* The disk dedicates a small part of its contents to a **partition table**: this defines the different partitions that comprise the total disk space.
* The **filesystem** is a database of files and directories: this comprises the bulk of the partition and is what you interact with in [user space](./User_Space.md) when reading and writing data.
## Disk partitions
### Viewing current partitions
## Viewing current partitions
Whenever you install a Linux distribution on a real or virtual machine, you must partition the drive. There are three main tools to choose from: `parted`, `g(raphical)parted`, `fdisk`.
For a top-level overview of your disks and their main partitions you can run `lsblk` (_list block devices_):
@ -79,16 +67,16 @@ The two tools disclose that the main harddrive is `/dev/nvme0n1` (equivalent to
* Boot partition (`/dev/nvme0n1p1`)
* This takes up the smallest amount of space and exists in order to bootstrap the operating system: to load the kernel into memory when the machine starts. This is where your bootloader is stored and what the BIOS accesses. In Linux this will be GRUB.
* Root dir (`/dev/nvme0n1p2`)
* This is the domain of the [superuser](./User_Space.md#root-user-superuser). The part of the filesystem that you need sudo privileges to access and where you manage users
* This is the domain of the [superuser](/Operating_Systems/User_Space.md#root-user-superuser). The part of the filesystem that you need sudo privileges to access and where you manage users
* Home dir (`/dev/nvme0n1p3`)
* The domain of the user(s)
### Types of partition table
## Types of partition table
In the Linux world there are two main types: MBR and GPT. The type of table used determines how the OS boots. So although partition tables are also responsible for the partitioning of non-bootable sectors of a disk, **they are distinguished by the boot system they implement**.
If we look at the output from `parted` and `fdisk` above we see that the harddrive uses the GPT partition type.
#### Primary, extended and logical partitions
Most standard partition tables allow for primary, extended and logical partitions. The primary partition is the part of the harddisk that contains the operating system and is thus described as 'bootable' and may be called the 'boot partition'. During the bootstrapping process this is injected into memory as the [kernel](The_Kernel.md).
Most standard partition tables allow for primary, extended and logical partitions. The primary partition is the part of the harddisk that contains the operating system and is thus described as 'bootable' and may be called the 'boot partition'. During the bootstrapping process this is injected into memory as the [kernel](/Operating_Systems/The_Kernel.md).
The extended partition is basically everything other than the primary partition. This is typically subdivided into other partitions that are called *logical* partitions. This is because they physically reside in the same sector of the disk (the extended partition) but are treated as virtual and independent disks.
@ -122,7 +110,7 @@ In our example above:
</dd>
</dl>
### Creating a partition table
## Creating a partition table
To demonstrate the process of partitioning a harddrive I am going to repartition an external SATA drive as if it were being primed for a fresh Linux install.
@ -132,10 +120,7 @@ Let's take a look at the disk in its current form:
$ fdisk -l
Disk /dev/sda: 465.74 GiB, 500079525888 bytes, 976717824 sectors
Disk model: My Passport 071A
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disklabel type: gpt
Disk identifier: 9993F1BB-626C-485F-8542-3CC73BB40953
@ -245,20 +230,4 @@ sda 8:0 0 465.7G 0 disk
└─sda2 8:2 0 365.7G 0 part
```
> Whilst we have created our partitions we cannot yet mount them. This is because we have not yet set up a filesystem on the partitions. This is the next step.
## BIOS and UEFI
BIOS and UEFI are both firmware that is installed directly on the motherboard of the computer. They are firmware because they are software that is permanent and programmed into read-only memory.
In the context of disks, their most crucial role is locating the operating system on the harddisk and loading it into memory so that the bootstrapping process can begin. However they are also responsible for the computer clock and the management of peripherals.
As we can see from the `fdisk` readout, the boot partition is an EFI System partition, the partition type associated with UEFI.
Whilst UEFI is installed on the hardware, most of its configuration is stored in the EFI partition on the disk, whereas with BIOS everything is on the chip. This makes booting faster with UEFI.
Even though most modern computers use UEFI, it may still be referred to as the BIOS for the sake of user continuity, as on Windows. With Linux you have to set up your boot process explicitly, so the two are clearly distinguishable.
## File systems
File systems are what the computer relies on to ascertain the location and positioning of files on the disk. In Linux it is customary to use FAT-32 for the boot partition and ext4 for the extended partition. In other operating systems you would do the same but most likely use NTFS for the extended partition.
> Whilst we have created our partitions we cannot yet mount them. This is because we have not yet set up a filesystem on the partitions. This is the next step.

View file

@ -0,0 +1,38 @@
---
tags:
- Linux
- Operating_Systems
- disks
- devices
- disk-partions
---
# Swap space
A swap partition is a partition on a disk that is not intended to be used as a filesystem. Instead, it is a part of the disk that is used to augment the main memory.
If you run out of memory and have set up a swap partition, the OS will be able to move pieces of memory to and from disk storage. This is called _swapping_ because idle pieces of programs are swapped out to the disk in exchange for active pieces being brought back into main memory.
## View current swap usage
If you have a swap space established, the command `free` will show current usage:
```bash
free
total used free shared buff/cache available
Mem: 16099420 3031572 10157652 1153144 2910196 11605820
Swap: 3145724 0 3145724
```
## Create a swap partition
To use an existing disk partition as a swap you can run the command `mkswap [device]` and then `swapon [device]` to register the space with the [kernel](/Operating_Systems/The_Kernel.md).
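A minimal sketch, using `/dev/sda3` as a stand-in partition (matching the `fstab` example below):
```bash
mkswap /dev/sda3   # write the swap signature to the partition
swapon /dev/sda3   # register the space with the kernel
swapon --show      # confirm the swap space is active
```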
### Add to `fstab`
You will want the swap to be activated every time the OS boots so add the following line to the [fstab](/Operating_Systems/Disks/Filesystems.md#fstab), where `/dev/sda3` is used as the example partition:
```bash
/dev/sda3 none swap sw 0 0
```
## Create a swap file
// TODO: Add info
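A sketch of one common approach (the 2G size and `/swapfile` path are illustrative):
```bash
fallocate -l 2G /swapfile   # reserve space for the swap file
chmod 600 /swapfile         # restrict access to root
mkswap /swapfile            # write the swap signature
swapon /swapfile            # register it with the kernel
```
The file can then be added to `fstab` in the same way as a swap partition.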

View file

@ -0,0 +1,24 @@
---
tags:
- Linux
- Operating_Systems
- disks
- devices
---
# What are disks?
A disk is a mass storage [block device](/Operating_Systems/Devices.md) which we can write to and read from.
## SCSI
* Small Computer System Interface, responsible for handling disk access on most Linux systems.
* It is a protocol that allows communication between printers, scanners and other peripherals in addition to harddisks.
## Disk schematic
The following diagram represents the basic anatomy of a disk device.
![](/img/harddisk.png)
* A disk is divided up into [partitions](/Operating_Systems/Disks/Partitions.md) which are subsections of the overall disk. The kernel presents each partition as a [block device](/Operating_Systems/Devices.md#Devices.md) as it would with an entire disk.
* The disk dedicates a small part of its contents to a **partition table**: this defines the different partitions that comprise the total disk space.
* The **filesystem** is a database of files and directories: this comprises the bulk of the partition and is what you interact with in [user space](/Operating_Systems/User_Space.md) when reading and writing data.

View file

@ -17,7 +17,7 @@ The kernel acts as the primary mediator between the hardware (CPU, memory) and u
> A process is just another name for a running program. Process management is the starting, pausing, resuming, scheduling and terminating of processes.
On modern computers it appears that multiple processes can run simultaneously. This is only because the processor is so fast that we do not detect changes. In fact access to the CPU is always sequential. The sequence* in which multiple programs are allowed to access the CPU is managed by the kernel.
On modern computers it appears that multiple processes can run simultaneously. This is only because the processor is so fast that we do not detect changes. In fact access to the CPU is always sequential. The sequence in which multiple programs are allowed to access the CPU is managed by the kernel.
> Consider a system with a one-core CPU. Many processes may be _able_ to use the CPU, but only one process can actually use the CPU at any given time...Each process uses the CPU for a fraction of a second, then pauses, then another process uses it for a fraction of a second and so on... (_How Linux Works: Third Edition_, Brian Ward 2021)

View file

@ -22,7 +22,7 @@ If there was only one thread, this would be inefficient and unworkable. Therefor
![sync-thread.svg](/img/sync-thread.svg)
To accommodate the ability to increase the scale of synchronous applications you need to be able to spawn more threads commensurate with increased ademand. This increases the resource consumption of the framework (more cores, more memory etc). Moreover it is possible to reach a point where all threads are active and no more can be spawned. In this case there will simply be delays in the return of data.
To accommodate the ability to increase the scale of synchronous applications you need to be able to spawn more threads commensurate with increased demand. This increases the resource consumption of the framework (more cores, more memory etc). Moreover it is possible to reach a point where all threads are active and no more can be spawned. In this case there will simply be delays in the return of data.
## Node as a single-threaded asynchronous architecture
@ -39,31 +39,34 @@ This is the mechanism by which Node keeps track of incoming requests and their f
Node is continually monitoring the Event Loop in the background.
A running Node application is a single running process. Like everything that happens within the OS, a process is managed by the [kernel](/Operating_Systems/The_Kernel.md) that dispatches operations to the CPU in a clock cycle. A thread is a sequence of code that resides within the process and utilises its memory pool (the amount of memory assigned by the kernel to the process). The Event Loop runs on CPU ticks: a tick is a single run of the Event Loop.
A running Node application is a single running process. Like everything that happens within the OS, a process is managed by the [kernel](/Operating_Systems/The_Kernel.md) that dispatches operations to the CPU in a clock cycle. A thread is a sequence of code that resides within the process and utilises its memory pool (the amount of memory assigned by the kernel to the Node process). The Event Loop runs on CPU ticks: a tick is a single run of the Event Loop.
### Phases of the Event Loop
The Event Loop comprises six phases. The Event Loop starts at the moment Node begins to execute your `index.js` file or any other application entry point. These six phases create one cycle, or loop, which is known as a **tick**. A Node.js process exits when there is no more pending work in the Event Loop, or when `process.exit()` is called manually. A program only runs for as long as there are tasks queued in the Event Loop, or present on the [call stack](/Software_Engineering/Call_stack.md).
The Event Loop comprises six phases. The Event Loop starts at the moment Node begins to execute your `index.js` file or any other application entry point. These six phases create one cycle, or loop, equal to one **tick**. A Node.js process exits when there is no more pending work in the Event Loop, or when `process.exit()` is called manually. A program only runs for as long as there are tasks queued in the Event Loop, or present on the [call stack](/Software_Engineering/Call_stack.md).
![](/img/node-event-loop.svg)
The phases are as follows:
1. Timers
1. **Timers**
* These are functions that execute callbacks after a set period of time. As in standard JavaScript there are two global timer functions: `setTimeout` and `setInterval`. Interestingly, these are not core parts of the JavaScript language; they are made available to JS by the particular browser. As Node does not run in the browser, it has to provide this functionality itself. It does so through the core `timers` module.
* At the beginning of this phase the Event Loop updates its own time. Then it checks a queue, or pool, of timers. This queue consists of all timers that are currently set. The Event Loop takes the timer with the shortest wait time and compares it with the Event Loop's current time. If the wait time has elapsed, then the timer's callback is queued to be called once the [call stack](/Software_Engineering/Call_stack.md) is empty.
2. I/O Callbacks
* Node implements a non-blocking input/output interface. This is to say, writing and reading to disk (files in the Node application directory) is implemented asynchronously. The asynchronous I/O request is recorded into the queue and then main call stack can continue working as expected.
2. **I/O Callbacks**
* Once timers have been checked and scheduled, Node jumps to I/O operations.
3. Idle / waiting / preparation
* Node implements a non-blocking input/output interface. This is to say, writing and reading to disk (files in the Node application directory) is implemented asynchronously. The asynchronous I/O request is recorded into the queue and then the call stack continues.
3. **Idle / waiting / preparation**
* This phase is internal to Node and is not accessible to the programmer.
* It is primarily used for gathering information, and planning what needs to be executed during the next tick of the Event Loop.
4. I/O polling
4. **I/O polling**
* This is the phase at which the main block of code is read and executed by Node.
* During this phase the Event Loop is managing the I/O workload, calling the functions in the queue until the queue is empty, and calculating how long it should wait until moving to the next phase. All callbacks in this phase are called synchronously in the order that they were added to the queue, from oldest to newest.
* This is the phase that can potentially block our application if any of these callbacks are slow and not executed asynchronously.
5. `setImmediate` callbacks
* During this phase the Event Loop is managing the I/O workload, calling the functions in the queue until the queue is empty, and calculating how long it should wait until moving to the next phase. All callbacks in this phase are called synchronously (although they return asynchronously) in the order that they were added to the queue, from oldest to newest.
* This is the phase that can potentially block our application if any of these callbacks are slow or do not return asynchronously.
5. **`setImmediate` callbacks**
* This phase runs as soon as the poll phase becomes idle. If `setImmediate()` is scheduled within the I/O cycle it will always be executed before other timers regardless of how many timers are present.
* This is your opportunity to grant precedence to certain callbacks within the Node process (see the sketch after this list)
6. Close events
6. **Close events**
* This phase occurs when the Event Loop is wrapping up one cycle and is ready to move to the next one.
* It is an opportunity for clean-up and to guard against memory leaks.
* This phase can be targeted via the `process.exit()` function or the close event of a web socket.
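To make the interaction between the timers and `setImmediate` phases concrete, here is a minimal sketch (reading the script's own file via `fs` purely as a convenient I/O operation):
```js
const fs = require('fs');

// At the top level the ordering of these two is not deterministic:
setTimeout(() => console.log('timeout (top level)'), 0);
setImmediate(() => console.log('immediate (top level)'));

// Inside an I/O callback, setImmediate always runs before any timers,
// because the check phase follows the poll phase within the same tick.
fs.readFile(__filename, () => {
  setTimeout(() => console.log('timeout (inside I/O callback)'), 0);
  setImmediate(() => console.log('immediate (inside I/O callback)'));
});
```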
@ -75,5 +78,5 @@ The terms _event loop_ and _event queue_ are often used interchangeably in the l
The Event Loop is the Node runtime's method of execution; the queue is the set of tasks that are lined up and executed by the loop. We can think of the queue as being the input and the loop as what acts on the input. The queue obviously emerges from the program we write, but it is scheduled, organised and sequenced by the loop.
https://blog.appsignal.com/2022/07/20/an-introduction-to-multithreading-in-nodejs.html
https://school.geekwall.in/p/Bk2xFs1DV

View file

@ -5,14 +5,16 @@ tags:
- node-js
- node-modules
---
# `events` module
In most cases you won't interact with the `events` module directly since other core modules and third-party modules are abstractions on top of it. For instance, the `http` module uses events under the hood to handle requests and responses.
* Much of the NodeJS core is built around an [asynchronous event-driven architecture](Single-threaded%20asynchronous%20architecture.md) in which certain kinds of objects (called "emitters") emit named events that cause `Function` objects ("listeners") to be called.
* For example: a `fs.ReadStream` emits an event when the file is opened
Another way of putting this is to say that all events in Node inherit from the `EventEmitter` constructor, which is the class you instantiate to create a new event. At bottom everything in Node is an event with a callback, created via event emitters.
Because Node's runtime is [event-driven](/Programming_Languages/NodeJS/Architecture/Event_loop.md), it is event-emitter cycles that are being processed by the Event Loop, although you may know them as `fs` or `http` (etc) events. The call stack that the Event Loop works through is just a series of event emissions and their associated callbacks.
## Event Emitters
* All objects that emit events are instances of the `EventEmitter` class. These objects expose an `eventEmitter.on()` function that allows one or more functions to be attached to named events emitted by the object.
* These functions are listeners of the emitter.
* All objects that emit events are instances of the `EventEmitter` class. This object exposes an `eventEmitter.on()` function that allows one or more functions to be attached to named events emitted by the object.
* These functions are **listeners** of the emitter.
## Basic syntax

View file

@ -6,11 +6,13 @@ tags:
- node-modules
---
# `fs` module
File System is an essential built-in module of Node that contains utility methods for working with files and directories.
Every method associated with `fs` has a *blocking* (synchronous) and a *non-blocking* (asynchronous) implementation. The former obviously blocks the [event queue](Event%20queue.md), the latter does not.
The asynchronous methods are useful to have in some contexts but in general and with real-world applications, you should be using the async implementation.
The synchronous methods are useful to have in some contexts but in general and with real-world applications, you should be using the async implementation so as to accord with the single-threaded event-driven architecture of Node.
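As a brief sketch of the difference (the file name is illustrative):
```js
const fs = require('fs');

// Blocking: nothing else runs until the whole file has been read
const data = fs.readFileSync('./notes.txt', 'utf8');
console.log(data);

// Non-blocking: the read is queued as an I/O operation and the callback
// runs on a later tick of the Event Loop
fs.readFile('./notes.txt', 'utf8', (err, contents) => {
  if (err) throw err;
  console.log(contents);
});
```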
## Methods

View file

@ -6,21 +6,21 @@ tags:
- node-modules
---
# `http` module
The HTTP Module allows us to create a web server that listens for HTTP requests on a given port. It is therefore perfect for creating backends for client-side JavaScript.
## Creating a server
An HTTP server is another instance of an [event emitter](Events%20module.md#event-emitters). It therefore has all the same methods as the `EventEmitter` class: `on`, `emit`, `addListener` etc. This demonstrates again how much of Node's core functionality is based on event emitters.
An HTTP server is another instance of an [event emitter](/Programming_Languages/NodeJS/Modules/Core/events.md). It therefore has all the same methods as the `EventEmitter` class: `on`, `emit`, `addListener` etc. This demonstrates again how much of Node's core functionality is based on event emitters.
*Creating a server*
````js
const http = require('http')
const server = http.createServer() // Create server as emitter
// Register functions to run when listener listener is triggered
// Register functions to run when listener is triggered
server.on('connection', (socket) => {
console.log('new connection...')
})
@ -49,7 +49,7 @@ If we were to start the server by running the file and we then used a browser to
### Sockets and `req, res`
A socket is a generic protocol for client-server communication. Crucially it allows simultaneous communication both ways. The client can contact the server but the server can also contact the client. Our listener function above receives a socket as its callback parameter but in most cases this is quite low-level, not distinguishing responses from requests. It is more likely that you would use a `request, response` architecture in place of a socket:
A socket is a generic protocol for client-server communication. Crucially it **allows simultaneous communication both ways**. The client can contact the server but the server can also contact the client. Our listener function above receives a socket as its callback parameter but in most cases this is quite low-level, not distinguishing responses from requests. It is more likely that you would use a `request, response` architecture in place of a socket:
````js
const server = http.createServer((req, res) => {
@ -80,5 +80,3 @@ const server = http.createServer((req, res) => {
### Express
In reality you would rarely use the `http` module directly to create a server. This is because it is quite low level and each response must be written in a linear fashion as with the two URLs in the previous example. Instead we use Express which is a framework for creating servers and routing that is an abstraction on top of the core HTTP module.
* [Create RESTful API with Express](Create%20RESTful%20API%20with%20Express.md)
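As a rough sketch of the same idea with Express (assuming the `express` package is installed; the route and port are illustrative):
```js
const express = require('express');
const app = express();

// Routes are declared individually rather than handled in one linear callback
app.get('/api/courses', (req, res) => {
  res.send(['course1', 'course2']);
});

app.listen(3000, () => console.log('Listening on port 3000...'));
```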

View file

@ -1,11 +1,14 @@
h1, h2, h3, h4, h5, h6, p {
/* h1, h2, h3, h4, h5, h6, p {
font-family: 'Inter';
}
} */
pre, code {
font-family: 'JetBrains Mono';
font-family: 'JetBrains Mono' !important;
}
code, code {
font-family: 'Jetbrains mono';
}
/*
h1 {