commit c5cd492449: init
475 changed files with 27928 additions and 0 deletions
43 Meta.md Normal file
@@ -0,0 +1,43 @@
---
|
||||
obj: meta
|
||||
---
|
||||
|
||||
# Frontmatter
|
||||
[Index](MetaIndex.md)
|
||||
## Common Frontmatter Tags
|
||||
- `website` - Website
|
||||
- `repo` - Repository
|
||||
- `source` - Source
|
||||
- `wiki` - Wiki link
|
||||
- `arch-wiki` - Arch Wiki link
|
||||
- `rfc` - RFC Document link
|
||||
- `obj` - Object Type
|
||||
|
||||
## Common Frontmatter Tags for Object Types
|
||||
### `Application` + `Emulator`
|
||||
- `android-id` - Android Application ID
|
||||
- `flatpak-id` - Flatpak Application ID
|
||||
- `f-droid` - F-Droid URL
|
||||
|
||||
### `Website`
|
||||
- `category` - Website Category
|
||||
- `status` - Website Status
|
||||
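As an illustration, a new application note could combine the common tags with the `Application`-specific ones above. A minimal sketch (the path and all values below are made-up placeholders):

```shell
cat > 'technology/applications/Example App.md' << 'EOF'
---
obj: application
website: https://example.org
repo: https://example.org/git/example-app
flatpak-id: org.example.ExampleApp
---
# Example App
EOF
```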
|
||||
# Object Types
|
||||
- `Application` - Any application
|
||||
- `OS` - Operating System
|
||||
- `Codec` - Media Codec
|
||||
- `FileSystem` - File System
|
||||
- `Emulator` - Emulator System
|
||||
- `Concept` - Concept
|
||||
- `Meta` - Lists, Collections, Metadata
|
||||
- `Meta/Collection` - Collection of notes
|
||||
- `Device` - Physical Device
|
||||
- `Website` - Website
|
||||
- `Science` - Scientific Notes
|
||||
- `Science/Unit` - Scientific measurement unit
|
||||
|
||||
## Notes without Object set
|
||||
```dataview
|
||||
table obj as "Type" from "knowledge" where obj = null
|
||||
```
|
90 MetaIndex.md Normal file
@@ -0,0 +1,90 @@
---
|
||||
obj: meta
|
||||
---
|
||||
|
||||
# Index
|
||||
## Frontmatter
|
||||
### Notes with websites
|
||||
```dataview
|
||||
table website from "/" where website != null
|
||||
```
|
||||
|
||||
### Notes with repositories
|
||||
```dataview
|
||||
table repo from "/" where repo != null
|
||||
```
|
||||
|
||||
### Notes with Sources
|
||||
```dataview
|
||||
table source from "/" where source != null
|
||||
```
|
||||
|
||||
### Notes with Wiki
|
||||
```dataview
|
||||
table wiki from "/" where wiki != null
|
||||
```
|
||||
|
||||
### Notes with Arch Wiki
|
||||
```dataview
|
||||
table arch-wiki from "/" where arch-wiki != null
|
||||
```
|
||||
|
||||
### Notes with RFC
|
||||
```dataview
|
||||
table rfc from "/" where rfc != null
|
||||
```
|
||||
|
||||
### Notes with Object Type
|
||||
```dataview
|
||||
table obj from "/" where obj != null
|
||||
```
|
||||
|
||||
## Object Type
|
||||
### Applications
|
||||
```dataview
|
||||
table android-id, flatpak-id from "/" where startswith(lower(obj), "application")
|
||||
```
|
||||
|
||||
### Operating Systems
|
||||
```dataview
|
||||
list from "/" where startswith(lower(obj), "os")
|
||||
```
|
||||
|
||||
### Codecs
|
||||
```dataview
|
||||
list from "/" where startswith(lower(obj), "codec")
|
||||
```
|
||||
|
||||
### Filesystems
|
||||
```dataview
|
||||
list from "/" where startswith(lower(obj), "filesystem")
|
||||
```
|
||||
|
||||
### Emulators
|
||||
```dataview
|
||||
list from "/" where startswith(lower(obj), "emulator")
|
||||
```
|
||||
|
||||
### Concepts
|
||||
```dataview
|
||||
list from "/" where startswith(lower(obj), "concept")
|
||||
```
|
||||
|
||||
### Meta Notes
|
||||
```dataview
|
||||
list from "/" where startswith(lower(obj), "meta")
|
||||
```
|
||||
|
||||
### Device Notes
|
||||
```dataview
|
||||
list from "/" where startswith(lower(obj), "device")
|
||||
```
|
||||
|
||||
### Science Notes
|
||||
```dataview
|
||||
list from "/" where startswith(lower(obj), "science")
|
||||
```
|
||||
### Website Notes
|
||||
```dataview
|
||||
table website, category, status from "/" where startswith(lower(obj), "website")
|
||||
```
|
169 schema.json Normal file
@@ -0,0 +1,169 @@
{
|
||||
"type": "object",
|
||||
"title": "Markdown Note",
|
||||
"description": "Knowledge Base Markdown note on a topic",
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"required": ["obj"],
|
||||
"properties": {
|
||||
"tags": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"title": "Note Tags",
|
||||
"description": "List of tags asssociated with the note"
|
||||
},
|
||||
"aliases": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"title": "Note Aliases",
|
||||
"description": "List of aliases of the note"
|
||||
},
|
||||
"website": {
|
||||
"title": "Website",
|
||||
"description": "Associated website with the note",
|
||||
"type": [
|
||||
"string",
|
||||
"array"
|
||||
],
|
||||
"items": {
|
||||
"type": "string",
|
||||
"format": "uri"
|
||||
},
|
||||
"format": "uri"
|
||||
},
|
||||
"repo": {
|
||||
"title": "Repository",
|
||||
"description": "Associated repository with the note",
|
||||
"type": "string",
|
||||
"format": "uri"
|
||||
},
|
||||
"source": {
|
||||
"title": "Source",
|
||||
"description": "Source the note is based on",
|
||||
"type": "string",
|
||||
"format": "uri"
|
||||
},
|
||||
"wiki": {
|
||||
"title": "Wikipedia",
|
||||
"description": "Wikipedia link about the note",
|
||||
"type": "string",
|
||||
"format": "uri"
|
||||
},
|
||||
"arch-wiki": {
|
||||
"title": "Arch Wiki",
|
||||
"description": "Arch Wiki link about the note",
|
||||
"type": "string",
|
||||
"format": "uri"
|
||||
},
|
||||
"rfc": {
|
||||
"title": "RFC",
|
||||
"description": "Link to RFC this note is about",
|
||||
"type": "string",
|
||||
"format": "uri"
|
||||
},
|
||||
"obj": {
|
||||
"title": "Object Type",
|
||||
"description": "Meta information about the notes general classification",
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"application",
|
||||
"os",
|
||||
"codec",
|
||||
"filesystem",
|
||||
"emulator",
|
||||
"concept",
|
||||
"meta",
|
||||
"meta/collection",
|
||||
"device",
|
||||
"website",
|
||||
"science",
|
||||
"science/unit"
|
||||
]
|
||||
}
|
||||
},
|
||||
"allOf": [
|
||||
{
|
||||
"if": {
|
||||
"properties": {
|
||||
"obj": {
|
||||
"pattern": "^(application|emulator)"
|
||||
}
|
||||
}
|
||||
},
|
||||
"then": {
|
||||
"properties": {
|
||||
"android-id": {
|
||||
"title": "Android Application ID",
|
||||
"description": "The Android Application ID of the note",
|
||||
"type": "string"
|
||||
},
|
||||
"flatpak-id": {
|
||||
"title": "Flatpak Application ID",
|
||||
"description": "The Flatpak Application ID of the note",
|
||||
"type": "string"
|
||||
},
|
||||
"f-droid": {
|
||||
"title": "F-Droid Page",
|
||||
"description": "Link to the F-Droid Page of the note",
|
||||
"type": "string",
|
||||
"format": "uri"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"if": {
|
||||
"properties": {
|
||||
"obj": {
|
||||
"pattern": "^website"
|
||||
}
|
||||
}
|
||||
},
|
||||
"then": {
|
||||
"properties": {
|
||||
"category": {
|
||||
"title": "Category",
|
||||
"description": "General category the website falls into",
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"server",
|
||||
"finance",
|
||||
"social",
|
||||
"service",
|
||||
"development",
|
||||
"search",
|
||||
"information",
|
||||
"market",
|
||||
"knowledge",
|
||||
"board",
|
||||
"company",
|
||||
"shop",
|
||||
"content",
|
||||
"images",
|
||||
"link-registry",
|
||||
"api"
|
||||
]
|
||||
},
|
||||
"status": {
|
||||
"title": "Website Status",
|
||||
"description": "Wether the website is up or down",
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
],
|
||||
"enum": [
|
||||
"down",
|
||||
"up"
|
||||
]
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"category"
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
11 science/Science.md Normal file
@@ -0,0 +1,11 @@
---
|
||||
tags: ["meta"]
|
||||
obj: meta/collection
|
||||
---
|
||||
# Science Fields
|
||||
- [Math](math/Math.md)
|
||||
- [Physics](physics/Physics.md)
|
||||
- [Chemistry](chemistry/Chemistry.md)
|
||||
- [Biology](biology/Biology.md)
|
||||
- [Philosophy](philosophy/Philosophy.md)
|
||||
- [Psychology](psychology/Psychology.md)
|
5 science/biology/Biology.md Normal file
@@ -0,0 +1,5 @@
---
|
||||
obj: meta
|
||||
---
|
||||
|
||||
#wip #🐇 #notnow
|
4 science/chemistry/Chemistry.md Normal file
@@ -0,0 +1,4 @@
---
|
||||
obj: meta
|
||||
---
|
||||
#wip #🐇 #notnow
|
26 science/math/Binary System.md Normal file
@@ -0,0 +1,26 @@
---
|
||||
obj: concept
|
||||
---
|
||||
# Binary System
|
||||
The base-2 numeral system is a positional notation with a radix of 2. Each digit is referred to as a bit, or binary digit. Because it is straightforward to implement in digital electronic circuitry using logic gates, the binary system is used by almost all modern computers and computer-based devices; it is preferred over other systems because of its simplicity and its noise immunity in physical implementations.
|
||||
|
||||
Negative numbers are commonly represented in binary using two's complement.
|
||||
|
||||
## Two's complement
|
||||
The two's complement of an integer is obtained as follows:
- Step 1: Start with the absolute value of the number.
- Step 2: Invert (flip) all bits, changing every 0 to 1 and every 1 to 0.
- Step 3: Add 1 to the inverted number, ignoring any overflow; accounting for the overflow would produce the wrong result.
|
||||
|
||||
For example, to calculate the decimal number **−6** in binary:
|
||||
- Step 1: _+6_ in decimal is _0110_ in binary; the leftmost significant bit (the first 0) is the sign (just _110_ in binary would be -2 in decimal).
|
||||
- Step 2: flip all bits in _0110_, giving _1001_.
|
||||
- Step 3: add the place value 1 to the flipped number _1001_, giving _1010_.
|
||||
|
||||
To verify that _1010_ indeed has a value of _−6_, add the place values together, but _subtract_ the sign value from the final calculation. Because the most significant value is the sign value, it must be subtracted to produce the correct result: **1010** = **−**(**1**×$2^3$) + (**0**×$2^2$) + (**1**×$2^1$) + (**0**×$2^0$) = **1**×−8 + **0** + **1**×2 + **0** = −6.

| Bits:                | 1                   | 0             | 1             | 0             |
| -------------------- | ------------------- | ------------- | ------------- | ------------- |
| Decimal bit value:   | **−**8              | 4             | 2             | 1             |
| Binary calculation:  | **−**(**1**×$2^3$)  | (**0**×$2^2$) | (**1**×$2^1$) | (**0**×$2^0$) |
| Decimal calculation: | **−**(**1**×8)      | **0**         | **1**×2       | **0**         |
|
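The same result can be checked quickly on the command line; a small sketch using shell arithmetic and `bc`, where the 4-bit width is an assumption made for this example:

```shell
val=6
comp=$(( (~val + 1) & 0xF ))   # two's complement of 6 in 4 bits -> 10 decimal
echo "obase=2; $comp" | bc     # -> 1010 binary
```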
10 science/math/Decimal System.md Normal file
@@ -0,0 +1,10 @@
---
|
||||
obj: concept
|
||||
wiki: https://en.wikipedia.org/wiki/Decimal
|
||||
---
|
||||
# Decimal System
|
||||
The decimal numeral system (also called the base-ten positional numeral system and decanary) is the standard system for denoting integer and non-integer numbers. It is the extension to non-integer numbers (decimal fractions) of the Hindu–Arabic numeral system. The way of denoting numbers in the decimal system is often referred to as decimal notation.
|
||||
|
||||
A decimal numeral (also often just decimal or, less correctly, decimal number), refers generally to the notation of a number in the decimal numeral system. Decimals may sometimes be identified by a decimal separator (usually "." or "," as in 25.9703 or 3,1415). Decimal may also refer specifically to the digits after the decimal separator, such as in "3.14 is the approximation of π to two decimals". Zero-digits after a decimal separator serve the purpose of signifying the precision of a value.
|
||||
|
||||
The decimal system uses the following symbols: `0`, `1`, `2`, `3`, `4`, `5`, `6`, `7`, `8`, `9`
|
30 science/math/Hexadecimal System.md Normal file
@@ -0,0 +1,30 @@
---
|
||||
obj: concept
|
||||
---
|
||||
# Hexadecimal System
|
||||
The hexadecimal numbering system, often referred to as "hex," is a base-16 numeral system widely used in computing and digital electronics. It provides a convenient way to represent binary-coded values with a more human-friendly and compact notation. In the hexadecimal system, numbers are represented using 16 different digits: 0-9 and A-F, where A stands for 10, B for 11, and so on up to F for 15.
|
||||
|
||||
## Representation
|
||||
In hexadecimal, each digit represents a power of 16. The rightmost digit represents $16^0$ (1), the next digit to the left represents $16^1$ (16), the next $16^2$ (256), and so forth. For example:
|
||||
|
||||
- **1F in hexadecimal** is equal to $1×16^1+15×16^0$, which is 31 in decimal.
|
||||
- **2A3 in hexadecimal** is equal to $2×16^2+10×16^1+3×16^0$, which is 675 in decimal.
|
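These conversions can be verified directly in a shell with `printf` (the values are the examples above):

```shell
printf '%d\n' 0x1F    # 31
printf '%d\n' 0x2A3   # 675
printf '%X\n' 675     # 2A3
```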
||||
|
||||
## Hexadecimal Digits
|
||||
The hexadecimal system uses the following digits:
|
||||
|
||||
- **0, 1, 2, 3, 4, 5, 6, 7, 8, 9**: Represent values 0 to 9.
|
||||
- **A, B, C, D, E, F**: Represent values 10 to 15.
|
||||
|
||||
## Uses in Computing
|
||||
### Memory Addresses
|
||||
In computer programming, memory addresses are often expressed in hexadecimal. Each byte of memory can be represented by two hexadecimal digits, providing a concise way to denote memory locations.
|
||||
`Example: 0x1A3F`
|
||||
|
||||
### Color Representation
|
||||
Hexadecimal is commonly used to represent colors in web development and digital graphics. In this context, a hexadecimal color code consists of three pairs of digits representing the intensities of red, green, and blue.
|
||||
`Example: #FFA500 (RGB: 255, 165, 0)`
|
||||
|
||||
### Binary Representation
|
||||
Hexadecimal is closely related to [binary](Binary%20System.md) representation. Each hexadecimal digit corresponds to four bits in binary. This relationship makes it easier to convert between hexadecimal and binary.
|
||||
`Example: Binary 1010 is equivalent to Hex A.`
|
5 science/math/Math.md Normal file
@@ -0,0 +1,5 @@
---
|
||||
obj: meta
|
||||
---
|
||||
# Math
|
||||
#wip #🐇 #notnow
|
5 science/philosophy/Philosophy.md Normal file
@@ -0,0 +1,5 @@
---
|
||||
obj: meta
|
||||
---
|
||||
# Philosophy
|
||||
#wip #🐇 #notnow
|
4 science/physics/Physics.md Normal file
@@ -0,0 +1,4 @@
---
|
||||
obj: meta
|
||||
---
|
||||
#wip #🐇 #notnow
|
6 science/physics/SI Units.md Normal file
@@ -0,0 +1,6 @@
---
|
||||
obj: meta/collection
|
||||
---
|
||||
|
||||
#wip #🐇 #notnow
|
||||
- [Volt](units/Volt.md)
|
11 science/physics/units/Volt.md Normal file
@@ -0,0 +1,11 @@
---
|
||||
wiki: https://en.wikipedia.org/wiki/Volt
|
||||
obj: science/unit
|
||||
---
|
||||
# Volt
|
||||
The volt (symbol: V) is the unit of electric potential, electric potential difference (voltage), and electromotive force in the International System of Units (SI).
|
||||
|
||||
| Name | Value |
|
||||
| ------------- | ------------- |
|
||||
| Symbol | V |
|
||||
| SI base units | $kg⋅m^2⋅s^{-3}⋅A^{-1}$ |
|
5 science/psychology/Psychology.md Normal file
@@ -0,0 +1,5 @@
---
|
||||
obj: meta
|
||||
---
|
||||
# Psychology
|
||||
#wip #🐇 #notnow
|
71 technology/Cryptography/AES.md Normal file
@@ -0,0 +1,71 @@
---
|
||||
obj: concept
|
||||
---
|
||||
|
||||
# AES
|
||||
The Advanced Encryption Standard (AES) is a widely adopted symmetric encryption algorithm used to secure sensitive data. It was established as a standard by the U.S. National Institute of Standards and Technology (NIST) in 2001, following a public competition to select a successor to the Data Encryption Standard (DES). AES is known for its efficiency, security, and versatility, making it a popular choice for various applications, including data encryption, secure communications, and cryptographic protocols.
|
||||
|
||||
## Key Features
|
||||
### 1. **Symmetric Encryption**
|
||||
AES is a symmetric encryption algorithm, meaning the same key is used for both encryption and decryption. This key is kept secret between the communicating parties.
|
||||
|
||||
### 2. **Block Cipher**
|
||||
AES operates on fixed-size blocks of data, encrypting and decrypting data in blocks of 128 bits. It supports key sizes of 128, 192, or 256 bits.
|
||||
|
||||
### 3. **Key Expansion**
|
||||
The key expansion process in AES generates a set of round keys derived from the original key. These round keys are used in the multiple rounds of encryption and provide a high level of security.
|
||||
|
||||
### 4. **Rounds of Encryption**
|
||||
AES performs a series of transformations known as rounds. The number of rounds depends on the key size: 10 rounds for a 128-bit key, 12 rounds for a 192-bit key, and 14 rounds for a 256-bit key.
|
||||
|
||||
### 5. **Substitution-Permutation Network (SPN) Structure**
|
||||
AES employs an SPN structure, combining substitution (replacing each byte with another) and permutation (rearranging bytes) operations to achieve confusion and diffusion, enhancing the algorithm's security.
|
||||
|
||||
## Encryption Process
|
||||
1. **Key Expansion:** Generate a set of round keys from the original key.
|
||||
2. **Initial Round:** Add the initial round key to the plaintext.
|
||||
3. **Main Rounds:** Perform a series of substitution, permutation, and mixing operations for the specified number of rounds.
|
||||
4. **Final Round:** The final round excludes the mixing operation.
|
||||
5. **Output:** The result is the ciphertext.
|
||||
|
||||
## Decryption Process
|
||||
1. **Key Expansion:** Generate the round keys from the original key.
|
||||
2. **Initial Round:** Add the initial round key to the ciphertext.
|
||||
3. **Main Rounds:** Perform the inverse operations of the encryption process in reverse order.
|
||||
4. **Final Round:** The final round excludes the mixing operation.
|
||||
5. **Output:** The result is the decrypted plaintext.
|
||||
|
||||
## Strengths of AES
|
||||
- **Security:** AES has withstood extensive cryptanalysis and is considered highly secure when implemented correctly.
|
||||
- **Efficiency:** It is computationally efficient and well-suited for both hardware and software implementations.
|
||||
- **Versatility:** AES is used in various applications, including securing data at rest, data in transit, and cryptographic protocols like TLS.
|
||||
|
||||
## Variants of AES
|
||||
- **AES-128:** Uses a 128-bit key and 10 rounds of encryption.
|
||||
- **AES-192:** Uses a 192-bit key and 12 rounds of encryption.
|
||||
- **AES-256:** Uses a 256-bit key and 14 rounds of encryption.
|
||||
|
||||
## Usage
|
||||
One can use AES with [OpenSSL](../applications/OpenSSL.md) or [GPG](../tools/GPG.md):
|
||||
|
||||
### OpenSSL
|
||||
Encrypt:
|
||||
```shell
|
||||
openssl enc -aes-256-cbc -salt -in plaintext.txt -out encrypted_file.enc
|
||||
```
|
||||
|
||||
Decrypt:
|
||||
```shell
|
||||
openssl enc -aes-256-cbc -d -in encrypted_file.enc -out decrypted_file.txt
|
||||
```
|
||||
|
||||
### GnuPG
|
||||
Encrypt:
|
||||
```shell
|
||||
gpg -c --cipher-algo AES256 file.txt
|
||||
```
|
||||
|
||||
Decrypt:
|
||||
```shell
|
||||
gpg -d file.txt.gpg -o decrypted_file.txt
|
||||
```
|
17 technology/Cryptography/Cryptography.md Normal file
@@ -0,0 +1,17 @@
---
|
||||
obj: concept
|
||||
---
|
||||
|
||||
# Cryptography
|
||||
Cryptography is the science and art of securing communication and information through the use of mathematical techniques and algorithms. It plays a crucial role in ensuring confidentiality, integrity, and authenticity of data in various applications, including communication systems, financial transactions, and information storage.
|
||||
|
||||
## Cryptographic Algorithms
|
||||
### 1. **Symmetric-Key Algorithms**
|
||||
Symmetric-key algorithms use the same key for both encryption and decryption. They are efficient and fast, making them suitable for bulk data encryption. See [AES](AES.md).
|
||||
|
||||
### 2. **Asymmetric-Key Algorithms**
|
||||
Asymmetric-key algorithms use a pair of public and private keys. The public key is shared openly, while the private key is kept secret. Data encrypted with the public key can only be decrypted with the corresponding private key, and vice versa. See [RSA](RSA.md).
|
||||
|
||||
### 3. **Hash Functions**
|
||||
|
||||
Hash functions take input data and produce a fixed-size hash value, typically a string of characters. They are fundamental for data integrity verification. See [SHA](SHA.md).
|
63 technology/Cryptography/RSA.md Normal file
@@ -0,0 +1,63 @@
---
|
||||
obj: concept
|
||||
---
|
||||
|
||||
# RSA
|
||||
RSA (Rivest-Shamir-Adleman) is a widely used asymmetric encryption algorithm that enables secure communication and digital signatures. Named after its inventors, Ron Rivest, Adi Shamir, and Leonard Adleman, RSA relies on the mathematical properties of large prime numbers for its security.
|
||||
|
||||
## Key Concepts
|
||||
### 1. **Asymmetric Encryption**
|
||||
RSA is an asymmetric algorithm, meaning it uses a pair of keys: a public key for encryption and a private key for decryption. The public key is widely distributed, while the private key is kept secret.
|
||||
|
||||
### 2. **Key Generation**
|
||||
- **Key Pair:** The RSA key pair consists of a public key and a corresponding private key.
|
||||
- **Public Key:** Composed of a modulus $N$ and an exponent $e$.
|
||||
- **Private Key:** Composed of the same modulus $N$ and a private exponent $d$.
|
||||
- **Key Generation Process:**
|
||||
1. Select two large prime numbers, $p$ and $q$.
|
||||
2. Compute $N = pq$.
|
||||
3. Compute $ϕ(N) = (p - 1)(q - 1)$.
|
||||
4. Choose $e$ such that $1 < e < ϕ(N)$ and $e$ is coprime to $ϕ(N)$.
|
||||
5. Calculate $d$ as the modular multiplicative inverse of $e$ modulo $ϕ(N)$.
|
||||
6. The public key is $(N, e)$ and the private key is $(N, d)$.
|
||||
|
||||
### 3. **Encryption and Decryption**
|
||||
- **Encryption:** Given the public key $(N,e)$, a plaintext message $M$ is encrypted as $C = M^e \mod N$.
|
||||
- **Decryption:** Using the private key $(N,d)$, the ciphertext $C$ is decrypted as $M = C^d \mod N$.
|
||||
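As a sanity check of these formulas with textbook-sized primes (illustration only; real keys use primes that are hundreds of digits long): $p=61$, $q=53$ gives $N=3233$, $ϕ(N)=3120$, $e=17$, $d=2753$. The round trip can be verified with `bc`:

```shell
echo '65^17 % 3233' | bc       # encrypt M=65  -> C=2790
echo '2790^2753 % 3233' | bc   # decrypt C=2790 -> M=65
```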
|
||||
### 4. **Digital Signatures**
|
||||
RSA is commonly used for digital signatures to verify the authenticity and integrity of messages. The sender signs a message with their private key, and the recipient can verify the signature using the sender's public key.
|
||||
|
||||
## Security Considerations
|
||||
- The security of RSA relies on the difficulty of factoring the product of two large prime numbers $(N = pq)$.
|
||||
- The key length is crucial for security; longer keys provide higher security but may be computationally more expensive.
|
||||
|
||||
## Using RSA in Practice
|
||||
Using RSA can be done either with [OpenSSL](../applications/OpenSSL.md) or [GPG](../tools/GPG.md).
|
||||
|
||||
### 1. **Key Generation:**
|
||||
```shell
|
||||
# Generate a 2048-bit RSA private key
|
||||
openssl genpkey -algorithm RSA -out private_key.pem -aes256
|
||||
|
||||
# Derive the corresponding public key
|
||||
openssl rsa -pubout -in private_key.pem -out public_key.pem
|
||||
```
|
||||
|
||||
### 2. **Encryption and Decryption:**
|
||||
```shell
|
||||
# Encrypt a message with the public key
|
||||
openssl rsautl -encrypt -in plaintext.txt -out ciphertext.enc -pubin -inkey public_key.pem
|
||||
|
||||
# Decrypt the ciphertext with the private key
|
||||
openssl rsautl -decrypt -in ciphertext.enc -out decrypted.txt -inkey private_key.pem
|
||||
```
|
||||
|
||||
### 3. **Digital Signatures:**
|
||||
```shell
|
||||
# Sign a message with the private key
|
||||
openssl dgst -sha256 -sign private_key.pem -out signature.bin message.txt
|
||||
|
||||
# Verify the signature with the public key
|
||||
openssl dgst -sha256 -verify public_key.pem -signature signature.bin message.txt
|
||||
```
|
17 technology/Cryptography/SHA.md Normal file
@@ -0,0 +1,17 @@
---
|
||||
obj: concept
|
||||
---
|
||||
|
||||
# SHA
|
||||
SHA-2 (Secure Hash Algorithm 2) is a set of cryptographic hash functions designed by the United States National Security Agency (NSA) and first published in 2001. They are built using the Merkle–Damgård construction, from a one-way compression function itself built using the Davies–Meyer structure from a specialized block cipher.
|
||||
|
||||
SHA-2 includes significant changes from its predecessor, SHA-1. The SHA-2 family consists of six hash functions with digests (hash values) that are 224, 256, 384 or 512 bits: SHA-224, SHA-256, SHA-384, SHA-512, SHA-512/224, SHA-512/256. SHA-256 and SHA-512 are novel hash functions computed with eight 32-bit and 64-bit words, respectively. They use different shift amounts and additive constants, but their structures are otherwise virtually identical, differing only in the number of rounds. SHA-224 and SHA-384 are truncated versions of SHA-256 and SHA-512 respectively, computed with different initial values. SHA-512/224 and SHA-512/256 are also truncated versions of SHA-512, but the initial values are generated using the method described in Federal Information Processing Standards (FIPS) PUB 180-4.
|
||||
|
||||
SHA has libraries for many programming languages and can be used with [OpenSSL](../applications/OpenSSL.md) or the `shasum` command.
|
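For example, a SHA-256 digest of a file can be computed with any of the following equivalent commands (the file name is a placeholder):

```shell
sha256sum file.txt
shasum -a 256 file.txt
openssl dgst -sha256 file.txt
```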
||||
|
||||
## Purpose
|
||||
Hash functions play a crucial role in [cryptography](Cryptography.md) and information security. They take an input (or message) and produce a fixed-size string of characters, which is typically a digest or hash value. The primary purposes of SHA hash functions include:
|
||||
|
||||
1. **Data Integrity**: Hash functions ensure the integrity of data by generating a unique hash value for a given input. Any change in the input data will result in a completely different hash, making it easy to detect alterations.
|
||||
2. **Digital Signatures**: SHA is often used in conjunction with digital signatures to create a secure and verifiable way of confirming the origin and integrity of a message or document.
|
||||
3. **Password Storage**: Hash functions are employed to store passwords securely. Instead of storing the actual password, systems store the hash of the password, making it more challenging for attackers to obtain the original passwords.
|
16 technology/applications/3d/Blender.md Normal file
File diff suppressed because one or more lines are too long
17 technology/applications/Feather Wallet.md Normal file
File diff suppressed because one or more lines are too long
27 technology/applications/MongoDB Compass.md Normal file
File diff suppressed because one or more lines are too long
122 technology/applications/MongoDB.md Normal file
@@ -0,0 +1,122 @@
---
|
||||
website: https://www.mongodb.com
|
||||
obj: application
|
||||
---
|
||||
|
||||
#wip #🐇 #notnow
|
||||
MongoDB is a popular NoSQL database that is document-oriented and designed for scalability and flexibility.
|
||||
|
||||
## Docker-Compose
|
||||
```yml
|
||||
version: '3'
|
||||
services:
|
||||
mongo:
|
||||
image: mongo
|
||||
container_name: mongo
|
||||
restart: always
|
||||
environment:
|
||||
MONGO_INITDB_ROOT_USERNAME: root
|
||||
MONGO_INITDB_ROOT_PASSWORD: password
|
||||
volumes:
|
||||
- mongodb_data:/data/db
|
||||
ports:
|
||||
- "27017:27017"
|
||||
```
|
||||
|
||||
## Usage
|
||||
### **Connecting to MongoDB**
|
||||
To connect to MongoDB using the `mongo` shell:
|
||||
`mongo mongodb://<username>:<password>@<hostname>:<port>/<database>`
|
||||
|
||||
Replace `<username>`, `<password>`, `<hostname>`, `<port>`, and `<database>` with your own values.
|
||||
|
||||
### **Working with Databases**
|
||||
To create a new database: `use <database>`
|
||||
To show a list of all databases: `show dbs`
|
||||
To switch to a different database: `use <database>`
|
||||
To drop a database: `use <database>; db.dropDatabase()`
|
||||
|
||||
### **Working with Collections**
|
||||
To create a new collection: `db.createCollection("<collection>")`
|
||||
To show a list of all collections in the current database: `show collections`
|
||||
To drop a collection: `db.<collection>.drop()`
|
||||
|
||||
### **Inserting Data**
|
||||
To insert a single document: `db.<collection>.insertOne(<document>)`
|
||||
To insert multiple documents: `db.<collection>.insertMany([<document1>, <document2>, ...])`
|
||||
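For instance, a one-off insert from the command line (a sketch using the credentials from the compose file above; the `people` database and `users` collection are made-up names):

```shell
mongo "mongodb://root:password@localhost:27017/people?authSource=admin" \
  --eval 'db.users.insertOne({ name: "Jane Doe", age: 30, interests: ["hiking"] })'
```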
|
||||
### **Querying Data**
|
||||
To find all documents in a collection: `db.<collection>.find()`
|
||||
To find documents that match a specific condition: `db.<collection>.find(<query>)`
|
||||
To limit the number of documents returned: `db.<collection>.find().limit(<limit>)`
|
||||
To sort documents by a field: `db.<collection>.find().sort({<field>: <1 or -1>})`
|
||||
To count the number of documents: `db.<collection>.count()`
|
||||
|
||||
### **Updating Data**
|
||||
To update a single document: `db.<collection>.updateOne(<filter>, <update>)`
|
||||
To update multiple documents: `db.<collection>.updateMany(<filter>, <update>)`
|
||||
To replace a document: `db.<collection>.replaceOne(<filter>, <replacement>)`
|
||||
|
||||
### **Deleting Data**
|
||||
To delete a single document: `db.<collection>.deleteOne(<filter>)`
|
||||
To delete multiple documents: `db.<collection>.deleteMany(<filter>)`
|
||||
To delete all documents in a collection: `db.<collection>.deleteMany({})`
|
||||
|
||||
### Filters
|
||||
- $eq: The $eq operator matches documents where the value of a field equals a specified value.
|
||||
- $ne: The $ne operator matches documents where the value of a field is not equal to a specified value.
|
||||
- $gt: The $gt operator matches documents where the value of a field is greater than a specified value.
|
||||
- $gte: The $gte operator matches documents where the value of a field is greater than or equal to a specified value.
|
||||
- $lt: The $lt operator matches documents where the value of a field is less than a specified value.
|
||||
- $lte: The $lte operator matches documents where the value of a field is less than or equal to a specified value.
|
||||
- $in: The $in operator matches documents where the value of a field equals any value in a specified array.
|
||||
- $nin: The $nin operator matches documents where the value of a field does not equal any value in a specified array.
|
||||
- $and: The $and operator performs a logical AND operation on an array of two or more expressions and selects the documents that satisfy all the expressions.
|
||||
- $or: The $or operator performs a logical OR operation on an array of two or more expressions and selects the documents that satisfy at least one of the expressions.
|
||||
- $not: The $not operator performs a logical NOT operation on the specified expression and selects the documents that do not match the expression.
|
||||
- $exists: The $exists operator matches documents where a specified field exists or does not exist.
|
||||
- $type: The $type operator matches documents where a specified field has a specific BSON type.
|
||||
- $regex: The $regex operator matches documents where a specified field matches a regular expression.
|
||||
- $text: The $text operator performs a text search on the specified field(s).
|
||||
- $elemMatch: The $elemMatch operator matches documents where a specified array field contains at least one element that matches all the specified conditions.
|
||||
- $size: The $size operator matches documents where a specified array field has a specific size.
|
||||
|
||||
**Example:**
|
||||
```js
|
||||
db.users.find(
|
||||
{
|
||||
$and: [
|
||||
{ status: "active" },
|
||||
{ age: { $gt: 28 } }
|
||||
]
|
||||
}
|
||||
)
|
||||
```
|
||||
|
||||
### Update Modifiers
|
||||
- $set: The $set operator updates the value of a field in a document.
|
||||
- $unset: The $unset operator removes a field from a document.
|
||||
- $inc: The $inc operator increments the value of a field in a document.
|
||||
- $mul: The $mul operator multiplies the value of a field in a document.
|
||||
- $min: The $min operator updates the value of a field in a document if the new value is lower than the existing value.
|
||||
- $max: The $max operator updates the value of a field in a document if the new value is higher than the existing value.
|
||||
- $rename: The $rename operator renames a field in a document.
|
||||
- $addToSet: The $addToSet operator adds a value to an array field in a document if the value does not already exist in the array.
|
||||
- $push: The $push operator appends a value to an array field in a document.
|
||||
- $pull: The $pull operator removes a value from an array field in a document.
|
||||
- $currentDate: The $currentDate operator sets the value of a field in a document to the current date and time.
|
||||
- $each: The $each operator can be used with $addToSet and $push to append multiple values to an array field in a document.
|
||||
- $sort: The $sort operator can be used with $push to sort the elements in an array field in a document.
|
||||
|
||||
|
||||
**Example:**
|
||||
```js
|
||||
db.users.updateOne(
|
||||
{ name: "John Doe" },
|
||||
{
|
||||
$set: { age: 35 },
|
||||
$addToSet: { interests: "Hiking" },
|
||||
$unset: { status: "" }
|
||||
}
|
||||
)
|
||||
```
|
134 technology/applications/OpenSSL.md Normal file
@@ -0,0 +1,134 @@
---
|
||||
website:
|
||||
- https://www.openssl.org
|
||||
- https://www.libressl.org
|
||||
obj: application
|
||||
---
|
||||
|
||||
# OpenSSL
|
||||
OpenSSL is a [cryptography](../Cryptography/Cryptography.md) toolkit implementing the Secure Sockets Layer (SSL) and Transport Layer Security (TLS) network protocols and related [cryptography](../Cryptography/Cryptography.md) standards required by them.
|
||||
|
||||
The openssl program is a command line program for using the various [cryptography](../Cryptography/Cryptography.md) functions of OpenSSL's crypto library from the [shell](cli/Shell.md). It can be used for:
|
||||
- Creation and management of private keys, public keys and parameters
|
||||
- Public key cryptographic operations
|
||||
- Creation of X.509 certificates, CSRs and CRLs
|
||||
- Calculation of Message Digests and Message Authentication Codes
|
||||
- Encryption and Decryption with Ciphers
|
||||
- SSL/TLS Client and Server Tests
|
||||
- Handling of S/MIME signed or encrypted mail
|
||||
- Timestamp requests, generation and verification
|
||||
|
||||
## Usage
|
||||
```shell
|
||||
openssl [command] [options]
|
||||
```
|
||||
|
||||
### Certificates (`openssl req`, `openssl x509`)
|
||||
#### Generate a certificate
|
||||
Usage: `openssl req -x509 -key private_key.pem -out certificate.pem -days 365`
|
||||
|
||||
#refactor -> infos on signed certificates with CAs
|
||||
|
||||
#### Show information about a certificate
|
||||
Usage: `openssl x509 -in certificate.pem -text -noout`
|
||||
|
||||
### Digest (`openssl dgst`)
|
||||
Use digest (hash) functions. (Use `openssl dgst -list` for a list of all available digests)
|
||||
Usage: `openssl dgst [options] [file]`
|
||||
|
||||
#### Options
|
||||
| Option | Description |
|
||||
| ------------- | ----------------------------------- |
|
||||
| `-c`          | Print digest with separating colons |
|
||||
| `-r` | Print digest in coreutils format |
|
||||
| `-out <file>` | Output to filename |
|
||||
| `-hex` | Output as hex |
|
||||
| `-binary` | Output in binary |
|
||||
| `-<digest>` | Use \<digest> |
|
||||
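For example, hashing a file with SHA-256 using the flags above (file names are placeholders):

```shell
# Print the SHA-256 digest of a file
openssl dgst -sha256 file.txt

# Same digest in coreutils format, written to a file
openssl dgst -sha256 -r -out file.txt.sha256 file.txt
```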
|
||||
### Encryption (`openssl enc`)
|
||||
Encrypt and decrypt using ciphers (Use `openssl enc -ciphers` for a list of all available ciphers)
|
||||
Usage: `openssl enc [options]`
|
||||
|
||||
#### Options
|
||||
| Option | Description |
|
||||
| --------------- | ----------------------------------------------- |
|
||||
| `-e` | Do Encryption |
|
||||
| `-d` | Do Decryption |
|
||||
| `-<cipher>` | Use \<cipher> |
|
||||
| `-in <input>` | Input file |
|
||||
| `-k <val>` | Passphrase |
|
||||
| `-kfile <file>` | Read passphrase from file |
|
||||
| `-out <output>` | Output file |
|
||||
| `-a, -base64` | [Base64](../files/Base64.md) decode/encode data |
|
||||
| `-pbkdf2` | Use password-based key derivation function 2 |
|
||||
| `-iter <num>` | Change iterations of `-pbkdf2` |
|
||||
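For example, encrypting and then decrypting a file with AES-256-CBC and PBKDF2 key derivation (file names and iteration count are placeholders):

```shell
openssl enc -e -aes-256-cbc -pbkdf2 -iter 100000 -in secret.txt -out secret.enc
openssl enc -d -aes-256-cbc -pbkdf2 -iter 100000 -in secret.enc -out secret.dec.txt
```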
|
||||
### [RSA](../Cryptography/RSA.md) (`openssl genrsa`, `openssl rsa`, `openssl pkeyutl`)
|
||||
#### Generate [RSA](../Cryptography/RSA.md) Private Key (`openssl genrsa`)
|
||||
```shell
|
||||
openssl genrsa -out <keyfile> [-<cipher>] [-verbose] [-quiet] <numbits>
|
||||
```
|
||||
|
||||
The `-<cipher>` option lets you protect the key with a password using the specified cipher algo (See `openssl enc -ciphers` for a list of available ciphers).
|
||||
|
||||
#### Generate [RSA](../Cryptography/RSA.md) Public Key (`openssl rsa`)
|
||||
```shell
|
||||
openssl rsa -pubout -in <privatekey> [-passin file:<password_file>] -out <publickey>
|
||||
```
|
||||
|
||||
#### Working with [RSA](../Cryptography/RSA.md) (`openssl pkeyutl`)
|
||||
```shell
|
||||
# Sign with Private Key
|
||||
openssl pkeyutl -sign -in <input> -inkey <private_key> [-passin file:<password_file>] -out <output> [-digest algo]
|
||||
|
||||
# Verify with Public Key
|
||||
openssl pkeyutl -verify -in <input> -pubin -inkey <public_key> -sigfile <signature_file>
|
||||
|
||||
# Encrypt with Public Key
|
||||
openssl pkeyutl -encrypt -pubin -inkey <public_key> -in <input> -out <output>
|
||||
|
||||
# Decrypt with Private Key
|
||||
openssl pkeyutl -decrypt -inkey <private_key> [-passin file:<password_file>] -in <input> -out <output>
|
||||
```
|
||||
|
||||
### Password Hash (`openssl passwd`)
|
||||
Generate hashed passwords
|
||||
Usage: `openssl passwd [options] [password]`
|
||||
|
||||
#### Options
|
||||
| Option | Description |
|
||||
| ------------ | ------------------------------------------------ |
|
||||
| `-in infile` | Read passwords from file |
|
||||
| `-noverify` | Never verify when reading password from terminal |
|
||||
| `-stdin` | Read passwords from stdin |
|
||||
| `-salt val` | Use provided salt |
|
||||
| `-6` | SHA512-based password algorithm |
|
||||
| `-5` | SHA256-based password algorithm |
|
||||
| `-apr1` | MD5-based password algorithm, Apache variant |
|
||||
| `-1` | MD5-based password algorithm |
|
||||
| `-aixmd5` | AIX MD5-based password algorithm |
|
||||
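For example, generating a SHA512-based crypt hash with a fixed salt (password and salt are placeholders):

```shell
openssl passwd -6 -salt examplesalt 'correct horse battery staple'
```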
|
||||
### Prime Numbers (`openssl prime`)
|
||||
Generate and verify prime numbers
|
||||
Usage: `openssl prime [options] [num]`
|
||||
|
||||
#### Options
|
||||
| Option | Description |
|
||||
| ------------ | ------------------------------------------------- |
|
||||
| `-bits +int` | Size of number in bits |
|
||||
| `-hex` | Hex output |
|
||||
| `-generate` | Generate a prime |
|
||||
| `-safe` | When used with `-generate`, generate a safe prime |
|
||||
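For example, testing a number for primality and generating a fresh prime (the bit size is arbitrary):

```shell
# Check a number for primality
openssl prime 97

# Generate a 256-bit prime, printed in hex
openssl prime -generate -bits 256 -hex
```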
|
||||
### Random Data (`openssl rand`)
|
||||
Generate random data.
|
||||
Usage: `openssl rand [options] num`
|
||||
|
||||
#### Options
|
||||
| Option | Description |
|
||||
| -------------- | ------------------------------------------------------- |
|
||||
| `-out outfile` | Output file |
|
||||
| `-base64` | [Base64](../files/Base64.md) encode output |
|
||||
| `-hex` | Hex encode output |
|
||||
| `-rand val` | Load the given file(s) into the random number generator |
|
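For example, producing 32 random bytes as a hex string:

```shell
openssl rand -hex 32
```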
130 technology/applications/SSH.md Normal file
@@ -0,0 +1,130 @@
---
|
||||
aliases:
|
||||
- OpenSSH
|
||||
website: https://www.openssh.com/
|
||||
obj: application
|
||||
repo: https://github.com/openssh/openssh-portable
|
||||
---
|
||||
# SSH
|
||||
#refactor add ssh suite applications, etc
|
||||
-> https://www.openssh.com/
|
||||
Secure Shell (SSH) is a cryptographic network protocol for operating network services securely over an unsecured network. Typical applications include remote command-line login and remote command execution, but any network service can be secured with SSH.
|
||||
|
||||
Examples of services that can use SSH are [Git](../dev/Git.md), [rsync](rsync.md) and X11 forwarding. Services that always use SSH are SCP and SFTP.
|
||||
|
||||
An SSH server, by default, listens on the standard TCP port 22. An SSH client program is typically used for establishing connections to an sshd daemon accepting remote connections. Both are commonly present on most modern operating systems, including [macOS](../macos/macOS.md), GNU/[Linux](../linux/Linux.md), Solaris and OpenVMS. Proprietary, freeware and open source versions of various levels of complexity and completeness exist.
|
||||
|
||||
## Client
|
||||
### Usage
|
||||
Connecting to a server
|
||||
```shell
|
||||
ssh -p port user@server-address
|
||||
```
|
||||
|
||||
Port forwarding:
|
||||
```shell
|
||||
# Forward Remote -> Local
|
||||
ssh -N -f -L local_port:127.0.0.1:remote_port host
|
||||
# Forward Local -> Remote
|
||||
ssh -N -f -R remote_port:127.0.0.1:local_port host
|
||||
```
|
||||
|
||||
Copying files (works with [rsync](cli/rsync.md) as well):
|
||||
```shell
|
||||
scp -r files remote:/path
|
||||
```
|
||||
|
||||
Copy ssh key to host:
|
||||
```shell
|
||||
ssh-copy-id user@remote
|
||||
```
|
||||
|
||||
Pipes work too over SSH:
|
||||
```shell
|
||||
ssh remote "cat /log" | grep denied
|
||||
cat ~/.ssh/id_rsa.pub | ssh remote 'cat >> .ssh/authorized_keys'
|
||||
```
|
||||
|
||||
Use a jump host:
|
||||
```shell
|
||||
ssh -J jump_server remote
|
||||
```
|
||||
|
||||
### Configuration
|
||||
Client can be configured by the file `~/.ssh/config`
|
||||
```
|
||||
# global options
|
||||
User user
|
||||
|
||||
# host-specific options
|
||||
Host myserver
|
||||
Hostname server-address
|
||||
Port port
|
||||
IdentityFile ~/.ssh/id_rsa
|
||||
ProxyJump host
|
||||
ProxyCommand corkscrew <proxy-host> <proxy-port> %h %p # HTTP Proxy
|
||||
```
|
||||
|
||||
With this configuration, the client command can be shortened to
|
||||
```shell
|
||||
ssh myserver
|
||||
```
|
||||
|
||||
Corkscrew is an additional program to tunnel SSH through HTTP proxies:
|
||||
```shell
|
||||
ssh -o "ProxyCommand corkscrew <proxy-host> <proxy-port> %h %p" <ssh-username>@<ssh-server>
|
||||
```
|
||||
|
||||
## Server
|
||||
`sshd` is the OpenSSH server daemon, configured with `/etc/ssh/sshd_config` and managed by `sshd.service`. Whenever changing the configuration, use `sshd` in test mode before restarting the service to ensure it will be able to start cleanly. Valid configurations produce no output.
|
||||
```shell
|
||||
sshd -t
|
||||
```
|
||||
|
||||
### Configuration
|
||||
Limit users:
|
||||
```
|
||||
AllowUsers user1 user2
|
||||
DenyUsers user3 user4
|
||||
```
|
||||
|
||||
To allow access only for some groups:
|
||||
```
|
||||
AllowGroups group1 group2
|
||||
DenyGroups group3 group4
|
||||
```
|
||||
|
||||
Disable password authentication:
|
||||
```
|
||||
PasswordAuthentication no
|
||||
PermitEmptyPasswords no
|
||||
```
|
||||
|
||||
Disable root login:
|
||||
```
|
||||
PermitRootLogin no
# or allow root to log in with keys only:
PermitRootLogin prohibit-password
|
||||
```
|
||||
|
||||
Allow port forwarding:
|
||||
```
|
||||
AllowTcpForwarding yes
|
||||
```
|
||||
|
||||
Allow only certain commands:
|
||||
```
|
||||
ForceCommand command
|
||||
```
|
||||
|
||||
Limit port forwarding:
|
||||
```
|
||||
PermitListen host:port
|
||||
PermitOpen host:port
|
||||
```
|
||||
|
||||
User-based settings (everything here only applies to `user1`):
|
||||
```
|
||||
Match User user1
|
||||
PasswordAuthentication no
|
||||
AllowTcpForwarding yes
|
||||
```
|
10 technology/applications/SurrealDB.md Normal file
@@ -0,0 +1,10 @@
---
|
||||
obj: application
|
||||
os: linux
|
||||
website: https://surrealdb.com/
|
||||
repo: https://github.com/surrealdb/surrealdb
|
||||
---
|
||||
# SurrealDB
|
||||
#wip
|
||||
#🐇
|
||||
#notnow
|
198 technology/applications/Teleport.md Normal file
@@ -0,0 +1,198 @@
---
|
||||
obj: application
|
||||
website: https://goteleport.com
|
||||
repo: https://github.com/gravitational/teleport
|
||||
---
|
||||
# Teleport
|
||||
Teleport provides connectivity, authentication, access controls and audit for infrastructure.
|
||||
|
||||
It includes an identity-aware access proxy, a CA that issues short-lived certificates, a unified access control system and a tunneling system to access resources behind the firewall.
|
||||
|
||||
Teleport understands the [SSH](SSH.md), HTTPS, RDP, Kubernetes API, MySQL, [MongoDB](MongoDB.md) and PostgreSQL wire protocols, plus many others. It can integrate with Single Sign-On providers and enables you to apply access policies using infrastructure-as-code and GitOps tools.
|
||||
|
||||
## Setup
|
||||
You need a [domain](../internet/Domain.md) pointing at your teleport proxy instance.
|
||||
|
||||
Docker-Compose:
|
||||
```yml
|
||||
version: '3'
|
||||
services:
|
||||
teleport:
|
||||
image: public.ecr.aws/gravitational/teleport:14
|
||||
restart: unless-stopped
|
||||
hostname: <yourdomain.com>
|
||||
ports:
|
||||
- "3080:3080" # Web UI
|
||||
- "3022:3022" # SSH
|
||||
- "8443:8443" # HTTPS
|
||||
volumes:
|
||||
- ./config/teleport.yaml:/etc/teleport/teleport.yaml
|
||||
- ./data:/var/lib/teleport
|
||||
```
|
||||
|
||||
teleport.yml:
|
||||
```yml
|
||||
version: v3
|
||||
teleport:
|
||||
nodename: <yourdomain.com>
|
||||
data_dir: /var/lib/teleport
|
||||
log:
|
||||
output: stderr
|
||||
severity: INFO
|
||||
format:
|
||||
output: text
|
||||
ca_pin: ""
|
||||
diag_addr: ""
|
||||
auth_service:
|
||||
enabled: "yes"
|
||||
listen_addr: 0.0.0.0:3025
|
||||
proxy_listener_mode: multiplex
|
||||
authentication:
|
||||
type: local
|
||||
second_factor: true
|
||||
webauthn:
|
||||
rp_id: <yourdomain.com>
|
||||
connector_name: passwordless
|
||||
ssh_service:
|
||||
enabled: "no"
|
||||
proxy_service:
|
||||
enabled: "yes"
|
||||
public_addr: <yourdomain.com>:443
|
||||
https_keypairs: []
|
||||
https_keypairs_reload_interval: 0s
|
||||
acme: {}
|
||||
```
|
||||
|
||||
## [SSH](SSH.md) Agent Setup
|
||||
1. Install teleport on your host:
|
||||
```shell
|
||||
curl https://goteleport.com/static/install.sh | bash -s 14.2.0
|
||||
```
|
||||
2. On your teleport proxy, create a join token:
|
||||
```shell
|
||||
tctl tokens add --type=node --format=text > token.file
|
||||
```
|
||||
3. Join the server to the cluster:
|
||||
```shell
|
||||
sudo teleport node configure \
|
||||
--output=file:///etc/teleport.yaml \
|
||||
--token=/path/to/token.file \
|
||||
--proxy=tele.example.com:443
|
||||
```
|
||||
|
||||
|
||||
## `tctl`
|
||||
Admin tool for the Teleport Access Platform
|
||||
Usage: `tctl [<flags>] <command> [<args> ...]`
|
||||
|
||||
### Commands
|
||||
#### users add
|
||||
Generate a user invitation token.
|
||||
Usage: `tctl users add --roles=ROLES [<flags>] <account>`
|
||||
|
||||
##### Options
|
||||
| Option | Description |
|
||||
| -------- | ------------------------------------------- |
|
||||
| `--logins` | List of allowed SSH logins for the new user |
|
||||
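Example (the role and login names are placeholders):

```shell
tctl users add --roles=access,editor --logins=root,ubuntu alice
```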
|
||||
#### users update
|
||||
Update user account.
|
||||
Usage: `tctl users update [<flags>] <account>`
|
||||
|
||||
##### Options
|
||||
| Option | Description |
|
||||
| -------------- | ---------------------------------------------------------------- |
|
||||
| `--set-roles` | List of roles for the user to assume, replaces current roles |
|
||||
| `--set-logins` | List of allowed SSH logins for the user, replaces current logins |
|
||||
|
||||
#### users ls
|
||||
Lists all user accounts.
|
||||
Usage: `tctl users ls`
|
||||
|
||||
#### users rm
|
||||
Deletes user accounts.
|
||||
Usage: `tctl users rm <logins>`
|
||||
|
||||
#### users reset
|
||||
Reset user password and generate a new token.
|
||||
Usage: `tctl users reset <account>`
|
||||
|
||||
#### nodes add
|
||||
Generate a node invitation token.
|
||||
Usage: `tctl nodes add [<flags>]`
|
||||
|
||||
##### Options
|
||||
| Option | Description |
|
||||
| --------- | -------------------------------------------------------- |
|
||||
| `--roles` | Comma-separated list of roles for the new node to assume |
|
||||
| `--ttl` | Time to live for a generated token |
|
||||
|
||||
#### nodes ls
|
||||
List all active SSH nodes within the cluster.
|
||||
Usage: `tctl nodes ls [<flags>] [<labels>]`
|
||||
|
||||
#### tokens add
|
||||
Create an invitation token.
|
||||
Usage: `tctl tokens add --type=TYPE [<flags>]`
|
||||
|
||||
##### Options
|
||||
| Option | Description |
|
||||
| ---------- | ------------------------------------------------------------ |
|
||||
| `--type` | Type(s) of token to add, e.g. `--type=node,app,db,proxy,etc` |
|
||||
| `--labels` | Set token labels, e.g. `env=prod,region=us-west` |
|
||||
| `--ttl` | Set expiration time for token, default is 30 minutes |
|
||||
| `--format` | Output format, 'text', 'json', or 'yaml' |
|
||||
|
||||
#### tokens rm
|
||||
Delete/revoke an invitation token.
|
||||
Usage: `tctl tokens rm [<token>]`
|
||||
|
||||
#### tokens ls
|
||||
List node and user invitation tokens.
|
||||
Usage: `tctl tokens ls`
|
||||
|
||||
#### status
|
||||
Report cluster status.
|
||||
Usage: `tctl status`
|
||||
|
||||
## `tsh`
|
||||
Teleport Command Line client for interacting with your infrastructure.
|
||||
Usage: `tsh [options...] <command> [<args> ...]`
|
||||
|
||||
### Options
|
||||
| Option | Description |
|
||||
| --------- | --------------------------------------------- |
|
||||
| `--proxy` | Teleport proxy address |
|
||||
| `--user` | Teleport user, defaults to current local user |
|
||||
|
||||
### Commands
|
||||
#### ssh
|
||||
Run shell or execute a command on a remote SSH node.
|
||||
Usage: `tsh ssh [<flags>] <[user@]host> [<command>...]`
|
||||
|
||||
#### scp
|
||||
Transfer files to a remote SSH node.
|
||||
Usage: `tsh scp [<flags>] <from, to>...`
|
||||
|
||||
#### ls
|
||||
List remote SSH nodes.
|
||||
Usage: `tsh ls [<flags>] [<labels>]`
|
||||
|
||||
#### login
|
||||
Log in to a cluster and retrieve the session certificate.
|
||||
Usage: `tsh login [<flags>] [<cluster>]`
|
||||
|
||||
#### logout
|
||||
Delete a cluster certificate.
|
||||
Usage: `tsh logout`
|
||||
|
||||
#### status
|
||||
Display the list of proxy servers and retrieved certificates.
|
||||
Usage: `tsh status`
|
||||
|
||||
#### config
|
||||
Print [SSH](SSH.md) config details.
|
||||
This allows you to use regular `ssh` command to connect to teleport servers.
|
||||
```shell
|
||||
tsh config >> ~/.ssh/config
|
||||
```
|
19 technology/applications/backup/Vorta.md Normal file
@@ -0,0 +1,19 @@
---
|
||||
obj: application
|
||||
flatpak-id: com.borgbase.Vorta
|
||||
website: https://vorta.borgbase.com
|
||||
repo: https://github.com/borgbase/vorta
|
||||
---
|
||||
# Vorta
|
||||
Vorta is a graphical user interface (GUI) for the popular backup tool, [Borg](borg.md) Backup. It allows users to easily configure and manage backups of their data with a simple and intuitive interface.
|
||||
|
||||
## Features
|
||||
|
||||
Vorta includes a range of features to make backup management easy and accessible:
|
||||
- Backup configuration: Vorta allows users to configure backup jobs with a simple wizard interface. Users can specify the data to be backed up, the backup destination, and other backup settings.
|
||||
- Backup scheduling: Users can schedule backups to occur automatically at regular intervals or specific times.
|
||||
- Backup monitoring: Vorta provides real-time feedback on backup progress, allowing users to monitor the status of their backups.
|
||||
- Backup restoration: Vorta makes it easy to restore data from backups. Users can browse backups and select the data they want to restore.
|
||||
- Encryption and compression: [Borg](borg.md) Backup supports encryption and compression of backups, and Vorta makes it easy to configure these options.
|
||||
- Multiple backup repositories: Vorta allows users to manage multiple backup repositories and switch between them easily.
|
||||
- Backup reporting: Vorta provides detailed reports on backup history and status, allowing users to track changes over time.
|
42 technology/applications/backup/borg.md Normal file
@@ -0,0 +1,42 @@
---
|
||||
obj: application
|
||||
website: https://www.borgbackup.org/
|
||||
repo: https://github.com/borgbackup/borg
|
||||
---
|
||||
# Borg Backup
|
||||
#refactor add options + actions
|
||||
BorgBackup (short: Borg) is a deduplicating backup program. Optionally, it supports compression and authenticated encryption.
|
||||
|
||||
The main goal of Borg is to provide an efficient and secure way to backup data. The data deduplication technique used makes Borg suitable for daily backups since only changes are stored. The authenticated encryption technique makes it suitable for backups to not fully trusted targets.
|
||||
|
||||
## Usage
|
||||
Create a new repo:
|
||||
```shell
|
||||
borg init -e repokey /path/to/repo
|
||||
```
|
||||
|
||||
Create a backup:
|
||||
```shell
|
||||
borg create -v --progress --stats /path/to/repo::ARCHIVE_NAME ~/Documents
|
||||
```
|
||||
|
||||
List contents:
|
||||
```shell
|
||||
borg list /path/to/repo # List all archives
|
||||
borg list /path/to/repo::ARCHIVE # List content of Archive
|
||||
```
|
||||
|
||||
Extract archive:
|
||||
```shell
|
||||
borg extract /path/to/repo::ARCHIVE
|
||||
```
|
||||
|
||||
Delete archive:
|
||||
```shell
|
||||
borg delete /path/to/repo::ARCHIVE
|
||||
```
|
||||
|
||||
Garbage Collect Repo:
|
||||
```shell
|
||||
borg compact /path/to/repo
|
||||
```
|
34 technology/applications/clamav.md Normal file
@@ -0,0 +1,34 @@
---
|
||||
obj: application
|
||||
os: linux
|
||||
arch-wiki: https://wiki.archlinux.org/title/ClamAV
|
||||
website: https://www.clamav.net/
|
||||
---
|
||||
#refactor
|
||||
# ClamAV
|
||||
[Arch Wiki](https://wiki.archlinux.org/title/ClamAV)
|
||||
Clam AntiVirus is an open source (GPL) anti-virus toolkit for UNIX. It provides a number of utilities including a flexible and scalable multi-threaded daemon, a command line scanner and advanced tool for automatic database updates. Because ClamAV's main use is on file/mail servers for [Windows](../windows/Windows.md) desktops, it primarily detects [Windows](../windows/Windows.md) viruses and malware with its built-in signatures.
|
||||
|
||||
## Usage
|
||||
### Updating database
|
||||
Update the virus definitions with:
|
||||
```shell
|
||||
freshclam
|
||||
```
|
||||
|
||||
The database files are saved in:
|
||||
```
|
||||
/var/lib/clamav/daily.cvd
|
||||
/var/lib/clamav/main.cvd
|
||||
/var/lib/clamav/bytecode.cvd
|
||||
```
|
||||
|
||||
Start/enable `clamav-freshclam.service` so that the virus definitions are kept up to date.
|
||||
|
||||
### Starting the daemon
|
||||
|
||||
**Note:**
|
||||
- Run `freshclam` before starting the service for the first time; otherwise ClamAV will fail to start correctly.
|
||||
- The daemon is not needed if you only want to perform stand-alone scans. See [Scan for viruses](https://wiki.archlinux.org/title/ClamAV#Scan_for_viruses) below.
|
||||
|
||||
The service is called `clamav-daemon.service`.
|
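For a stand-alone scan without the daemon, `clamscan` can be used directly, for example:

```shell
# Recursively scan a directory and list only infected files
clamscan -r -i /home
```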
629 technology/applications/cli/Core Utils.md Normal file
@@ -0,0 +1,629 @@
---
|
||||
obj: application
|
||||
website: https://www.gnu.org/software/coreutils/
|
||||
repo: https://git.savannah.gnu.org/git/coreutils.git
|
||||
wiki: https://en.wikipedia.org/wiki/GNU_Core_Utilities
|
||||
---
|
||||
# GNU Core Utils
|
||||
The GNU Core Utilities or coreutils is a package of GNU software containing implementations for many of the basic tools, such as cat, ls, and rm, which are used on Unix-like operating systems.
|
||||
|
||||
## base64
|
||||
[base64](../../files/Base64.md) encode/decode data and print to standard output
|
||||
|
||||
Usage: `base64 [OPTION]... [FILE]`
|
||||
### Flags
|
||||
| Flag | Description |
|
||||
| ---------------------- | --------------------------------------------- |
|
||||
| `-d, --decode` | Decode Input |
|
||||
| `-i, --ignore-garbage` | when decoding, ignore non-alphabet characters |
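Example:
```shell
echo -n "hello" | base64        # aGVsbG8=
echo "aGVsbG8=" | base64 -d     # hello
```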
|
||||
|
||||
## basename
|
||||
strip directory and suffix from filenames
|
||||
Usage: `basename NAME [SUFFIX]`
|
||||
|
||||
## cat
|
||||
concatenate files and print on the standard output
|
||||
Usage: `cat [OPTION]... [FILE]...`
|
||||
|
||||
### Flags
|
||||
| Flag | Description |
|
||||
| ----------------------- | -------------------------------------------- |
|
||||
| `-b, --number-nonblank` | number nonempty output lines, overrides `-n` |
|
||||
| `-n, --number` | number all output lines |
|
||||
|
||||
## chmod
|
||||
change file mode (permissions) bits
|
||||
Usage: `chmod [OPTION]... OCTAL-MODE FILE...`
|
||||
|
||||
### Options
|
||||
| Option | Description |
|
||||
| ----------------------- | ----------------------------------------------------------------------------------------------------- |
|
||||
| `-c, --changes` | like verbose but report only when a change is made |
|
||||
| `-f, --silent, --quiet` | suppress most error messages |
|
||||
| `-v, --verbose` | output a diagnostic for every file processed |
|
||||
| `--reference=RFILE` | use RFILE's mode instead of specifying MODE values. RFILE is always dereferenced if a symbolic link. |
|
||||
| `-R, --recursive` | change files and directories recursively |
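Example:
```shell
chmod 644 file.txt       # rw-r--r--
chmod -R 755 some_dir/   # rwxr-xr-x, recursively
```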
|
||||
|
||||
## chown
|
||||
change file owner and group
|
||||
Usage: `chown [OPTION]... [OWNER][:[GROUP]] FILE...`
|
||||
|
||||
### Options
|
||||
| Option | Description |
|
||||
| ----------------------- | ----------------------------------------------------------------------------------------------------- |
|
||||
| `-c, --changes` | like verbose but report only when a change is made |
|
||||
| `-f, --silent, --quiet` | suppress most error messages |
|
||||
| `-v, --verbose` | output a diagnostic for every file processed |
|
||||
| `--reference=RFILE` | use RFILE's owner and group rather than specifying OWNER:GROUP values. RFILE is always dereferenced. |
|
||||
| `-R, --recursive` | operate on files and directories recursively |
|
||||
|
||||
## chroot
|
||||
run command or interactive shell with special root directory
|
||||
Usage: `chroot [OPTION] NEWROOT [COMMAND [ARG]...]`
|
||||
|
||||
## cksum
|
||||
compute and verify file checksums
|
||||
Usage: `cksum [OPTION]... [FILE]...`
|
||||
|
||||
### Options
|
||||
| Option | Description |
|
||||
| ---------------------- | ----------------------------- |
|
||||
| `-a, --algorithm=TYPE` | select the digest type to use |
|
||||
|
||||
### Digests
|
||||
- crc
|
||||
- md5
|
||||
- sha1
|
||||
- sha224
|
||||
- sha256
|
||||
- sha384
|
||||
- sha512
|
||||
- blake2b
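Example:
```shell
cksum -a sha256 image.iso
```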
|
||||
|
||||
## cp
|
||||
copy files and directories
|
||||
Usage: `cp [OPTION]... SOURCE DEST`
|
||||
|
||||
### Options
|
||||
| Option | Description |
|
||||
| ---------------------------------- | --------------------------------------------------------------------------- |
|
||||
| `-l, --link` | hard link files instead of copying |
|
||||
| `-R, -r, --recursive` | copy directories recursively |
|
||||
| `-s, --symbolic-link` | make symbolic links instead of copying |
|
||||
| `-S, --suffix=SUFFIX` | override the usual backup suffix |
|
||||
| `-t, --target-directory=DIRECTORY` | copy all SOURCE arguments into DIRECTORY |
|
||||
| `--update[=UPDATE]` | control which existing files are updated; UPDATE={all,none,older(default)}. |
|
||||
| `-u` | equivalent to `--update[=older]` |
|
||||
| `-v, --verbose` | explain what is being done |
|
||||
| `-x, --one-file-system` | stay on this file system |
|
||||
| `-b` | Create a backup file if destination already exists |
|
||||
|
||||
## cut
|
||||
remove sections from each line of files
|
||||
Usage: `cut OPTION... [FILE]...`
|
||||
|
||||
### Options
|
||||
| Option | Description |
|
||||
| --------------------------- | ------------------------------------------------------------------------------------------------------------------------ |
|
||||
| `-d, --delimiter=DELIM` | use DELIM instead of TAB for field delimiter |
|
||||
| `-f, --fields=LIST` | select only these fields; also print any line that contains no delimiter character, unless the `-s` option is specified |
|
||||
| `-s, --only-delimited` | do not print lines not containing delimiters |
|
||||
| `--output-delimiter=STRING` | use STRING as the output delimiter the default is to use the input delimiter |
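Example:
```shell
cut -d: -f1,7 /etc/passwd                         # user name and login shell
echo "a,b,c" | cut -d, -f1,3 --output-delimiter=" "   # a c
```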
|
||||
|
||||
## date
|
||||
print or set the system date and time
|
||||
Usage: `date [OPTION]... [+FORMAT]`
|
||||
|
||||
### Options
|
||||
| Option | Description |
|
||||
| ---------------------- | ------------------------------------------ |
|
||||
| `-d, --date=STRING` | display time described by STRING |
|
||||
| `-r, --reference=FILE` | display the last modification time of FILE |
|
||||
| `-s, --set=STRING` | set time described by STRING |
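Example:
```shell
date "+%Y-%m-%d %H:%M"     # formatted current time
date -d "next friday"      # parse a date description
```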
|
||||
|
||||
## dd
|
||||
convert and copy a file
|
||||
Usage: `dd [OPERAND]...`
|
||||
|
||||
### Operands
|
||||
| Operand | Description |
|
||||
| -------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `bs=BYTES` | read and write up to BYTES bytes at a time (default: 512) |
|
||||
| `count=N` | copy only N input blocks |
|
||||
| `if=FILE` | read from FILE instead of stdin |
|
||||
| `of=FILE` | write to FILE instead of stdout |
|
||||
| `status=LEVEL` | The LEVEL of information to print to stderr; 'none' suppresses everything but error messages, 'noxfer' suppresses the final transfer statistics, 'progress' shows periodic transfer statistics |
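Example (the device name is a placeholder — double-check it before writing to a disk):
```shell
dd if=archlinux.iso of=/dev/sdX bs=4M status=progress
```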
|
||||
|
||||
## df
|
||||
report file system space usage
|
||||
Usage: `df [OPTION]... [FILE]...`
|
||||
|
||||
### Options
|
||||
| Option | Description |
|
||||
| ---------------------- | ---------------------------------------------------- |
|
||||
| `-a, --all` | include pseudo, duplicate, inaccessible file systems |
|
||||
| `-h, --human-readable` | print sizes in powers of 1024 (e.g., 1023M) |
|
||||
| `-H, --si` | print sizes in powers of 1000 (e.g., 1.1G) |
|
||||
| `-i, --inodes` | list inode information instead of block usage |
|
||||
|
||||
## dirname
|
||||
strip last component from file name
|
||||
Usage: `dirname [OPTION] NAME...`
|
||||
|
||||
## du
|
||||
estimate file space usage
|
||||
Usage: `du [OPTION]... [FILE]...`
|
||||
|
||||
### Options
|
||||
| Option | Description |
|
||||
| ---------------------- | -------------------------------------- |
|
||||
| `-h, --human-readable` | print sizes in human readable format |
|
||||
| `-s, --summarize` | display only a total for each argument |
|
||||
|
||||
## echo
|
||||
display a line of text
|
||||
Usage: `echo [OPTION]... [STRING]...`
|
||||
|
||||
### Options
|
||||
| Option | Description |
|
||||
| ------ | ----------------------------------------------------- |
|
||||
| `-n` | do not output the trailing newline |
|
||||
| `-e` | enable interpretation of backslash escapes |
|
||||
| `-E` | disable interpretation of backslash escapes (default) |
|
||||
|
||||
### Backslash Interpretations
|
||||
| Sequence | Meaning |
|
||||
| -------- | ----------------------------------------- |
|
||||
| \\ | backslash |
|
||||
| \a | alert (BEL) |
|
||||
| \b | backspace |
|
||||
| \c | produce no further output |
|
||||
| \e | escape |
|
||||
| \f | form feed |
|
||||
| \n | new line |
|
||||
| \r | carriage return |
|
||||
| \t | horizontal tab |
|
||||
| \v | vertical tab |
|
||||
| \0NNN | byte with octal value NNN (1 to 3 digits) |
|
||||
| \xHH | byte with hexadecimal value HH (1 to 2) |
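Example:
```shell
echo -e "Name:\tNeo\nStatus:\tawake"
```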
|
||||
|
||||
## env
|
||||
run a program in a modified environment
|
||||
Print [Environment Variables](../../linux/Environment%20Variables.md) with only `env`
|
||||
Usage: `env [OPTION]... [-] [NAME=VALUE]... [COMMAND [ARG]...]`
|
||||
|
||||
### Options
|
||||
| Option | Description |
|
||||
| -------------------------- | -------------------------------------------------- |
|
||||
| `-i, --ignore-environment` | start with an empty environment |
|
||||
| `-u, --unset=NAME` | remove variable from the environment |
|
||||
| `-C, --chdir=DIR` | change working directory to DIR |
|
||||
| `--block-signal[=SIG]` | block delivery of SIG signal(s) to COMMAND |
|
||||
| `--ignore-signal[=SIG]` | set handling of SIG signal(s) to do nothing |
|
||||
| `-v, --debug` | print verbose information for each processing step |
|
||||
|
||||
## false
|
||||
do nothing, unsuccessfully
|
||||
Usage: `false`
|
||||
|
||||
## head
|
||||
output the first part of files
|
||||
Usage: `head [OPTION]... [FILE]...`
|
||||
|
||||
### Options
|
||||
| Option | Description |
|
||||
| ----------------------- | ---------------------------------------------------------------------------------------------------------------------- |
|
||||
| `-c, --bytes=[-]NUM` | print the first NUM bytes of each file; with the leading '-', print all but the last NUM bytes of each file |
|
||||
| `-n, --lines=[-]NUM` | print the first NUM lines instead of the first 10; with the leading '-', print all but the last NUM lines of each file |
|
||||
| `-q, --quiet, --silent` | never print headers giving file names |
|
||||
|
||||
## id
|
||||
print real and effective user and group IDs
|
||||
Usage: `id [OPTION]... [USER]...`
|
||||
|
||||
### Option
|
||||
| Option | Description |
|
||||
| -------------- | -------------------------------------------- |
|
||||
| `-g, --group` | print only the effective group ID |
|
||||
| `-G, --groups` | print all group IDs |
|
||||
| `-n, --name` | print a name instead of a number, for `-ugG` |
|
||||
| `-u, --user` | print only the effective user ID |
|
||||
|
||||
## install
|
||||
copy files and set attributes
|
||||
Usage: `install [OPTION]... SOURCE... DIRECTORY`
|
||||
|
||||
### Options
|
||||
| Option | Description |
|
||||
| ---------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `-b` | make a backup of each existing destination file |
|
||||
| `-S, --suffix=SUFFIX` | override the usual backup suffix |
|
||||
| `-C, --compare` | compare content of source and destination files, and if no change to content, ownership, and permissions, do not modify the destination at all |
|
||||
| `-d, --directory` | treat all arguments as directory names; create all components of the specified directories |
|
||||
| `-g, --group=GROUP` | set group ownership, instead of process' current group |
|
||||
| `-m, --mode=MODE` | set permission mode (as in chmod), instead of rwxr-xr-x |
|
||||
| `-o, --owner=OWNER` | set ownership (super-user only) |
|
||||
| `-p, --preserve-timestamps` | apply access/modification times of SOURCE files to corresponding destination files |
|
||||
| `-s, --strip` | strip symbol tables |
|
||||
| `-t, --target-directory=DIRECTORY` | copy all SOURCE arguments into DIRECTORY |
|
||||
| `-v, --verbose` | print the name of each created file or directory |
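Example (target path is illustrative):
```shell
install -m 755 -t /usr/local/bin mytool    # copy with mode rwxr-xr-x
```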
|
||||
|
||||
## kill
|
||||
terminate a process
|
||||
Usage: `kill [-signal|-s signal|-p] [-q value] [-a] [--timeout milliseconds signal] [--] pid|name...`
|
||||
|
||||
### Options
|
||||
| Option | Description |
|
||||
| --------------------- | -------------------------------------------------------------------------------- |
|
||||
| `-s, --signal signal` | The signal to send. It may be given as a name or a number. |
|
||||
| `-p, --pid` | Only print the process ID (PID) of the named processes, do not send any signals. |
|
||||
| `--verbose` | Print PID(s) that will be signaled with kill along with the signal. |
|
||||
|
||||
## ln
|
||||
make links between files
|
||||
Usage: `ln [OPTION]... TARGET LINK_NAME`
|
||||
|
||||
### Options
|
||||
| Option | Description |
|
||||
| ---------------------------------- | -------------------------------------------------- |
|
||||
| `-s, --symbolic` | make symbolic links instead of hard links |
|
||||
| `-t, --target-directory=DIRECTORY` | specify the DIRECTORY in which to create the links |
|
||||
| `-v, --verbose` | print name of each linked file |
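Example (note the order: target first, link name second):
```shell
ln -s /usr/share/doc ~/doc-link
```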
|
||||
|
||||
## ls
|
||||
list directory contents
|
||||
Usage: `ls [OPTION]... [FILE]...`
|
||||
|
||||
### Options
|
||||
| Option | Description |
|
||||
| ---------------------- | --------------------------------------------------- |
|
||||
| `-a, --all` | do not ignore entries starting with . |
|
||||
| `-h, --human-readable` | with `-l` print sizes like 1K 234M 2G etc |
|
||||
| `-I, --ignore=PATTERN` | do not list implied entries matching shell PATTERN |
|
||||
| `-l` | use a long listing format |
|
||||
| `-r, --reverse` | reverse order while sorting |
|
||||
| `-R, --recursive` | list subdirectories recursively |
|
||||
|
||||
## mkdir
|
||||
make directories
|
||||
Usage: `mkdir [OPTION]... DIRECTORY...`
|
||||
|
||||
### Options
|
||||
| Option | Description |
|
||||
| ----------------- | ----------------------------------------------------------------------------------------------------------- |
|
||||
| `-m, --mode=MODE` | set file mode (as in chmod), not a=rwx - umask |
|
||||
| `-p, --parents` | no error if existing, make parent directories as needed, with their file modes unaffected by any -m option. |
|
||||
| `-v, --verbose` | print a message for each created directory |
|
||||
|
||||
## mkfifo
|
||||
make FIFOs (named pipes)
|
||||
Usage: `mkfifo [OPTION]... NAME...`
|
||||
|
||||
## mv
|
||||
move (rename) files
|
||||
Usage: `mv [OPTION]... SOURCE... DIRECTORY`
|
||||
|
||||
### Options
|
||||
| Option | Description |
|
||||
| ---------------------------------- | ------------------------------------------------ |
|
||||
| `-b` | make a backup of each existing destination file |
|
||||
| `-S, --suffix=SUFFIX` | override the usual backup suffix |
|
||||
| `-t, --target-directory=DIRECTORY` | copy all SOURCE arguments into DIRECTORY |
|
||||
| `-v, --verbose` | print the name of each created file or directory |
|
||||
|
||||
## nl
|
||||
number lines of files
|
||||
Usage: `nl [FILE]...`
|
||||
|
||||
## nproc
|
||||
print the number of processing units available
|
||||
Usage: `nproc`
|
||||
|
||||
## pwd
|
||||
print name of current/working directory
|
||||
Usage: `pwd`
|
||||
|
||||
## readlink
|
||||
print resolved symbolic links or canonical file names
|
||||
Usage: `readlink [OPTION]... FILE...`
|
||||
|
||||
### Options
|
||||
| Option | Description |
|
||||
| ----------------------------- | -------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `-f, --canonicalize` | canonicalize by following every symlink in every component of the given name recursively; all but the last component must exist |
|
||||
| `-e, --canonicalize-existing` | canonicalize by following every symlink in every component of the given name recursively, all components must exist |
|
||||
| `-m, --canonicalize-missing` | canonicalize by following every symlink in every component of the given name recursively, without requirements on components existence |
|
||||
| `-n, --no-newline` | do not output the trailing delimiter |
|
||||
|
||||
## realpath
|
||||
print the resolved path
|
||||
Usage: `realpath [OPTION]... FILE...`
|
||||
|
||||
### Options
|
||||
| Option | Description |
|
||||
| ----------------------------- | ----------------------------------------------- |
|
||||
| `-e, --canonicalize-existing` | all components of the path must exist |
|
||||
| `-m, --canonicalize-missing` | no path components need exist or be a directory |
|
||||
| `-L, --logical` | resolve '..' components before symlinks |
|
||||
| `--relative-to=DIR` | print the resolved path relative to DIR |
|
||||
| `--relative-base=DIR` | print absolute paths unless paths below DIR |
|
||||
| `-s, --strip, --no-symlinks` | don't expand symlinks |
|
||||
|
||||
## rm
|
||||
remove files or directories
|
||||
Usage: `rm [OPTION]... [FILE]...`
|
||||
|
||||
### Options
|
||||
| Option | Description |
|
||||
| --------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `-f, --force` | ignore nonexistent files and arguments, never prompt |
|
||||
| `-i` | prompt before every removal |
|
||||
| `--one-file-system` | when removing a hierarchy recursively, skip any directory that is on a file system different from that of the corresponding command line argument |
|
||||
| `-r, -R, --recursive` | remove directories and their contents recursively |
|
||||
| `-d, --dir` | remove empty directories |
|
||||
| `-v, --verbose` | explain what is being done |
|
||||
|
||||
## rmdir
|
||||
remove empty directories
|
||||
Usage: `rmdir [OPTION]... DIRECTORY...`
|
||||
|
||||
### Options
|
||||
| Option | Description |
|
||||
| --------------- | ------------------------------------------------------------------------------------ |
|
||||
| `-p, --parents` | remove DIRECTORY and its ancestors; e.g., 'rmdir -p a/b' is similar to 'rmdir a/b a' |
|
||||
| `-v, --verbose` | output a diagnostic for every directory processed |
|
||||
|
||||
## seq
|
||||
print a sequence of numbers
|
||||
Usage:
|
||||
- `seq [OPTION]... LAST`
|
||||
- `seq [OPTION]... FIRST LAST`
|
||||
- `seq [OPTION]... FIRST INCREMENT LAST`
|
||||
|
||||
### Options
|
||||
| Option | Description |
|
||||
| ------------------------ | --------------------------------------------- |
|
||||
| `-f, --format=FORMAT` | use printf style floating-point FORMAT |
|
||||
| `-s, --separator=STRING` | use STRING to separate numbers (default: \n) |
|
||||
| `-w, --equal-width` | equalize width by padding with leading zeroes |
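Example:
```shell
seq 3          # 1 2 3
seq 0 5 20     # 0 5 10 15 20
seq -w 1 10    # 01 02 ... 10
```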
|
||||
|
||||
## shred
|
||||
overwrite a file to hide its contents, and optionally delete it
|
||||
Usage: `shred [OPTION]... FILE...`
|
||||
|
||||
### Options
|
||||
| Option | Description |
|
||||
| ---------------------- | ------------------------------------------------ |
|
||||
| `-f, --force` | change permissions to allow writing if necessary |
|
||||
| `-n, --iterations=N` | overwrite N times instead of the default (3) |
|
||||
| `--random-source=FILE` | get random bytes from FILE |
|
||||
|
||||
## shuf
|
||||
generate random permutations
|
||||
Usage: `shuf [OPTION]... [FILE]`
|
||||
|
||||
### Options
|
||||
| Option | Description |
|
||||
| ------------------------- | ------------------------------------------------ |
|
||||
| `-i, --input-range=LO-HI` | treat each number LO through HI as an input line |
|
||||
| `-n, --head-count=COUNT` | output at most COUNT lines |
|
||||
| `-o, --output=FILE` | write result to FILE instead of standard output |
|
||||
| `--random-source=FILE` | get random bytes from FILE |
|
||||
| `-r, --repeat` | output lines can be repeated |
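Example:
```shell
shuf -i 1-100 -n 5     # five random numbers between 1 and 100
shuf playlist.txt      # shuffle the lines of a file
```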
|
||||
|
||||
## sleep
|
||||
delay for a specified amount of time
|
||||
Usage: `sleep NUMBER[SUFFIX]...`
|
||||
|
||||
## sort
|
||||
sort lines of text files
|
||||
Usage: `sort [OPTION]... [FILE]...`
|
||||
|
||||
### Options
|
||||
| Option | Description |
|
||||
| ----------------------------- | ------------------------------------------------ |
|
||||
| `-b, --ignore-leading-blanks` | ignore leading blanks |
|
||||
| `-d, --dictionary-order` | consider only blanks and alphanumeric characters |
|
||||
| `-f, --ignore-case` | fold lower case to upper case characters |
|
||||
| `-g, --general-numeric-sort` | compare according to general numerical value |
|
||||
| `-i, --ignore-nonprinting` | consider only printable characters |
|
||||
| `-M, --month-sort` | compare (unknown) < 'JAN' < ... < 'DEC' |
|
||||
| `-h, --human-numeric-sort` | compare human readable numbers (e.g., 2K 1G) |
|
||||
| `-n, --numeric-sort` | compare according to string numerical value |
|
||||
| `-r, --reverse` | reverse the result of comparisons |
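Example:
```shell
sort -n numbers.txt        # numeric sort
du -sh * | sort -h         # sort human-readable sizes like 1K, 23M, 2G
```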
|
||||
|
||||
## split
|
||||
split a file into pieces
|
||||
Usage: `split [OPTION]... [FILE [PREFIX]]`
|
||||
|
||||
### Options
|
||||
| Option | Description |
|
||||
| ---------------------------- | --------------------------------------------------------- |
|
||||
| `-a, --suffix-length=N` | generate suffixes of length N (default 2) |
|
||||
| `--additional-suffix=SUFFIX` | append an additional SUFFIX to file names |
|
||||
| `-b, --bytes=SIZE` | put SIZE bytes per output file |
|
||||
| `-C, --line-bytes=SIZE` | put at most SIZE bytes of records per output file |
|
||||
| `-d` | use numeric suffixes starting at 0, not alphabetic |
|
||||
| `--numeric-suffixes[=FROM]` | same as `-d`, but allow setting the start value |
|
||||
| `-x` | use hex suffixes starting at 0, not alphabetic |
|
||||
| `--hex-suffixes[=FROM]` | same as `-x`, but allow setting the start value |
|
||||
| `-l, --lines=NUMBER` | put NUMBER lines/records per output file |
|
||||
| `-n, --number=CHUNKS` | generate CHUNKS output files; see explanation below |
|
||||
| `--verbose` | print a diagnostic just before each output file is opened |
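Example (file names are placeholders):
```shell
split -b 100M -d backup.tar.gz backup.part.        # backup.part.00, backup.part.01, ...
cat backup.part.* > backup_restored.tar.gz         # reassemble
```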
|
||||
|
||||
## stat
|
||||
display file or file system status
|
||||
Usage: `stat [OPTION]... FILE...`
|
||||
|
||||
### Options
|
||||
| Option | Description |
|
||||
| --------------------- | ------------------------------------------------------------------------------------------ |
|
||||
| `-L, --dereference` | follow links |
|
||||
| `-f, --file-system` | display file system status instead of file status |
|
||||
| `-c, --format=FORMAT` | use the specified FORMAT instead of the default; output a newline after each use of FORMAT |
|
||||
|
||||
### Format Modifiers
|
||||
| Format | Description |
|
||||
| ------ | ---------------------------------------------------------------------- |
|
||||
| %a | permission bits in octal (note '#' and '0' printf flags) |
|
||||
| %A | permission bits and file type in human readable form |
|
||||
| %b | number of blocks allocated (see %B) |
|
||||
| %B | the size in bytes of each block reported by %b |
|
||||
| %C | SELinux security context string |
|
||||
| %d | device number in decimal (st_dev) |
|
||||
| %D | device number in hex (st_dev) |
|
||||
| %Hd | major device number in decimal |
|
||||
| %Ld | minor device number in decimal |
|
||||
| %f | raw mode in hex |
|
||||
| %F | file type |
|
||||
| %g | group ID of owner |
|
||||
| %G | group name of owner |
|
||||
| %h | number of hard links |
|
||||
| %i | inode number |
|
||||
| %m | mount point |
|
||||
| %n | file name |
|
||||
| %N | quoted file name with dereference if symbolic link |
|
||||
| %o | optimal I/O transfer size hint |
|
||||
| %s | total size, in bytes |
|
||||
| %r | device type in decimal (st_rdev) |
|
||||
| %R | device type in hex (st_rdev) |
|
||||
| %Hr | major device type in decimal, for character/block device special files |
|
||||
| %Lr | minor device type in decimal, for character/block device special files |
|
||||
| %t | major device type in hex, for character/block device special files |
|
||||
| %T | minor device type in hex, for character/block device special files |
|
||||
| %u | user ID of owner |
|
||||
| %U | user name of owner |
|
||||
| %w | time of file birth, human-readable; - if unknown |
|
||||
| %W | time of file birth, seconds since Epoch; 0 if unknown |
|
||||
| %x | time of last access, human-readable |
|
||||
| %X | time of last access, seconds since Epoch |
|
||||
| %y | time of last data modification, human-readable |
|
||||
| %Y | time of last data modification, seconds since Epoch |
|
||||
| %z | time of last status change, human-readable |
|
||||
| %Z | time of last status change, seconds since Epoch |
|
||||
| %a | free blocks available to non-superuser |
|
||||
| %b | total data blocks in file system |
|
||||
| %c | total file nodes in file system |
|
||||
| %d | free file nodes in file system |
|
||||
| %f | free blocks in file system |
|
||||
| %i | file system ID in hex |
|
||||
| %l | maximum length of filenames |
|
||||
| %n | file name |
|
||||
| %s | block size (for faster transfers) |
|
||||
| %S | fundamental block size (for block counts) |
|
||||
| %t | file system type in hex |
|
||||
| %T | file system type in human readable form |
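Example:
```shell
stat -c '%n %s bytes %U:%G %A' file.txt    # name, size, owner, permissions
stat -f -c '%T' /                          # file system type of /
```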
|
||||
|
||||
## tac
|
||||
concatenate and print files in reverse
|
||||
Usage: `tac [FILE]...`
|
||||
|
||||
## tail
|
||||
output the last part of files
|
||||
Usage: `tail [OPTION]... [FILE]...`
|
||||
|
||||
### Options
|
||||
| Option | Description |
|
||||
| -------------------- | ---------------------------------------------------------------------------------------------------- |
|
||||
| `-c, --bytes=[+]NUM` | output the last NUM bytes; or use `-c` +NUM to output starting with byte NUM of each file |
|
||||
| `-f` | output appended data as the file grows; |
|
||||
| `-n, --lines=[+]NUM` | output the last NUM lines, instead of the last 10; or use `-n` +NUM to skip NUM-1 lines at the start |
|
||||
| `--pid=PID` | with `-f`, terminate after process ID, PID dies |
|
||||
|
||||
## tee
|
||||
read from standard input and write to standard output and files
|
||||
Usage: `tee [OPTION]... [FILE]...`
|
||||
|
||||
### Options
|
||||
| Option | Description |
|
||||
| -------------- | ------------------------------------------- |
|
||||
| `-a, --append` | append to the given FILEs, do not overwrite |
|
||||
|
||||
## touch
|
||||
change file timestamps
|
||||
Usage: `touch [OPTION]... FILE...`
|
||||
|
||||
### Options
|
||||
| Option | Description |
|
||||
| ---------------------- | ----------------------------------------------- |
|
||||
| `-a` | change only the access time |
|
||||
| `-c, --no-create` | do not create any files |
|
||||
| `-d, --date=STRING` | parse STRING and use it instead of current time |
|
||||
| `-r, --reference=FILE` | use this file's times instead of current time |
|
||||
|
||||
## tr
|
||||
translate or delete characters
|
||||
Usage: `tr [OPTION]... STRING1 [STRING2]`
|
||||
|
||||
### Options
|
||||
| Option | Description |
|
||||
| -------------- | ---------------------------------------------- |
|
||||
| `-d, --delete` | delete characters in STRING1, do not translate |
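Example:
```shell
echo "hello" | tr 'a-z' 'A-Z'        # HELLO
tr -d '\r' < dos.txt > unix.txt      # strip carriage returns
```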
|
||||
|
||||
## true
|
||||
do nothing, successfully
|
||||
Usage: `true`
|
||||
|
||||
## truncate
|
||||
shrink or extend the size of a file to the specified size
|
||||
Usage: `truncate OPTION... FILE...`
|
||||
|
||||
### Options
|
||||
| Option | Description |
|
||||
| ----------------------- | ----------------------------------------- |
|
||||
| `-c, --no-create` | do not create any files |
|
||||
| `-r, --reference=RFILE` | base size on RFILE |
|
||||
| `-s, --size=SIZE` | set or adjust the file size by SIZE bytes |
|
||||
|
||||
## uname
|
||||
print system information
|
||||
Usage: `uname [OPTION]...`
|
||||
|
||||
### Options
|
||||
| Option | Description |
|
||||
| ---------------------- | ------------------------------- |
|
||||
| `-a, --all` | print all information |
|
||||
| `-s, --kernel-name` | print the kernel name |
|
||||
| `-n, --nodename` | print the network node hostname |
|
||||
| `-r, --kernel-release` | print the kernel release |
|
||||
| `-v, --kernel-version` | print the kernel version |
|
||||
| `-m, --machine` | print the machine hardware name |
|
||||
|
||||
## uniq
|
||||
report or omit repeated lines
|
||||
Usage: `uniq [OPTION]... [INPUT [OUTPUT]]`
|
||||
|
||||
### Options
|
||||
| Option | Description |
|
||||
| -------------------- | ---------------------------------------------- |
|
||||
| `-c, --count` | prefix lines by the number of occurrences |
|
||||
| `-d, --repeated` | only print duplicate lines, one for each group |
|
||||
| `-i, --ignore-case` | ignore differences in case when comparing |
|
||||
| `-s, --skip-chars=N` | avoid comparing the first N characters |
|
||||
| `-u, --unique` | only print unique lines |
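Example (`uniq` only collapses adjacent duplicates, so sort first):
```shell
sort access.log | uniq -c | sort -rn | head
```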
|
||||
|
||||
## uptime
|
||||
Tell how long the system has been running
|
||||
Usage: `uptime [options]`
|
||||
|
||||
### Options
|
||||
| Option | Description |
|
||||
| -------------- | ---------------------------------------------- |
|
||||
| `-p, --pretty` | show uptime in pretty format |
|
||||
| `-s, --since` | system up since, in yyyy-mm-dd HH:MM:SS format |
|
||||
|
||||
## wc
|
||||
print newline, word, and byte counts for each file
|
||||
Usage: `wc [OPTION]... [FILE]...`
|
||||
|
||||
### Options
|
||||
| Option | Description |
|
||||
| ------------- | -------------------------- |
|
||||
| `-c, --bytes` | print the byte counts |
|
||||
| `-m, --chars` | print the character counts |
|
||||
| `-l, --lines` | print the newline counts |
|
||||
| `-w, --words` | print the word counts |
|
||||
|
||||
## whoami
|
||||
print effective user name
|
||||
Usage: `whoami`
|
27
technology/applications/cli/Loop.md
Normal file
27
technology/applications/cli/Loop.md
Normal file
|
@ -0,0 +1,27 @@
|
|||
---
|
||||
obj: application
|
||||
os: linux
|
||||
repo: https://github.com/Miserlou/Loop
|
||||
---
|
||||
# Loop
|
||||
[Repo](https://github.com/Miserlou/Loop)
|
||||
Loops in bash are surprisingly complicated and fickle! I wanted a simple and intuitive way to write controllable loops that:
|
||||
|
||||
- Run on controllable **timers**!
|
||||
- `$ loop --every 10s -- ls`
|
||||
- Have **custom counters**!
|
||||
- `$ loop --count-by 5 -- 'touch $COUNT.txt'`
|
||||
- Loop **until output matches** a condition!
|
||||
- `$ loop --until-contains 200 -- ./get_response_code.sh --site mysite.biz`
|
||||
- Loop **until a certain time**!
|
||||
- `$ loop --for-duration 8h -- ./poke_server`
|
||||
- Loop **until a program succeeds** (or fails!)
|
||||
- `$ loop --until-success -- ./poke_server`
|
||||
- Iterate over the **standard input**!
|
||||
- `$ cat files_to_create.txt | loop -- 'touch $ITEM'`
|
||||
- Get a **summary** of the runs!
|
||||
- `$ loop --for-duration 10min --summary -- ls`
|
||||
- Run until output **changes or stays the same** between invocations!
|
||||
- `$ loop --until-changes -- date +%s`
|
||||
- `$ loop --until-same -- date +%s`
|
||||
- ..and **much more!**
|
339
technology/applications/cli/Shell.md
Normal file
339
technology/applications/cli/Shell.md
Normal file
|
@ -0,0 +1,339 @@
|
|||
---
|
||||
obj: concept
|
||||
arch-wiki: https://wiki.archlinux.org/title/Command-line_shell
|
||||
wiki: https://en.wikipedia.org/wiki/Unix_shell
|
||||
---
|
||||
# Shell
|
||||
The shell is a command-line interpreter that provides a user interface to an operating system's services. It allows users to interact with the system through text-based commands and scripts. Shell scripting refers to writing a series of commands in a script file to automate tasks and perform complex operations.
|
||||
|
||||
The shell makes heavy use of [Environment Variables](../../linux/Environment%20Variables.md) for storing settings and configuration.
|
||||
|
||||
## Usage
|
||||
You can enter commands to be executed in the shell.
|
||||
```shell
|
||||
command
|
||||
```
|
||||
|
||||
Commands can be a script, a binary, or anything else that can be executed. If you don't provide a full path to the file you want to run, the shell will search the locations defined in the `$PATH` environment variable.
|
||||
|
||||
### Arguments
|
||||
Everything after the command will be provided to the command as arguments. Arguments are separated by space characters. To include a space inside a single argument, quote the argument or escape the space character.
|
||||
|
||||
```shell
|
||||
command argument1 argument2 argument3
|
||||
|
||||
# These two have identical arguments
|
||||
command "argument 1" "argument 2" "argument 3"
|
||||
command argument\ 1 argument\ 2 argument\ 3
|
||||
```
|
||||
|
||||
Arguments are typically structured into flags and options.
|
||||
Flags (like `-v` or `--verbose`) toggle specific functionality while options (like `-o output.txt` or `--out output.txt`) allow you to specify a value for the command.
|
||||
|
||||
### Running in the background
|
||||
Append `&` to a command to run it in the background.
|
||||
```shell
|
||||
long-running-command &
|
||||
```
|
||||
|
||||
### Running Commands Sequentially
|
||||
The `;` symbol marks the end of a command. This allows you to execute multiple commands in one line.
|
||||
|
||||
```shell
|
||||
command1 ; command2
|
||||
```
|
||||
|
||||
### Globs
|
||||
The shell supports globs like `*` for matching.
|
||||
For example, to delete all text files in the current directory:
|
||||
```shell
|
||||
rm -v *.txt
|
||||
```
|
||||
|
||||
## Redirections
|
||||
### **Standard Input, Output, and Error**
|
||||
In a shell environment, there are three standard streams:
|
||||
- **Standard Input (stdin - fd 0):** Represents the input to a command.
|
||||
- **Standard Output (stdout - fd 1):** Represents the output of a command.
|
||||
- **Standard Error (stderr - fd 2):** Represents error messages generated by a command.
|
||||
|
||||
### **Output Redirection (`>` and `>>`)**
|
||||
- `>` is used to redirect standard output to a file, overwriting the file's contents if it already exists.
|
||||
```shell
|
||||
echo "Hello, World!" > output.txt
|
||||
```
|
||||
- `>>` is used to redirect standard output to a file, appending to the file if it exists.
|
||||
```shell
|
||||
echo "More text" >> output.txt
|
||||
```
|
||||
|
||||
### **Input Redirection (`<`)**
|
||||
`<` is used to redirect standard input from a file.
|
||||
```shell
|
||||
while read line; do
|
||||
echo "Line: $line"
|
||||
done < input.txt
|
||||
|
||||
cat < input.txt
|
||||
```
|
||||
|
||||
### **Error Redirection (`2>` and `2>>`)**
|
||||
- `2>` is used to redirect standard error to a file, overwriting the file if it exists.
|
||||
```shell
|
||||
command_that_might_fail 2> error.log
|
||||
```
|
||||
- `2>>` is used to redirect standard error to a file, appending to the file if it exists.
|
||||
```shell
|
||||
command_that_might_fail 2>> error.log
|
||||
```
|
||||
|
||||
### **Pipes (`|`)**
|
||||
The pipe operator (`|`) allows the output of one command to be used as the input for another command. This enables the creation of powerful and concise command pipelines.
|
||||
```shell
|
||||
command1 | command2
|
||||
```
|
||||
|
||||
Example: Counting Lines in a File
|
||||
```shell
|
||||
cat textfile.txt | wc -l
|
||||
```
|
||||
|
||||
### **Combining Redirection and Pipes**
|
||||
You can combine redirection and pipes to create more complex command sequences.
|
||||
```shell
|
||||
# Redirect stderr to a file, and then pipe the output to another command
|
||||
command1 2> error.log | command2
|
||||
```
|
||||
|
||||
### **Here Documents (`<<`)**
|
||||
Here documents allow you to include multiple lines of input in a script or command. `EOF` stands for End Of File.
|
||||
```shell
|
||||
cat << EOF
|
||||
This is a
|
||||
multi-line
|
||||
text block.
|
||||
EOF
|
||||
```
|
||||
|
||||
### **Command Substitution (`$()`)**
|
||||
Command substitution allows the output of a command to replace the command itself.
|
||||
```shell
|
||||
result=$(ls -l)
|
||||
echo "Listing: $result"
|
||||
```
|
||||
|
||||
### **Named Pipes (FIFOs)**
|
||||
Named pipes, or FIFOs (First In, First Out), are special files used for inter-process communication.
|
||||
```shell
|
||||
mkfifo mypipe
|
||||
command1 > mypipe & # Background process writing to the pipe
|
||||
command2 < mypipe # Reading from the pipe
|
||||
```
|
||||
|
||||
### **tee Command**
|
||||
The `tee` command reads from standard input and writes to standard output and files simultaneously.
|
||||
```shell
|
||||
echo "Hello, World!" | tee output.txt | wc -l
|
||||
```
|
||||
|
||||
### **/dev/null**
|
||||
`/dev/null` is a special file that discards all data written to it.
|
||||
|
||||
```shell
|
||||
command > /dev/null # Redirects output to null
|
||||
```
|
||||
|
||||
## Shell Scripting
|
||||
Shell scripting involves writing a series of commands for the shell to execute, allowing automation and the creation of more complex programs. It's a powerful way to streamline tasks and manage system configurations.
|
||||
|
||||
### Shebang
|
||||
The shebang is the first line of a script; it indicates which executable is used to run the script. The syntax is `#!` followed by the absolute path to an executable.
|
||||
|
||||
For simple shell scripts add this to the file:
|
||||
```shell
|
||||
#!/bin/bash
|
||||
```
|
||||
|
||||
If you are scripting in another language, change it accordingly, for example to `python`:
|
||||
|
||||
```shell
|
||||
#!/bin/python
|
||||
```
|
||||
|
||||
### Comments
|
||||
You can add comments to a script by using the `#` symbol.
|
||||
```shell
|
||||
# This is a comment
|
||||
```
|
||||
|
||||
### Variables
|
||||
You can store variables in a shell script by following a `key=value` syntax.
|
||||
|
||||
```shell
|
||||
name="Neo" # string
|
||||
age=25 # number
|
||||
fruits=("apple" "banana" "orange") # array
|
||||
|
||||
# using arrays
|
||||
echo "First elements ${fruits[0]}"
|
||||
|
||||
# using all elements of an array
|
||||
echo "${fruits[*]}"   # all elements as a single word
|
||||
echo "${fruits[@]}"   # each element as a separate word
|
||||
```
|
||||
|
||||
### Conditionals
|
||||
You can use conditional statements.
|
||||
```shell
|
||||
if [ "$age" -eq 18 ]; then
|
||||
echo "You're 18!"
|
||||
elif [ "$age" -gt 18 ]; then
|
||||
echo "You're an adult!"
|
||||
else
|
||||
echo "You're a minor."
|
||||
fi
|
||||
|
||||
# Case Statement (useful for checking a lot of conditions)
|
||||
case $name in
|
||||
"Neo")
|
||||
echo "You are the one"
|
||||
;;
|
||||
"Morpheus")
|
||||
echo "See you again"
|
||||
;;
|
||||
*)
|
||||
# default if nothing matches
|
||||
echo "Sorry, I don't understand"
|
||||
;;
|
||||
esac
|
||||
```
|
||||
|
||||
#### Operators
|
||||
##### Arithmetic Operators
|
||||
Assume variable **a** holds 10 and variable **b** holds 20 then −
|
||||
|
||||
| Operator | Description | Example |
|
||||
| ------------------ | --------------------------------------------------------------------- | --------------------------------------- |
|
||||
| + (Addition) | Adds values on either side of the operator | `expr $a + $b` will give 30 |
|
||||
| - (Subtraction) | Subtracts right hand operand from left hand operand | `expr $a - $b` will give -10 |
|
||||
| * (Multiplication) | Multiplies values on either side of the operator | `expr $a \* $b` will give 200 |
|
||||
| / (Division) | Divides left hand operand by right hand operand | `expr $b / $a` will give 2 |
|
||||
| % (Modulus) | Divides left hand operand by right hand operand and returns remainder | `expr $b % $a` will give 0 |
|
||||
| = (Assignment) | Assigns right operand to left operand | `a=$b` would assign value of b into a |
|
||||
| == (Equality) | Compares two numbers, if both are the same then returns true. | `[ $a == $b ]` would return false. |
|
||||
| != (Not Equality) | Compares two numbers, if both are different then returns true. | `[ $a != $b ]` would return true. |
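A short sketch of these operators in bash; `$(( ))` arithmetic expansion is shown alongside `expr`:
```shell
a=10
b=20

expr $a + $b        # 30
echo $(( a * b ))   # 200

if [ $a != $b ]; then
    echo "a and b differ"
fi
```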
|
||||
|
||||
##### Relational Operators
|
||||
For example, the following operators will check a relation between 10 and 20 as well as between "10" and "20", but not between "ten" and "twenty".
|
||||
|
||||
Assume variable **a** holds 10 and variable **b** holds 20 then −
|
||||
|
||||
| Operator | Description | Example |
|
||||
| -------- | ------------------------------------------------------------------------------------------------------------------------------------ | ---------------------------- |
|
||||
| **-eq** | Checks if the value of two operands are equal or not; if yes, then the condition becomes true. | `[ $a -eq $b ]` is not true. |
|
||||
| **-ne** | Checks if the value of two operands are equal or not; if values are not equal, then the condition becomes true. | `[ $a -ne $b ]` is true. |
|
||||
| **-gt** | Checks if the value of left operand is greater than the value of right operand; if yes, then the condition becomes true. | `[ $a -gt $b ]` is not true. |
|
||||
| **-lt** | Checks if the value of left operand is less than the value of right operand; if yes, then the condition becomes true. | `[ $a -lt $b ]` is true. |
|
||||
| **-ge** | Checks if the value of left operand is greater than or equal to the value of right operand; if yes, then the condition becomes true. | `[ $a -ge $b ]` is not true. |
|
||||
| **-le** | Checks if the value of left operand is less than or equal to the value of right operand; if yes, then the condition becomes true. | `[ $a -le $b ]` is true. |
|
||||
|
||||
##### Boolean Operators
|
||||
Assume variable **a** holds 10 and variable **b** holds 20 then −
|
||||
|
||||
| Operator | Description | Example |
|
||||
| -------- | -------------------------------------------------------------------------------------------------------- | ------------------------------------- |
|
||||
| **!** | This is logical negation. This inverts a true condition into false and vice versa. | `[ ! false ]` is true. |
|
||||
| **-o** | This is logical **OR**. If one of the operands is true, then the condition becomes true. | `[ $a -lt 20 -o $b -gt 100 ]` is true. |
|
||||
| **-a** | This is logical **AND**. If both the operands are true, then the condition becomes true otherwise false. | `[ $a -lt 20 -a $b -gt 100 ]` is false. |
|
||||
|
||||
##### File Test Operators
|
||||
We have a few operators that can be used to test various properties associated with a Unix file.
|
||||
|
||||
Assume a variable **file** holds an existing file name "test" the size of which is 100 bytes and has **read**, **write** and **execute** permission.
|
||||
|
||||
| Operator | Description | Example |
|
||||
| ----------- | ---------------------------------------------------------------------------------------------------------------------- | --------------------------- |
|
||||
| **-b file** | Checks if file is a block special file; if yes, then the condition becomes true. | `[ -b $file ]` is false. |
|
||||
| **-c file** | Checks if file is a character special file; if yes, then the condition becomes true. | `[ -c $file ]` is false. |
|
||||
| **-d file** | Checks if file is a directory; if yes, then the condition becomes true. | `[ -d $file ]` is not true. |
|
||||
| **-f file** | Checks if file is an ordinary file as opposed to a directory or special file; if yes, then the condition becomes true. | `[ -f $file ]` is true. |
|
||||
| **-g file** | Checks if file has its set group ID (SGID) bit set; if yes, then the condition becomes true. | `[ -g $file ]` is false. |
|
||||
| **-k file** | Checks if file has its sticky bit set; if yes, then the condition becomes true. | `[ -k $file ]` is false. |
|
||||
| **-p file** | Checks if file is a named pipe; if yes, then the condition becomes true. | `[ -p $file ]` is false. |
|
||||
| **-t file** | Checks if file descriptor is open and associated with a terminal; if yes, then the condition becomes true. | `[ -t $file ]` is false. |
|
||||
| **-u file** | Checks if file has its Set User ID (SUID) bit set; if yes, then the condition becomes true. | `[ -u $file ]` is false. |
|
||||
| **-r file** | Checks if file is readable; if yes, then the condition becomes true. | `[ -r $file ]` is true. |
|
||||
| **-w file** | Checks if file is writable; if yes, then the condition becomes true. | `[ -w $file ]` is true. |
|
||||
| **-x file** | Checks if file is executable; if yes, then the condition becomes true. | `[ -x $file ]` is true. |
|
||||
| **-s file** | Checks if file has size greater than 0; if yes, then condition becomes true. | `[ -s $file ]` is true. |
|
||||
| **-e file** | Checks if file exists; is true even if file is a directory but exists. | `[ -e $file ]` is true. |
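A minimal sketch using these tests:
```shell
file="test"

if [ -e "$file" ]; then
    echo "$file exists"
fi

if [ -f "$file" ] && [ -x "$file" ]; then
    echo "$file is a regular, executable file"
fi
```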
|
||||
|
||||
### Loops
|
||||
|
||||
You can also use loops.
|
||||
```shell
|
||||
# For Loop
|
||||
for fruit in "${fruits[@]}"; do
|
||||
echo "Fruit: $fruit"
|
||||
done
|
||||
|
||||
# Example: Loop over files
|
||||
for file in *.txt; do
|
||||
echo "File: $file";
|
||||
done
|
||||
|
||||
# While Loop
|
||||
count=0
|
||||
while [ $count -lt 5 ]; do
|
||||
echo "Count: $count"
|
||||
((count++))
|
||||
done
|
||||
```
|
||||
|
||||
You can use a few commands to control the loop.
|
||||
- `break` breaks out of the loop, ending it prematurely
|
||||
- `continue` skips to the next iteration of the loop, skipping everything that comes after it in the current iteration
|
||||
|
||||
If you want to loop over every line of a command's output, even when a line contains spaces, you can use this pattern:
|
||||
```shell
|
||||
command | while IFS= read -r varName; do
|
||||
echo "Working on $varName"
|
||||
done
|
||||
```
|
||||
|
||||
### Functions
|
||||
You can define your own functions. The arguments you pass to a function can be accessed via `$1`, `$2`, and so on, the same way the arguments passed to a shell script are accessed.
|
||||
|
||||
```shell
|
||||
greet() {
|
||||
echo "Hello, $1!"
|
||||
}
|
||||
|
||||
# Function Call
|
||||
greet "Alice"
|
||||
```
|
||||
|
||||
Your functions can also have return values.
|
||||
```shell
|
||||
add() {
|
||||
local result=$(( $1 + $2 ))
|
||||
echo $result
|
||||
}
|
||||
|
||||
# Function Call
|
||||
sum=$(add 5 3)
|
||||
echo "Sum: $sum"
|
||||
```
|
||||
|
||||
### Input Output
|
||||
Read user input into a variable.
|
||||
```shell
|
||||
echo "Enter your name: "
|
||||
read username
|
||||
echo "Hello, $username!"
|
||||
```
|
||||
|
||||
Output to Screen (`echo`, `printf`):
|
||||
```shell
|
||||
echo "Hello, World!"
|
||||
```
|
154
technology/applications/cli/alacritty.md
Normal file
154
technology/applications/cli/alacritty.md
Normal file
File diff suppressed because one or more lines are too long
89
technology/applications/cli/aria2.md
Normal file
89
technology/applications/cli/aria2.md
Normal file
|
@ -0,0 +1,89 @@
|
|||
---
|
||||
obj: application
|
||||
os:
|
||||
- linux
|
||||
website: https://aria2.github.io/
|
||||
repo: https://github.com/aria2/aria2
|
||||
---
|
||||
# aria2
|
||||
[Repo](https://github.com/aria2/aria2)
|
||||
#refactor
|
||||
[aria2](https://aria2.github.io/) is a utility for downloading files. The supported protocols are [HTTP](../../internet/HTTP.md)(S), FTP, SFTP, [BitTorrent](../../tools/BitTorrent.md), and Metalink. aria2 can download a file from multiple sources/protocols and tries to utilize your maximum download bandwidth. It supports downloading a file from [HTTP](../../internet/HTTP.md)(S)/FTP/SFTP and [BitTorrent](../../tools/BitTorrent.md) at the same time, while the data downloaded from [HTTP](../../internet/HTTP.md)(S)/FTP/SFTP is uploaded to the [BitTorrent](../../tools/BitTorrent.md) swarm. Using Metalink's chunk checksums, aria2 automatically validates chunks of data while downloading a file like [BitTorrent](../../tools/BitTorrent.md). Aria2 can be used as a downloader by [yt-dlp](../media/yt-dlp.md).
|
||||
|
||||
## Usage
|
||||
|
||||
```shell
|
||||
aria2c [<OPTIONS>] [<URI>|<MAGNET>|<TORRENT_FILE>|<METALINK_FILE>]
|
||||
```
|
||||
|
||||
### Options
|
||||
- The directory to store the downloaded file.
|
||||
```shell
|
||||
-d, --dir=<DIR>
|
||||
```
|
||||
|
||||
- Downloads the URIs listed in **FILE**.
|
||||
```shell
|
||||
-i, --input-file=<FILE>
|
||||
```
|
||||
|
||||
- Set the maximum number of parallel downloads for every queue item.
|
||||
```shell
|
||||
-j, --max-concurrent-downloads=<N>
|
||||
```
|
||||
|
||||
- Check file integrity by validating piece hashes or a hash of entire file. This option has effect only in [BitTorrent](../../tools/BitTorrent.md), Metalink downloads with checksums or [HTTP](../../internet/HTTP.md)(S)/FTP downloads with --checksum option. If piece hashes are provided, this option can detect damaged portions of a file and re-download them. If a hash of entire file is provided, hash check is only done when the file has already been downloaded. This is determined by file length. If hash check fails, file is re-downloaded from scratch. If both piece hashes and a hash of entire file are provided, only piece hashes are used. Default: false
|
||||
```shell
|
||||
-V, --check-integrity [true|false]
|
||||
```
|
||||
|
||||
- Continue downloading a partially downloaded file. Use this option to resume a download started by a web browser or another program which downloads files sequentially from the beginning. Currently this option is only applicable to [HTTP](../../internet/HTTP.md)(S)/FTP downloads.
|
||||
```shell
|
||||
-c, --continue [true|false]
|
||||
```
|
||||
|
||||
- Set checksum. TYPE is hash type. The supported hash type is listed in Hash Algorithms in aria2c -v. DIGEST is hex digest. For example, setting sha-1 digest looks like this: sha-1=0192ba11326fe2298c8cb4de616f4d4140213838 This option applies only to [HTTP](../../internet/HTTP.md)(S)/FTP downloads.
|
||||
```shell
|
||||
--checksum=<TYPE>=<DIGEST>
|
||||
```
|
||||
|
||||
- The maximum number of connections to one server for each download. Default: **1**
|
||||
```shell
|
||||
-x, --max-connection-per-server=<NUM>
|
||||
```
|
||||
|
||||
- aria2 does not split less than 2*SIZE byte range. For example, let's consider downloading 20MiB file. If SIZE is 10M, aria2 can split file into 2 range (0-10MiB) and (10MiB-20MiB) and download it using 2 sources(if --split >= 2, of course). If SIZE is 15M, since 2*15M > 20MiB, aria2 does not split file and download it using 1 source. You can append K or M (1K = 1024, 1M = 1024K). Possible Values: 1M -1024M Default: 20M
|
||||
```shell
|
||||
-k, --min-split-size=<SIZE>
|
||||
```
|
||||
|
||||
- The file name of the downloaded file. It is always relative to the directory given in --dir option.
|
||||
```shell
|
||||
-o, --out=<FILE>
|
||||
```
|
||||
|
||||
- Download a file using N connections. If more than N URIs are given, first N URIs are used and remaining URIs are used for backup. If less than N URIs are given, those URIs are used more than once so that N connections total are made simultaneously. The number of connections to the same host is restricted by the --max-connection-per-server option. See also the --min-split-size option. Default: 5
|
||||
```shell
|
||||
-s, --split=<N>
|
||||
```
|
||||
|
||||
- Verify the peer using certificates specified in --ca-certificate option. Default: true
|
||||
```shell
|
||||
--check-certificate [true|false]
|
||||
```
|
||||
|
||||
- [HTTP](../../internet/HTTP.md) Auth
|
||||
```shell
|
||||
--http-user=<USER>
|
||||
--http-passwd=<PASSWD>
|
||||
```
|
||||
|
||||
- Load Cookies from FILE using the Firefox3 format (SQLite3), Chromium/Google Chrome (SQLite3) and the Mozilla/[Firefox](../network/browsers/Firefox.md)(1.x/2.x)/Netscape format.
|
||||
```shell
|
||||
--load-cookies=<FILE>
|
||||
```
|
||||
|
||||
- Save Cookies to FILE in Mozilla/[Firefox](../network/browsers/Firefox.md)(1.x/2.x)/ Netscape format. If FILE already exists, it is overwritten. Session Cookies are also saved and their expiry values are treated as 0. Possible Values: /path/to/file
|
||||
```shell
|
||||
--save-cookies=<FILE>
|
||||
```
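A typical invocation combining the options above (URL and paths are placeholders):
```shell
aria2c -x 8 -s 8 -c -d ~/Downloads -o file.iso https://example.org/file.iso
```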
|
10
technology/applications/cli/bash.md
Normal file
10
technology/applications/cli/bash.md
Normal file
|
@ -0,0 +1,10 @@
|
|||
---
|
||||
obj: application
|
||||
website: https://www.gnu.org/software/bash/
|
||||
wiki: https://wikipedia.org/wiki/Bash_(Unix_shell)
|
||||
arch-wiki: https://wiki.archlinux.org/title/Bash
|
||||
repo: https://git.savannah.gnu.org/git/bash.git
|
||||
---
|
||||
|
||||
# bash
|
||||
Bash (Bourne-again Shell) is a command-line [shell](Shell.md)/programming language by the GNU Project. Bash can be run on most UNIX-like operating systems, including [Linux](../../linux/Linux.md). It can be used interactively or execute [shell](Shell.md) scripts.
|
16
technology/applications/cli/bat.md
Normal file
16
technology/applications/cli/bat.md
Normal file
|
@ -0,0 +1,16 @@
|
|||
---
|
||||
obj: application
|
||||
os: ["macos", "linux", "windows"]
|
||||
repo: https://github.com/sharkdp/bat
|
||||
---
|
||||
# bat
|
||||
bat is a `cat` clone with syntax highlighting and Git integration, written in [Rust](../../programming/languages/Rust.md)
|
||||
|
||||
## Usage
|
||||
Flags:
|
||||
```shell
|
||||
-r, --line-range <N:M>... Only print specific lines
|
||||
-A, --show-all Show non-printable characters
|
||||
-p, --plain Only show plain style, no decorations
|
||||
-d, --diff Only show changed lines
|
||||
```
|
31
technology/applications/cli/chattr.md
Normal file
31
technology/applications/cli/chattr.md
Normal file
|
@ -0,0 +1,31 @@
|
|||
---
|
||||
obj: application
|
||||
repo: https://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git
|
||||
---
|
||||
# chattr
|
||||
change file attributes on a Linux file system
|
||||
Usage:
|
||||
```shell
|
||||
chattr +<Attribute> <FILE> # Add Attribute
|
||||
chattr -<Attribute> <FILE> # Remove Attribute
|
||||
chattr =<Attribute> <FILE> # Set Attribute
|
||||
```
|
||||
|
||||
## Options
|
||||
| Option | Description |
|
||||
| ------ | ---------------------------------------------------------------- |
|
||||
| `-R` | Recursively change attributes of directories and their contents. |
|
||||
|
||||
## Attributes
|
||||
| Attribute | Description |
|
||||
| --------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| `a` | A file with the 'a' attribute set can only be opened in append mode for writing. Only the superuser or a process possessing the CAP_LINUX_IMMUTABLE capability can set or clear this attribute. |
|
||||
| `A` | When a file with the 'A' attribute set is accessed, its atime record is not modified. This avoids a certain amount of disk I/O for laptop systems. |
|
||||
| `c` | A file with the 'c' attribute set is automatically compressed on the disk by the kernel. A read from this file returns uncompressed data. A write to this file compresses data before storing them on the disk. Note: please make sure to read the bugs and limitations section at the end of this document. (Note: For btrfs, If the 'c' flag is set, then the 'C' flag cannot be set. Also conflicts with btrfs mount option 'nodatasum') |
|
||||
| `C` | A file with the 'C' attribute set will not be subject to copy-on-write updates. This flag is only supported on file systems which perform copy-on-write. (Note: For btrfs, the 'C' flag should be set on new or empty files. If it is set on a file which already has data blocks, it is undefined when the blocks assigned to the file will be fully stable. If the 'C' flag is set on a directory, it will have no effect on the directory, but new files created in that directory will have the No_COW attribute set. If the 'C' flag is set, then the 'c' flag cannot be set.) |
|
||||
| `D` | When a directory with the 'D' attribute set is modified, the changes are written synchronously to the disk; this is equivalent to the 'dirsync' mount option applied to a subset of the files. |
|
||||
| `i` | A file with the 'i' attribute cannot be modified: it cannot be deleted or renamed, no link can be created to this file, most of the file's metadata can not be modified, and the file can not be opened in write mode. Only the superuser or a process possessing the CAP_LINUX_IMMUTABLE capability can set or clear this attribute. |
|
||||
| `m` | A file with the 'm' attribute is excluded from compression on file systems that support per-file compression. |
|
||||
| `S` | When a file with the 'S' attribute set is modified, the changes are written synchronously to the disk; this is equivalent to the 'sync' mount option applied to a subset of the files. |
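Example (attributes can be inspected with `lsattr` from the same e2fsprogs package):
```shell
chattr +i /etc/resolv.conf    # make the file immutable
lsattr /etc/resolv.conf
chattr -i /etc/resolv.conf
```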
|
||||
|
||||
|
24
technology/applications/cli/choose.md
Normal file
24
technology/applications/cli/choose.md
Normal file
|
@ -0,0 +1,24 @@
|
|||
---
|
||||
obj: application
|
||||
os:
|
||||
- macos
|
||||
- linux
|
||||
- windows
|
||||
repo: https://github.com/theryangeary/choose
|
||||
---
|
||||
# choose
|
||||
`choose`, a human-friendly and fast alternative to `cut` and (sometimes) `awk`
|
||||
|
||||
## Usage
|
||||
```shell
|
||||
choose [OPTIONS] <choices>...
|
||||
choose 1 # Choose the second element
|
||||
choose -f" " 0:3 # Choose element 0 to 3 seperated by " "
|
||||
```
|
||||
|
||||
## Options
|
||||
| Option | Description |
|
||||
| ------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------- |
|
||||
| `-f, --field-separator <field-separator>` | Specify field separator other than whitespace, using [Rust](../../programming/languages/Rust.md) `regex` syntax |
|
||||
| `-i, --input <input>` | Input file |
|
||||
| `-o, --output-field-separator <output-field-separator>` | Specify output field separator |
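
A few illustrative invocations combining these options (the input data is hypothetical):

```shell
# Print the second whitespace-separated field of each line
ps aux | choose 1

# Use ':' as the input field separator and keep the first two fields
echo "a:b:c:d" | choose -f ':' 0:1

# Read from a file and join the selected fields with a comma
choose -i data.txt -o ',' 0 2
```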
|
23
technology/applications/cli/crunch.md
Normal file
|
@ -0,0 +1,23 @@
|
|||
---
|
||||
obj: application
|
||||
source: https://www.kali.org/tools/crunch/
|
||||
repo: https://salsa.debian.org/debian/crunch
|
||||
---
|
||||
|
||||
# crunch
|
||||
wordlist generator
|
||||
|
||||
Usage: `crunch <min-len> <max-len> [<charset string>] [options]`
|
||||
|
||||
## Options
|
||||
| Option | Description |
|
||||
| -------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `-c number` | Specifies the number of lines to write to output file, only works if -o START is used |
|
||||
| `-d numbersymbol` | Limits the number of duplicate characters. `-d 2@` limits the lower case alphabet to output like aab and aac. aaa would not be generated as that is 3 consecutive letters of a. |
|
||||
| `-e string` | Specifies when crunch should stop early |
|
||||
| `-f /path/to/charset.lst charset-name` | Specifies a character set from the charset.lst |
|
||||
| `-i` | Inverts the output so instead of aaa,aab,aac,aad, etc you get aaa,baa,caa,daa,aba,bba |
|
||||
| `-o wordlist.txt` | Specifies the file to write the output to, eg: wordlist.txt |
|
||||
| `-s startblock` | Specifies a starting string, eg: 03god22fs |
|
||||
| `-t @,%^` | Specifies a pattern, eg: @@god@@@@ where only the @'s, ,'s, %'s, and ^'s will change. <br>`@` will insert lower case characters<br>`,` will insert upper case characters<br>`%` will insert numbers<br>`^` will insert symbols |
|
||||
| `-z gzip, bzip2, lzma, and 7z` | Compresses the output from the -o option. Valid parameters are gzip, bzip2, lzma, and [7z](p7zip.md). |
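
For example (a hedged sketch; charsets, patterns and output files are placeholders):

```shell
# Every 4-digit numeric PIN, written to a wordlist
crunch 4 4 0123456789 -o pins.txt

# 8-character candidates matching a fixed pattern:
# literal "pass" followed by four digits ('%' inserts numbers)
crunch 8 8 -t pass%%%% -o candidates.txt
```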
|
32
technology/applications/cli/curl.md
Normal file
|
@ -0,0 +1,32 @@
|
|||
---
|
||||
obj: application
|
||||
os: linux
|
||||
website: https://curl.se/
|
||||
repo: https://github.com/curl/curl
|
||||
---
|
||||
# curl
|
||||
cURL is a command-line tool and library for transferring data with URLs. It supports a wide range of protocols, making it a versatile tool for making [HTTP](../../internet/HTTP.md) requests, downloading files, and more.
|
||||
|
||||
## Usage
|
||||
To make a simple GET request: `curl https://example.com`
|
||||
|
||||
## Options
|
||||
| Option | Description |
|
||||
| -------------------------------------- | ------------------------------------------------------------------------------------- |
|
||||
| `-C, --continue-at <offset>` | Continue/Resume a previous file transfer at the given offset. |
|
||||
| `-c, --cookie-jar <filename>` | Specify to which file you want curl to write all cookies after a completed operation. |
|
||||
| `-b, --cookie <data/filename>` | Pass the data to the [HTTP](../../internet/HTTP.md) server in the [Cookie](../../internet/Cookie.md) header. |
|
||||
| `-d, --data <data>` | Sends the specified data in a POST request to the [HTTP](../../internet/HTTP.md) server |
|
||||
| `-F, --form <name=content>` | Specify multipart MIME data |
|
||||
| `-k, --insecure` | Allow insecure server connections when using SSL |
|
||||
| `-L, --location` | Follow redirects |
|
||||
| `-o, --output <file>` | Write to file instead of stdout |
|
||||
| `-x, --proxy [protocol://]host[:port]` | Use this proxy |
|
||||
| `-X, --request <command>` | Specify request command to use |
|
||||
| `-r, --range <range>` | Retrieve only the bytes within RANGE |
|
||||
| `--retry <num> ` | Retry request if transient problems occur |
|
||||
| `-s, --silent` | Silent mode |
|
||||
| `--retry-delay <seconds> ` | Wait time between retries |
|
||||
| `-u, --user <user:password>` | Server user and password |
|
||||
| `-A, --user-agent <name>` | Send User-Agent \<name> to server |
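
A few common combinations of these flags (URLs and credentials are placeholders):

```shell
# Follow redirects and write the response to a file
curl -L -o page.html https://example.com

# Resume an interrupted download ('-C -' lets curl work out the offset)
curl -C - -o big.iso https://example.com/big.iso

# POST form data with basic authentication
curl -d 'name=value' -u user:password https://example.com/api

# Fetch silently through a proxy with a custom User-Agent
curl -s -x http://127.0.0.1:8080 -A 'Mozilla/5.0' https://example.com
```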
|
||||
|
20
technology/applications/cli/diff.md
Normal file
|
@ -0,0 +1,20 @@
|
|||
---
|
||||
obj: application
|
||||
wiki: https://en.wikipedia.org/wiki/Diff
|
||||
website: https://www.gnu.org/software/diffutils/
|
||||
repo: https://git.savannah.gnu.org/cgit/diffutils.git
|
||||
---
|
||||
# diff
|
||||
compare files line by line
|
||||
Usage: `diff [OPTION]... FILES`
|
||||
|
||||
## Options
|
||||
| Option | Description |
|
||||
| ------------------------------ | ----------------------------------------------- |
|
||||
| `--normal` | output a normal diff (the default) |
|
||||
| `-q, --brief` | report only when files differ |
|
||||
| `-s, --report-identical-files` | report when two files are the same |
|
||||
| `-c, -C NUM, --context[=NUM]` | output NUM (default 3) lines of copied context |
|
||||
| `-u, -U NUM, --unified[=NUM]` | output NUM (default 3) lines of unified context |
|
||||
| `-r, --recursive` | recursively compare any subdirectories found |
|
||||
| `-a, --text` | treat all files as text |
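
For example:

```shell
# Unified diff between two files
diff -u old.conf new.conf

# Recursively compare two directories, reporting only which files differ
diff -rq dir_a dir_b
```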
|
9
technology/applications/cli/diskonaut.md
Normal file
|
@ -0,0 +1,9 @@
|
|||
---
|
||||
obj: application
|
||||
os:
|
||||
- linux
|
||||
repo: https://github.com/imsnif/diskonaut
|
||||
---
|
||||
# Diskonaut
|
||||
[Repo](https://github.com/imsnif/diskonaut)
|
||||
Diskonaut shows disk usage graphically to find big files.
|
71
technology/applications/cli/doas.md
Normal file
|
@ -0,0 +1,71 @@
|
|||
---
|
||||
obj: application
|
||||
repo: https://github.com/Duncaen/OpenDoas
|
||||
wiki: https://en.wikipedia.org/wiki/Doas
|
||||
arch-wiki: https://wiki.archlinux.org/title/Doas
|
||||
---
|
||||
|
||||
# doas
|
||||
doas is a program to execute commands as another user. The system administrator can configure it to give specified users privileges to execute specified commands. It is free and open-source under the ISC license and available in Unix and Unix-like operating systems ([FreeBSD](../../bsd/FreeBSD.md), [OpenBSD](../../bsd/OpenBSD.md), [Linux](../../linux/Linux.md)).
|
||||
|
||||
## Usage
|
||||
To use doas, simply prefix a command and its arguments with doas and a space:
|
||||
```shell
|
||||
$ doas cmd
|
||||
```
|
||||
|
||||
To get to an interactive shell with root prompt:
|
||||
```shell
|
||||
$ doas -s
|
||||
```
|
||||
|
||||
### Options
|
||||
| Option | Description |
|
||||
| --------- | ------------------------------------------------- |
|
||||
| `-s` | Execute the shell from `SHELL` or /etc/passwd. |
|
||||
| `-u user` | Execute the command as user. The default is root. |
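
For example (assuming a matching rule exists in `/etc/doas.conf`; the commands are placeholders):

```shell
# Run a command as root (the default target user)
doas pacman -Syu

# Run a command as another user
doas -u postgres psql

# Start an interactive root shell
doas -s
```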
|
||||
|
||||
## Configuration
|
||||
The configuration for doas is stored at `/etc/doas.conf`.
|
||||
|
||||
The config file consist of rules with the following format:
|
||||
`permit|deny [options] identity [as target] [cmd command [args ...]]`
|
||||
|
||||
Rules consist of the following parts:
|
||||
- `permit|deny`: The action to be taken if this rule matches.
|
||||
|
||||
Options:
|
||||
|
||||
| Option | Description |
|
||||
| -------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `nopass` | The user is not required to enter a password. |
|
||||
| `nolog` | Do not log successful command execution to syslogd |
|
||||
| `persist` | After the user successfully authenticates, do not ask for a password again for some time. |
|
||||
| `keepenv` | Environment variables other than those listed in doas are retained when creating the environment for the new process. |
|
||||
| `setenv {var=value}` | Keep or set the space-separated specified variables. Variables may also be removed with a leading ‘-’ or set using the latter syntax. If the first character of value is a ‘`$`’ then the value to be set is taken from the existing environment variable of the indicated name. This option is processed after the default environment has been created. |
|
||||
|
||||
|
||||
- `identity`: The username to match. Groups may be specified by prepending a colon (‘:’). Numeric IDs are also accepted.
|
||||
|
||||
- `as`: The target user the running user is allowed to run the command as. The default is all users.
|
||||
|
||||
- `cmd`: The command the user is allowed or denied to run. The default is all commands. Be advised that it is best to specify absolute paths. If a relative path is specified, only a restricted `PATH` will be searched.
|
||||
|
||||
- `args`: Arguments to command. The command arguments provided by the user need to match those specified. The keyword `args` alone means that command must be run without any arguments.
|
||||
|
||||
The last matching rule determines the action taken. If no rule matches, the action is denied.
|
||||
|
||||
Comments can be put anywhere in the file using a hash mark (‘#’), and extend to the end of the current line.
|
||||
|
||||
The following quoting rules apply:
|
||||
- The text between a pair of double quotes (‘"’) is taken as is.
|
||||
- The backslash character (‘\’) escapes the next character, including new line characters, outside comments; as a result, comments may not be extended over multiple lines.
|
||||
- If quotes or backslashes are used in a word, it is not considered a keyword.
|
||||
|
||||
### Examples
|
||||
```
|
||||
permit persist setenv { PKG_CACHE PKG_PATH } aja cmd pkg_add
|
||||
permit setenv { -ENV PS1=$DOAS_PS1 SSH_AUTH_SOCK } :wheel
|
||||
permit nopass tedu as root cmd /usr/sbin/procmap
|
||||
permit nopass keepenv setenv { PATH } root as root
|
||||
```
|
54
technology/applications/cli/duf.md
Normal file
|
@ -0,0 +1,54 @@
|
|||
---
|
||||
obj: application
|
||||
os:
|
||||
- linux
|
||||
repo: https://github.com/muesli/duf
|
||||
---
|
||||
# duf
|
||||
Disk Usage/Free Utility
|
||||
|
||||
## Usage
|
||||
You can simply start duf without any command-line arguments:
|
||||
```
|
||||
duf
|
||||
```
|
||||
|
||||
If you supply arguments, duf will only list specific devices & mount points:
|
||||
```
|
||||
duf /home /some/file
|
||||
```
|
||||
|
||||
If you want to list everything (including pseudo, duplicate, inaccessible file systems):
|
||||
```
|
||||
duf --all
|
||||
```
|
||||
|
||||
### Filtering
|
||||
|
||||
You can show and hide specific tables:
|
||||
```
|
||||
duf --only local,network,fuse,special,loops,binds
|
||||
duf --hide local,network,fuse,special,loops,binds
|
||||
```
|
||||
|
||||
You can also show and hide specific [filesystems](../../linux/filesystems/Filesystems.md):
|
||||
```
|
||||
duf --only-fs tmpfs,vfat
|
||||
duf --hide-fs tmpfs,vfat
|
||||
```
|
||||
|
||||
...or specific mount points:
|
||||
```
|
||||
duf --only-mp /,/home,/dev
|
||||
duf --hide-mp /,/home,/dev
|
||||
```
|
||||
|
||||
Wildcards inside quotes work:
|
||||
```
|
||||
duf --only-mp '/sys/*,/dev/*'
|
||||
```
|
||||
|
||||
If you prefer your output as [JSON](../../files/JSON.md):
|
||||
```
|
||||
duf --json
|
||||
```
|
20
technology/applications/cli/eza.md
Normal file
|
@ -0,0 +1,20 @@
|
|||
---
|
||||
obj: application
|
||||
os: linux
|
||||
repo: https://github.com/eza-community/eza
|
||||
---
|
||||
# eza
|
||||
[**eza**](https://eza.rocks/) is a modern replacement for the venerable file-listing command-line program `ls` that ships with Unix and Linux operating systems, giving it more features and better defaults. It uses colours to distinguish file types and metadata. It knows about symlinks, extended attributes, and Git. And it’s **small**, **fast**, and just **one single binary**.
|
||||
|
||||
## Usage
|
||||
Flags:
|
||||
```shell
|
||||
-l, --long display extended file metadata as a table
|
||||
-R, --recurse recurse into directories
|
||||
-L, --level DEPTH limit the depth of recursion
|
||||
-T, --tree recurse into directories as a tree
|
||||
-a, --all show hidden and 'dot' files
|
||||
-r, --reverse reverse the sort order
|
||||
-D, --only-dirs list only directories
|
||||
--git-ignore ignore files mentioned in '.gitignore'
|
||||
```
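
For example:

```shell
# Long listing including hidden files
eza -la

# Tree view, two levels deep, skipping anything ignored by .gitignore
eza -T -L 2 --git-ignore

# Only directories, in reverse sort order
eza -D -r
```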
|
22
technology/applications/cli/fd.md
Normal file
|
@ -0,0 +1,22 @@
|
|||
---
|
||||
obj: application
|
||||
os: linux
|
||||
repo: https://github.com/sharkdp/fd
|
||||
---
|
||||
# fd
|
||||
`fd` is a program to find entries in your filesystem. It is a simple, fast and user-friendly alternative to [`find`](https://www.gnu.org/software/findutils/). While it does not aim to support all of `find`'s powerful functionality, it provides sensible (opinionated) defaults for a majority of use cases.
|
||||
|
||||
## Usage
|
||||
Flags:
|
||||
```shell
|
||||
-H, --hidden Include hidden files
|
||||
-d, --max-depth <depth> Limit recursion
|
||||
-e, --extension <ext> Search by extension
|
||||
-x, --exec <cmd>... Execute command for every search result.
|
||||
The following placeholders are substituted before the command is executed:
|
||||
'{}': path (of the current search result)
|
||||
'{/}': basename
|
||||
'{//}': parent directory
|
||||
'{.}': path without file extension
|
||||
'{/.}': basename without file extension
|
||||
```
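
For example (the command run via `-x` is a placeholder):

```shell
# Find Rust sources, including hidden directories
fd -H -e rs

# Limit the recursion depth
fd -d 2 pattern

# Convert every JPEG found, using the placeholders described above
fd -e jpg -x convert {} {.}.png
```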
|
15
technology/applications/cli/file.md
Normal file
|
@ -0,0 +1,15 @@
|
|||
---
|
||||
obj: application
|
||||
repo: https://github.com/file/file
|
||||
wiki: https://en.wikipedia.org/wiki/File_(command)
|
||||
website: https://darwinsys.com/file
|
||||
---
|
||||
# file
|
||||
determine file / [MIME](../../files/MIME.md) type
|
||||
Usage: `file [OPTION] [FILE]...`
|
||||
|
||||
## Options
|
||||
| Option | Description |
|
||||
| ------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `-b, --brief` | Do not prepend filenames to output lines (brief mode) |
|
||||
| `-i, --mime` | Causes the file command to output mime type strings rather than the more traditional human readable ones. Thus it may say ‘text/plain; charset=us-ascii’ rather than "ASCII text" |
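
For example:

```shell
# Describe a file
file /bin/ls

# Print only the MIME type, without the leading filename
file -bi document.pdf
```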
|
10
technology/applications/cli/glow.md
Normal file
|
@ -0,0 +1,10 @@
|
|||
---
|
||||
obj: application
|
||||
os: linux
|
||||
repo: https://github.com/charmbracelet/glow
|
||||
---
|
||||
# Glow
|
||||
[Repo](https://github.com/charmbracelet/glow)
|
||||
Glow is a terminal based markdown reader designed from the ground up to bring out the beauty—and power—of the CLI.
|
||||
|
||||
Use it to discover markdown files, read documentation directly on the command line and stash markdown files to your own private collection so you can read them anywhere. Glow will find local markdown files in subdirectories or a local Git repository.
|
34
technology/applications/cli/handlr.md
Normal file
|
@ -0,0 +1,34 @@
|
|||
---
|
||||
obj: application
|
||||
os: linux
|
||||
repo: https://github.com/chmln/handlr
|
||||
---
|
||||
# Handlr
|
||||
Manage your default applications with ease using `handlr`!
|
||||
|
||||
Open files in default application:
|
||||
```shell
|
||||
handlr open <file>
|
||||
```
|
||||
|
||||
List default apps:
|
||||
```shell
|
||||
handlr list
|
||||
```
|
||||
|
||||
Get default app:
|
||||
```sh
|
||||
handlr get .png
|
||||
```
|
||||
|
||||
Set default apps:
|
||||
```sh
|
||||
# Set default handler for png files
|
||||
handlr set .png feh.desktop
|
||||
|
||||
# Set wildcard handler for all text files
|
||||
handlr set 'text/*' nvim.desktop
|
||||
|
||||
# Set default handler based on mime
|
||||
handlr set application/pdf evince.desktop
|
||||
```
|
8
technology/applications/cli/hashcat.md
Normal file
|
@ -0,0 +1,8 @@
|
|||
---
|
||||
obj: application
|
||||
repo: https://github.com/hashcat/hashcat
|
||||
website: https://hashcat.net/hashcat
|
||||
---
|
||||
|
||||
# hashcat
|
||||
#wip #🐇
|
28
technology/applications/cli/hck.md
Normal file
|
@ -0,0 +1,28 @@
|
|||
---
|
||||
obj: application
|
||||
os: linux
|
||||
repo: https://github.com/sstadick/hck
|
||||
---
|
||||
# hck
|
||||
_`hck` is a shortening of `hack`, a rougher form of `cut`._
|
||||
|
||||
A close-to-drop-in replacement for `cut` that can use a regex delimiter instead of a fixed string. Additionally, this tool allows specifying the order of the output columns using the same column selection syntax as `cut` (see below for examples).
|
||||
|
||||
No single feature of `hck` on its own makes it stand out over `awk`, `cut`, `xsv` or other such tools. Where `hck` excels is making common things easy, such as reordering output fields, or splitting records on a weird delimiter. It is meant to be simple and easy to use while exploring datasets. Think of this as filling a gap between `cut` and `awk`.
|
||||
|
||||
## Usage
|
||||
Usage: `hck [options]`
|
||||
Options:
|
||||
|
||||
| Option | Description |
|
||||
| ------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `-o, --output <OUTPUT>` | Output file to write to, defaults to `stdout` |
|
||||
| `-d, --delimiter <DELIMITER>` | Delimiter to use on input files; this is treated as a regex by default. To treat it as a string literal, add the `-L` flag<br>\[default: `\s+`] |
|
||||
| `-L, --delim-is-literal` | Treat the delimiter as a string literal. This can significantly improve performance, especially for single byte delimiters |
|
||||
| `-I, --use-input-delim` | Use the input delimiter as the output delimiter if the input is literal and no other output delimiter has been set |
|
||||
| `-D, --output-delimiter <OUTPUT_DELIMITER>` | Delimiter string to use on outputs<br>[default: "\t"] |
|
||||
| `-f, --fields <FIELDS>` | Fields to keep in the output, ex: 1,2-,-5,2-5. Fields are 1-based and inclusive |
|
||||
| `-e, --exclude <EXCLUDE>` | Fields to exclude from the output, ex: 3,9-11,15-. Exclude fields are 1 based and inclusive. Exclude fields take precedence over `fields` |
|
||||
| `-E, --exclude-header <EXCLUDE_HEADER>` | Headers to exclude from the output, ex: '^badfield.\*$'. This is a string literal by default. Add the `-r` flag to treat as a regex |
|
||||
| `-F, --header-field <HEADER_FIELD>` | A string literal or regex to select headers, ex: '^is\_.\*$'. This is a string literal by default. add the `-r` flag to treat it as a regex |
|
||||
| `-r, --header-is-regex` | Treat the header fields as regexes instead of string literals |
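
A couple of illustrative pipelines (the input files are placeholders):

```shell
# Select and reorder columns 2 and 1 of a CSV, treating ',' as a literal delimiter
hck -d ',' -L -f 2,1 data.csv

# Split on runs of whitespace (the default regex delimiter) and keep fields 1, 2 and 11 onwards
ps aux | hck -f 1,2,11-
```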
|
17
technology/applications/cli/hexyl.md
Normal file
|
@ -0,0 +1,17 @@
|
|||
---
|
||||
obj: application
|
||||
os:
|
||||
- linux
|
||||
repo: https://github.com/sharkdp/hexyl
|
||||
---
|
||||
# Hexyl
|
||||
[Repo](https://github.com/sharkdp/hexyl)
|
||||
#refactor
|
||||
`hexyl` is a simple hex viewer for the terminal. It uses a colored output to distinguish different categories of bytes (NULL bytes, printable [ASCII](../../files/ASCII.md) characters, [ASCII](../../files/ASCII.md) whitespace characters, other [ASCII](../../files/ASCII.md) characters and non-[ASCII](../../files/ASCII.md)).
|
||||
|
||||
## Usage
|
||||
Flags:
|
||||
```shell
|
||||
-n, --length <N> Read n bytes from input
|
||||
-s, --skip <N> Skip n bytes from input
|
||||
```
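
For example:

```shell
# Dump the first 256 bytes of a binary
hexyl -n 256 /bin/ls

# Skip the first 512 bytes (e.g. an MBR) and show the next 64
hexyl -s 512 -n 64 disk.img
```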
|
20
technology/applications/cli/huniq.md
Normal file
|
@ -0,0 +1,20 @@
|
|||
---
|
||||
obj: application
|
||||
os: linux
|
||||
repo: https://github.com/koraa/huniq
|
||||
---
|
||||
# Huniq
|
||||
[Repo](https://github.com/koraa/huniq)
|
||||
Command line utility to remove duplicates from the given input. Note that huniq does not sort the input, it just removes duplicates.
|
||||
|
||||
## Usage
|
||||
Flags:
|
||||
```shell
|
||||
-c, --count Output the amount of times a line was encountered
|
||||
|
||||
-d, --delim <delim> Which delimiter between elements to use. [default: "\n"]
|
||||
|
||||
-s, --sort Sort output by the number of occurrences, in ascending order
|
||||
|
||||
-S, --sort-descending Order output by the number of occurrences, in descending order
|
||||
```
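
For example:

```shell
# Remove duplicate lines without sorting first
cat urls.txt | huniq

# Count occurrences and list the most frequent lines first
huniq -c -S < access.log
```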
|
81
technology/applications/cli/hyperfine.md
Normal file
|
@ -0,0 +1,81 @@
|
|||
---
|
||||
obj: application
|
||||
repo: https://github.com/sharkdp/hyperfine
|
||||
---
|
||||
# hyperfine
|
||||
A command-line benchmarking tool.
|
||||
|
||||
## Usage
|
||||
To run a benchmark, you can simply call `hyperfine <command>...`. The argument(s) can be any [shell](Shell.md) command. For example:
|
||||
```shell
|
||||
hyperfine 'sleep 0.3'
|
||||
```
|
||||
|
||||
Hyperfine will automatically determine the number of runs to perform for each command. By default, it will perform at least 10 benchmarking runs and measure for at least 3 seconds. To change this, you can use the -r/--runs option:
|
||||
```shell
|
||||
hyperfine --runs 5 'sleep 0.3'
|
||||
```
|
||||
|
||||
If you want to compare the runtimes of different programs, you can pass multiple commands:
|
||||
```shell
|
||||
hyperfine 'hexdump file' 'xxd file'
|
||||
```
|
||||
|
||||
### Warmup runs and preparation commands
|
||||
For programs that perform a lot of disk I/O, the benchmarking results can be heavily influenced by disk caches and whether they are cold or warm.
|
||||
|
||||
If you want to run the benchmark on a warm cache, you can use the -w/--warmup option to perform a certain number of program executions before the actual benchmark:
|
||||
```shell
|
||||
hyperfine --warmup 3 'grep -R TODO *'
|
||||
```
|
||||
|
||||
Conversely, if you want to run the benchmark for a cold cache, you can use the -p/--prepare option to run a special command before each timing run. For example, to clear harddisk caches on [Linux](../../linux/Linux.md), you can run
|
||||
```shell
|
||||
sync; echo 3 | sudo tee /proc/sys/vm/drop_caches
|
||||
```
|
||||
|
||||
To use this specific command with hyperfine, call sudo -v to temporarily gain sudo permissions and then call:
|
||||
```shell
|
||||
hyperfine --prepare 'sync; echo 3 | sudo tee /proc/sys/vm/drop_caches' 'grep -R TODO *'
|
||||
```
|
||||
|
||||
### Parameterized benchmarks
|
||||
If you want to run a series of benchmarks where a single parameter is varied (say, the number of threads), you can use the -P/--parameter-scan option and call:
|
||||
```shell
|
||||
hyperfine --prepare 'make clean' --parameter-scan num_threads 1 12 'make -j {num_threads}'
|
||||
```
|
||||
|
||||
This also works with decimal numbers. The -D/--parameter-step-size option can be used to control the step size:
|
||||
```shell
|
||||
hyperfine --parameter-scan delay 0.3 0.7 -D 0.2 'sleep {delay}'
|
||||
```
|
||||
|
||||
This runs sleep 0.3, sleep 0.5 and sleep 0.7.
|
||||
|
||||
For non-numeric parameters, you can also supply a list of values with the -L/--parameter-list option:
|
||||
```shell
|
||||
hyperfine -L compiler gcc,clang '{compiler} -O2 main.cpp'
|
||||
```
|
||||
|
||||
## Options
|
||||
| Option | Description |
|
||||
| ---------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| `-w, --warmup <NUM>` | Perform NUM warmup runs before the actual benchmark. This can be used to fill (disk) caches for I/O-heavy programs. |
|
||||
| `-m, --min-runs <NUM>` | Perform at least NUM runs for each command (default: 10). |
|
||||
| `-M, --max-runs <NUM>` | Perform at most NUM runs for each command. By default, there is no limit. |
|
||||
| `-r, --runs <NUM>` | Perform exactly NUM runs for each command. If this option is not specified, hyperfine automatically determines the number of runs. |
|
||||
| `-s, --setup <CMD>` | Execute CMD before each set of timing runs. This is useful for compiling your software with the provided parameters, or to do any other work that should happen once before a series of benchmark runs, not every time as would happen with the --prepare option. |
|
||||
| `-p, --prepare <CMD>` | Execute CMD before each timing run. This is useful for clearing disk caches, for example. The --prepare option can be specified once for all commands or multiple times, once for each command. In the latter case, each preparation command will be run prior to the corresponding benchmark command. |
|
||||
| `-c, --cleanup <CMD>` | Execute CMD after the completion of all benchmarking runs for each individual command to be benchmarked. This is useful if the commands to be benchmarked produce artifacts that need to be cleaned up. |
|
||||
| `-P, --parameter-scan <VAR> <MIN> <MAX>` | Perform benchmark runs for each value in the range MIN..MAX. Replaces the string '{VAR}' in each command by the current parameter value. |
|
||||
| `-D, --parameter-step-size <DELTA>` | This argument requires --parameter-scan to be specified as well. Traverse the range MIN..MAX in steps of DELTA. |
|
||||
| `-L, --parameter-list <VAR> <VALUES>` | Perform benchmark runs for each value in the comma-separated list VALUES. Replaces the string '{VAR}' in each command by the current parameter value. |
|
||||
| `-i, --ignore-failure` | Ignore non-zero exit codes of the benchmarked programs. |
|
||||
| `--export-asciidoc <FILE>` | Export the timing summary statistics as an AsciiDoc table to the given FILE. |
|
||||
| `--export-csv <FILE>` | Export the timing summary statistics as [CSV](../../files/CSV.md) to the given FILE. If you need the timing results for each individual run, use the [JSON](../../files/JSON.md) export format. The output time unit is always seconds. |
|
||||
| `--export-json <FILE>` | Export the timing summary statistics and timings of individual runs as [JSON](../../files/JSON.md) to the given FILE. The output time unit is always seconds |
|
||||
| `--export-markdown <FILE>` | Export the timing summary statistics as a [Markdown](../../files/Markdown.md) table to the given FILE. |
|
||||
| `--show-output` | Print the stdout and stderr of the benchmark instead of suppressing it. This will increase the time it takes for benchmarks to run, so it should only be used for debugging purposes or when trying to benchmark output speed. |
|
||||
| `-n, --command-name <NAME>` | Give a meaningful name to a command. This can be specified multiple times if several commands are benchmarked. |
|
||||
| `--output <WHERE>` | Control where the output of the benchmark is redirected. Note that some programs like 'grep' detect when standard output is /dev/null and apply certain optimizations. To avoid that, consider using '--output=pipe'.<br>\<WHERE> can be:<br><br>- null: Redirect output to /dev/null (the default).<br>- pipe: Feed the output through a pipe before discarding it.<br>- inherit: Don't redirect the output at all (same as '--show-output').<br>- \<FILE>: Write the output to the given file. |
|
||||
| `--input <WHERE>` | Control where the input of the benchmark comes from.<br><br>\<WHERE> can be:<br>- null: Read from /dev/null (the default).<br>- \<FILE>: Read the input from the given file. |
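
Putting several of these options together (the benchmarked commands and file names are placeholders):

```shell
# Warm the cache, force at least 20 runs, and export a Markdown report
hyperfine --warmup 3 --min-runs 20 \
  --export-markdown bench.md \
  'fd -e rs' 'find . -name "*.rs"'
```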
|
38
technology/applications/cli/intermodal.md
Normal file
|
@ -0,0 +1,38 @@
|
|||
---
|
||||
obj: application
|
||||
os: linux
|
||||
repo: https://github.com/casey/intermodal
|
||||
---
|
||||
# Intermodal
|
||||
[Repo](https://github.com/casey/intermodal)
|
||||
Intermodal is a user-friendly and featureful command-line [BitTorrent](../../tools/BitTorrent.md) metainfo utility. The binary is called `imdl` and runs on [Linux](../../linux/Linux.md), [Windows](../../windows/Windows.md), and [macOS](../../macos/macOS.md).
|
||||
|
||||
## Usage
|
||||
### Create torrent file:
|
||||
```shell
|
||||
imdl torrent create file
|
||||
```
|
||||
|
||||
Flags:
|
||||
```shell
|
||||
-N, --name <TEXT> Set name of torrent
|
||||
-i, --input <INPUT> Torrent Files
|
||||
-c, --comment <TEXT> Torrent Comment
|
||||
-a, --announce <URL> Torrent Tracker
|
||||
```
|
||||
|
||||
### Show torrent information
|
||||
```shell
|
||||
imdl torrent show <torrent>
|
||||
```
|
||||
|
||||
### Verify torrent
|
||||
```shell
|
||||
imdl torrent verify <torrent>
|
||||
imdl torrent verify --input torr.torrent --content file
|
||||
```
|
||||
|
||||
### Generate magnet link
|
||||
```shell
|
||||
imdl torrent link <torrent>
|
||||
```
|
6
technology/applications/cli/jless.md
Normal file
|
@ -0,0 +1,6 @@
|
|||
---
|
||||
obj: application
|
||||
os: linux
|
||||
---
|
||||
# Jless
|
||||
[`jless`](https://jless.io/) is a command-line [JSON](../../files/JSON.md) viewer. Use it as a replacement for whatever combination of `less`, `jq`, `cat` and your editor you currently use for viewing [JSON](../../files/JSON.md) files. It is written in [Rust](../../programming/languages/Rust.md) and can be installed as a single standalone binary.
|
8
technology/applications/cli/joshuto.md
Normal file
|
@ -0,0 +1,8 @@
|
|||
---
|
||||
obj: application
|
||||
os: linux
|
||||
repo: https://github.com/kamiyaa/joshuto
|
||||
---
|
||||
# Joshuto
|
||||
[Repo](https://github.com/kamiyaa/joshuto)
|
||||
Joshuto is a [ranger](https://github.com/ranger/ranger)-like terminal file manager written in [Rust](../../programming/languages/Rust.md).
|
157
technology/applications/cli/jq.md
Normal file
|
@ -0,0 +1,157 @@
|
|||
---
|
||||
obj: application
|
||||
website: https://jqlang.github.io/jq/
|
||||
repo: https://github.com/jqlang/jq
|
||||
---
|
||||
# jq
|
||||
jq is a lightweight and flexible command-line [JSON](../../files/JSON.md) processor akin to `sed`, `awk`, `grep`, and friends for [JSON](../../files/JSON.md) data. It's written in portable C and has zero runtime dependencies, allowing you to easily slice, filter, map, and transform structured data.
|
||||
|
||||
## Usage
|
||||
```shell
|
||||
cat data.json | jq [FILTER]
|
||||
```
|
||||
|
||||
## Filters
|
||||
### Identity
|
||||
The absolute simplest filter is `.` . This filter takes its input and produces the same value as output. That is, this is the identity operator.
|
||||
|
||||
### Object Identifier
|
||||
The simplest _useful_ filter has the form `.foo`. When given a [JSON](../../files/JSON.md) object (aka dictionary or hash) as input, `.foo` produces the value at the key "foo" if the key is present, or null otherwise.
|
||||
|
||||
The `.foo` syntax only works for simple, identifier-like keys, that is, keys that are all made of alphanumeric characters and underscore, and which do not start with a digit.
|
||||
|
||||
If the key contains special characters or starts with a digit, you need to surround it with double quotes like this: `."foo$"`, or else `.["foo$"]`.
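
For example:

```shell
echo '{"foo": 42, "bar": "less interesting"}' | jq '.foo'
# => 42

echo '{"foo$": 1}' | jq '."foo$"'
# => 1
```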
|
||||
|
||||
### Array Index
|
||||
When the index value is an integer, `.[<number>]` can index arrays. Arrays are zero-based, so `.[2]` returns the third element.
|
||||
|
||||
Negative indices are allowed, with -1 referring to the last element, -2 referring to the next to last element, and so on.
|
||||
|
||||
### Array/String Slice
|
||||
The `.[<number>:<number>]` syntax can be used to return a subarray of an array or substring of a string. The array returned by `.[10:15]` will be of length 5, containing the elements from index 10 (inclusive) to index 15 (exclusive). Either index may be negative (in which case it counts backwards from the end of the array), or omitted (in which case it refers to the start or end of the array). Indices are zero-based.
|
||||
|
||||
### Array/Object Value Iterator
|
||||
If you use the `.[index]` syntax, but omit the index entirely, it will return _all_ of the elements of an array. Running `.[]` with the input `[1,2,3]` will produce the numbers as three separate results, rather than as a single array. A filter of the form `.foo[]` is equivalent to `.foo | .[]`.
|
||||
|
||||
You can also use this on an object, and it will return all the values of the object.
|
||||
|
||||
Note that the iterator operator is a generator of values.
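
For example:

```shell
echo '[1,2,3]' | jq '.[]'
# => 1
#    2
#    3

echo '{"a": 1, "b": 2}' | jq '.[]'
# => 1
#    2
```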
|
||||
|
||||
### Comma
|
||||
If two filters are separated by a comma, then the same input will be fed into both and the two filters' output value streams will be concatenated in order: first, all of the outputs produced by the left expression, and then all of the outputs produced by the right. For instance, filter `.foo, .bar`, produces both the "foo" fields and "bar" fields as separate outputs.
|
||||
|
||||
The `,` operator is one way to construct generators.
|
||||
|
||||
### Pipe
|
||||
The `|` operator combines two filters by feeding the output(s) of the one on the left into the input of the one on the right. It's similar to the Unix [shell](Shell.md)'s pipe, if you're used to that.
|
||||
|
||||
If the one on the left produces multiple results, the one on the right will be run for each of those results. So, the expression `.[] | .foo` retrieves the "foo" field of each element of the input array. This is a cartesian product, which can be surprising.
|
||||
|
||||
Note that `.a.b.c` is the same as `.a | .b | .c`.
|
||||
|
||||
Note too that `.` is the input value at the particular stage in a "pipeline", specifically: where the `.` expression appears. Thus `.a | . | .b` is the same as `.a.b`, as the `.` in the middle refers to whatever value `.a` produced.
|
||||
|
||||
### Array Construction: `[]`
|
||||
As in [JSON](../../files/JSON.md), `[]` is used to construct arrays, as in `[1,2,3]`. The elements of the arrays can be any jq expression, including a pipeline. All of the results produced by all of the expressions are collected into one big array. You can use it to construct an array out of a known quantity of values (as in `[.foo, .bar, .baz]`) or to "collect" all the results of a filter into an array (as in `[.items[].name]`)
|
||||
|
||||
Once you understand the "," operator, you can look at jq's array syntax in a different light: the expression `[1,2,3]` is not using a built-in syntax for comma-separated arrays, but is instead applying the `[]` operator (collect results) to the expression 1,2,3 (which produces three different results).
|
||||
|
||||
If you have a filter `X` that produces four results, then the expression `[X]` will produce a single result, an array of four elements.
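
For example:

```shell
echo '{"items": [{"name": "a"}, {"name": "b"}]}' | jq '[.items[].name]'
# produces the array ["a", "b"]
```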
|
||||
|
||||
### Object Construction: `{}`
|
||||
Like [JSON](../../files/JSON.md), `{}` is for constructing objects (aka dictionaries or hashes), as in: `{"a": 42, "b": 17}`.
|
||||
|
||||
If the keys are "identifier-like", then the quotes can be left off, as in `{a:42, b:17}`. Variable references as key expressions use the value of the variable as the key. Key expressions other than constant literals, identifiers, or variable references, need to be parenthesized, e.g., `{("a"+"b"):59}`.
|
||||
|
||||
The value can be any expression (although you may need to wrap it in parentheses if, for example, it contains colons), which gets applied to the {} expression's input (remember, all filters have an input and an output).
|
||||
```
|
||||
{foo: .bar}
|
||||
```
|
||||
|
||||
will produce the [JSON](../../files/JSON.md) object `{"foo": 42}` if given the [JSON](../../files/JSON.md) object `{"bar":42, "baz":43}` as its input. You can use this to select particular fields of an object: if the input is an object with "user", "title", "id", and "content" fields and you just want "user" and "title", you can write
|
||||
```
|
||||
{user: .user, title: .title}
|
||||
```
|
||||
|
||||
Because that is so common, there's a shortcut syntax for it: `{user, title}`.
|
||||
|
||||
If one of the expressions produces multiple results, multiple dictionaries will be produced. For example, if the input is
|
||||
```
|
||||
{"user":"stedolan","titles":["JQ Primer", "More JQ"]}
|
||||
```
|
||||
|
||||
then the expression
|
||||
```
|
||||
{user, title: .titles[]}
|
||||
```
|
||||
|
||||
will produce two outputs:
|
||||
```
|
||||
{"user":"stedolan", "title": "JQ Primer"}
|
||||
{"user":"stedolan", "title": "More JQ"}
|
||||
```
|
||||
|
||||
Putting parentheses around the key means it will be evaluated as an expression. With the same input as above,
|
||||
```
|
||||
{(.user): .titles}
|
||||
```
|
||||
|
||||
produces
|
||||
```
|
||||
{"stedolan": ["JQ Primer", "More JQ"]}
|
||||
```
|
||||
|
||||
## Functions
|
||||
### `has(key)`
|
||||
The builtin function `has` returns whether the input object has the given key, or the input array has an element at the given index.
|
||||
|
||||
### `map(f)`, `map_values(f)`
|
||||
For any filter `f`, `map(f)` and `map_values(f)` apply `f` to each of the values in the input array or object, that is, to the values of `.[]`.
|
||||
|
||||
In the absence of errors, `map(f)` always outputs an array whereas `map_values(f)` outputs an array if given an array, or an object if given an object.
|
||||
|
||||
When the input to `map_values(f)` is an object, the output object has the same keys as the input object except for those keys whose values when piped to `f` produce no values at all.
|
||||
|
||||
`map(f)` is equivalent to `[.[] | f]` and `map_values(f)` is equivalent to `.[] |= f`.
|
||||
|
||||
### `del(path)`
|
||||
The builtin function `del` removes a key and its corresponding value from an object.
|
||||
|
||||
### `reverse`
|
||||
This function reverses an array.
|
||||
|
||||
### `contains(element)`
|
||||
The filter `contains(b)` will produce true if b is completely contained within the input. A string B is contained in a string A if B is a substring of A. An array B is contained in an array A if all elements in B are contained in any element in A. An object B is contained in object A if all of the values in B are contained in the value in A with the same key. All other types are assumed to be contained in each other if they are equal.
|
||||
|
||||
### `startswith(str)`
|
||||
Outputs `true` if . starts with the given string argument.
|
||||
|
||||
### `endswith(str)`
|
||||
Outputs `true` if . ends with the given string argument.
|
||||
|
||||
### `split(str)`
|
||||
Splits an input string on the separator argument.
|
||||
|
||||
### `join(str)`
|
||||
Joins the array of elements given as input, using the argument as separator. It is the inverse of `split`: that is, running `split("foo") | join("foo")` over any input string returns said input string.
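
For example:

```shell
echo '["a","b","c"]' | jq 'join("-")'
# => "a-b-c"

echo '{"a": 1, "b": 2}' | jq 'map_values(. + 1)'
# produces {"a": 2, "b": 3}
```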
|
||||
|
||||
## Conditionals
|
||||
### if-then-else-end
|
||||
`if A then B else C end` will act the same as `B` if `A` produces a value other than false or null, but act the same as `C` otherwise.
|
||||
|
||||
`if A then B end` is the same as `if A then B else . end`. That is, the `else` branch is optional, and if absent is the same as `.`. This also applies to `elif` with absent ending `else` branch.
|
||||
|
||||
Checking for false or null is a simpler notion of "truthiness" than is found in JavaScript or [Python](../../programming/languages/Python.md), but it means that you'll sometimes have to be more explicit about the condition you want. You can't test whether, e.g. a string is empty using `if .name then A else B end`; you'll need something like `if .name == "" then A else B end` instead.
|
||||
|
||||
If the condition `A` produces multiple results, then `B` is evaluated once for each result that is not false or null, and `C` is evaluated once for each false or null.
|
||||
|
||||
More cases can be added to an if using `elif A then B` syntax.
|
||||
|
||||
Example: `jq 'if . == 0 then "zero" elif . == 1 then "one" else "many" end'`
|
||||
|
||||
### Alternative Operator `//`
|
||||
The `//` operator produces all the values of its left-hand side that are neither `false` nor `null`, or, if the left-hand side produces no values other than `false` or `null`, then `//` produces all the values of its right-hand side.
|
||||
|
||||
A filter of the form `a // b` produces all the results of `a` that are not `false` or `null`. If `a` produces no results, or no results other than `false` or `null`, then `a // b` produces the results of `b`.
|
||||
|
||||
This is useful for providing defaults: `.foo // 1` will evaluate to `1` if there's no `.foo` element in the input.
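
For example:

```shell
echo '{"bar": 1}' | jq '.foo // "default"'
# => "default"

echo '{"foo": 42}' | jq '.foo // "default"'
# => 42
```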
|
1141
technology/applications/cli/just.md
Normal file
File diff suppressed because one or more lines are too long
63
technology/applications/cli/losetup.md
Normal file
|
@ -0,0 +1,63 @@
|
|||
---
|
||||
obj: application
|
||||
repo: git://git.kernel.org/pub/scm/utils/util-linux/util-linux.git
|
||||
---
|
||||
|
||||
# losetup
|
||||
set up and control [loop devices](../../linux/Loop%20Device.md)
|
||||
|
||||
## Usage
|
||||
Get info:
|
||||
```shell
|
||||
losetup [loopdev]
|
||||
losetup -l [-a]
|
||||
losetup -j file
|
||||
```
|
||||
|
||||
Attach a loop device:
|
||||
```shell
|
||||
losetup -fP file
|
||||
```
|
||||
|
||||
Detach a loop device:
|
||||
```shell
|
||||
losetup -d loopdev ...
|
||||
```
|
||||
|
||||
Detach all associated loop devices:
|
||||
```shell
|
||||
losetup -D
|
||||
```
|
||||
|
||||
## Options
|
||||
| Option | Description |
|
||||
| --------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `-a, --all` | Show the status of all loop devices |
|
||||
| `-d, --detach loopdev...` | Detach the file or device associated with the specified loop device(s) |
|
||||
| `-D, --detach-all` | Detach all associated loop devices |
|
||||
| `-f, --find [file]` | Find the first unused loop device. If a file argument is present, use the found device as loop device. Otherwise, just print its name |
|
||||
| `--show` | Display the name of the assigned loop device if the `-f` option and a file argument are present |
|
||||
| `-L, --nooverlap` | Check for conflicts between loop devices to avoid a situation where the same backing file is shared between multiple loop devices. If the file is already used by another device, re-use that device rather than a new one. This option only makes sense together with `--find` |
|
||||
| `-j, --associated file` | Show the status of all loop devices associated with the given file |
|
||||
| `-P, --partscan` | Force the kernel to scan the partition table on a newly created loop device. Note that the partition table parsing depends on sector sizes |
|
||||
| `-r, --read-only` | Set up a read-only loop device |
|
||||
| `-v, --verbose` | Verbose mode |
|
||||
| `-l, --list` | If a loop device or the -a option is specified, print the default columns for either the specified loop device or all loop devices |
|
||||
| `-O, --output column[,column]...` | Specify the columns that are to be printed for the `--list` output |
|
||||
| `-J, --json` | Use JSON format for `--list` output |
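
A typical end-to-end workflow (a sketch; the assigned device name comes from `--show` and will vary):

```shell
# Create a 1 GiB disk image
dd if=/dev/zero of=disk.img bs=1M count=1024

# Attach it, scanning for partitions, and print the assigned loop device
losetup -fP --show disk.img   # e.g. /dev/loop0

# Use it like any block device, then detach it again
mkfs.ext4 /dev/loop0
mount /dev/loop0 /mnt
umount /mnt
losetup -d /dev/loop0
```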
|
||||
|
||||
## Output Columns
|
||||
| Output | Description |
|
||||
| ------------ | -------------------------------------- |
|
||||
| NAME | loop device name |
|
||||
| AUTOCLEAR | autoclear flag set |
|
||||
| BACK-FILE | device backing file |
|
||||
| BACK-INO | backing file inode number |
|
||||
| BACK-MAJ:MIN | backing file major:minor device number |
|
||||
| MAJ:MIN | loop device major:minor number |
|
||||
| OFFSET | offset from the beginning |
|
||||
| PARTSCAN | partscan flag set |
|
||||
| RO | read-only device |
|
||||
| SIZELIMIT | size limit of the file in bytes |
|
||||
| DIO | access backing file with direct-io |
|
||||
| LOG-SEC | logical sector size in bytes |
|
92
technology/applications/cli/lsblk.md
Normal file
|
@ -0,0 +1,92 @@
|
|||
---
|
||||
obj: application
|
||||
repo: git://git.kernel.org/pub/scm/utils/util-linux/util-linux.git
|
||||
---
|
||||
# lsblk
|
||||
List block devices
|
||||
Usage: `lsblk [options] [device...]`
|
||||
|
||||
## Options
|
||||
| Option | Description |
|
||||
| ------------------- | ------------------------------------------------------------------------------------------------------------------------ |
|
||||
| `-A, --noempty` | Don’t print empty devices |
|
||||
| `-a, --all` | Disable all built-in filters and list all empty devices and RAM disk devices too |
|
||||
| `-f, --fs` | Output info about filesystems. This option is equivalent to `-o NAME,FSTYPE,FSVER,LABEL,UUID,FSAVAIL,FSUSE%,MOUNTPOINTS` |
|
||||
| `-J, --json` | Use JSON output format |
|
||||
| `-l, --list` | Produce output in the form of a list |
|
||||
| `-N, --nvme` | Output info about NVMe devices only |
|
||||
| `-o, --output list` | Specify which output columns to print |
|
||||
| `-p, --paths` | Print full device paths |
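
For example:

```shell
# Filesystem-oriented overview of all block devices
lsblk -f

# Custom columns with full device paths for a single disk
lsblk -p -o NAME,SIZE,FSTYPE,MOUNTPOINTS /dev/nvme0n1
```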
|
||||
|
||||
## Output Columns
|
||||
| Output | Description |
|
||||
| ------------ | ------------------------------------------------- |
|
||||
| ALIGNMENT | alignment offset |
|
||||
| ID-LINK | the shortest udev /dev/disk/by-id link name |
|
||||
| ID | udev ID (based on ID-LINK) |
|
||||
| DISC-ALN | discard alignment offset |
|
||||
| DAX | dax-capable device |
|
||||
| DISC-GRAN | discard granularity |
|
||||
| DISK-SEQ | disk sequence number |
|
||||
| DISC-MAX | discard max bytes |
|
||||
| DISC-ZERO | discard zeroes data |
|
||||
| FSAVAIL | filesystem size available |
|
||||
| FSROOTS | mounted filesystem roots |
|
||||
| FSSIZE | filesystem size |
|
||||
| FSTYPE | filesystem type |
|
||||
| FSUSED | filesystem size used |
|
||||
| FSUSE% | filesystem use percentage |
|
||||
| FSVER | filesystem version |
|
||||
| GROUP | group name |
|
||||
| HCTL | Host:Channel:Target:Lun for SCSI |
|
||||
| HOTPLUG | removable or hotplug device (usb, pcmcia, ...) |
|
||||
| KNAME | internal kernel device name |
|
||||
| LABEL | filesystem LABEL |
|
||||
| LOG-SEC | logical sector size |
|
||||
| MAJ:MIN | major:minor device number |
|
||||
| MIN-IO | minimum I/O size |
|
||||
| MODE | device node permissions |
|
||||
| MODEL | device identifier |
|
||||
| MQ | device queues |
|
||||
| NAME | device name |
|
||||
| OPT-IO | optimal I/O size |
|
||||
| OWNER | user name |
|
||||
| PARTFLAGS | partition flags |
|
||||
| PARTLABEL | partition LABEL |
|
||||
| PARTN | partition number as read from the partition table |
|
||||
| PARTTYPE | partition type code or UUID |
|
||||
| PARTTYPENAME | partition type name |
|
||||
| PARTUUID | partition UUID |
|
||||
| PATH | path to the device node |
|
||||
| PHY-SEC | physical sector size |
|
||||
| PKNAME | internal parent kernel device name |
|
||||
| PTTYPE | partition table type |
|
||||
| PTUUID | partition table identifier (usually UUID) |
|
||||
| RA | read-ahead of the device |
|
||||
| RAND | adds randomness |
|
||||
| REV | device revision |
|
||||
| RM | removable device |
|
||||
| RO | read-only device |
|
||||
| ROTA | rotational device |
|
||||
| RQ-SIZE | request queue size |
|
||||
| SCHED | I/O scheduler name |
|
||||
| SERIAL | disk serial number |
|
||||
| SIZE | size of the device |
|
||||
| START | partition start offset |
|
||||
| STATE | state of the device |
|
||||
| SUBSYSTEMS | de-duplicated chain of subsystems |
|
||||
| MOUNTPOINT | where the device is mounted |
|
||||
| MOUNTPOINTS | all locations where device is mounted |
|
||||
| TRAN | device transport type |
|
||||
| TYPE | device type |
|
||||
| UUID | filesystem UUID |
|
||||
| VENDOR | device vendor |
|
||||
| WSAME | write same max bytes |
|
||||
| WWN | unique storage identifier |
|
||||
| ZONED | zone model |
|
||||
| ZONE-SZ | zone size |
|
||||
| ZONE-WGRAN | zone write granularity |
|
||||
| ZONE-APP | zone append max bytes |
|
||||
| ZONE-NR | number of zones |
|
||||
| ZONE-OMAX | maximum number of open zones |
|
||||
| ZONE-AMAX | maximum number of active zones |
|
17
technology/applications/cli/man.md
Normal file
|
@ -0,0 +1,17 @@
|
|||
---
|
||||
obj: application
|
||||
arch-wiki: https://wiki.archlinux.org/title/man_page
|
||||
wiki: https://en.wikipedia.org/wiki/Man_page
|
||||
---
|
||||
# man
|
||||
A man page (short for manual page) is a form of software documentation usually found on a Unix or Unix-like operating system. Topics covered include computer programs (including library and system calls), formal standards and conventions, and even abstract concepts. A user may invoke a man page by issuing the man command.
|
||||
|
||||
To read a man page, simply enter:
|
||||
```shell
|
||||
man page_name
|
||||
```
|
||||
|
||||
## Online Man Pages
|
||||
- [Arch Linux Man Pages](https://man.archlinux.org)
|
||||
- [FreeBSD Man Pages](https://man.freebsd.org/cgi/man.cgi)
|
||||
- [OpenBSD Man Pages](https://man.openbsd.org/)
|
799
technology/applications/cli/micro.md
Normal file
File diff suppressed because one or more lines are too long
56
technology/applications/cli/netcat.md
Normal file
|
@ -0,0 +1,56 @@
|
|||
---
|
||||
obj: application
|
||||
wiki: https://en.wikipedia.org/wiki/Netcat
|
||||
---
|
||||
|
||||
# netcat
|
||||
The `nc` (or `netcat`) utility is used for just about anything under the sun involving [TCP](../../internet/TCP.md), [UDP](../../internet/UDP.md), or UNIX-domain sockets. It can open [TCP](../../internet/TCP.md) connections, send [UDP](../../internet/UDP.md) packets, listen on arbitrary [TCP](../../internet/TCP.md) and [UDP](../../internet/UDP.md) ports, do port scanning, and deal with both IPv4 and IPv6.
|
||||
|
||||
Common uses include:
|
||||
- simple [TCP](../../internet/TCP.md) proxies
|
||||
- shell-script based [HTTP](../../internet/HTTP.md) clients and servers
|
||||
- network daemon testing
|
||||
- a SOCKS or [HTTP](../../internet/HTTP.md) ProxyCommand for [ssh](../SSH.md)
|
||||
|
||||
## Options
|
||||
| Option | Description |
|
||||
| ------------------ | --------------------------------------------------------------------------------------------------- |
|
||||
| `-4` | Use IPv4 addresses only |
|
||||
| `-6` | Use IPv6 addresses only |
|
||||
| `-b` | Allow broadcast |
|
||||
| `-l` | Listen for an incoming connection rather than initiating a connection to a remote host |
|
||||
| `-N` | Shut down the network socket after EOF on the input. Some servers require this to finish their work |
|
||||
| `-p <source_port>` | Specify the source port `nc` should use, subject to privilege restrictions and availability |
|
||||
|
||||
## Examples
|
||||
### Client/Server Model
|
||||
On one console, start `nc` listening on a specific port for a connection. For example:
|
||||
```shell
|
||||
nc -l 1234
|
||||
```
|
||||
|
||||
`nc` is now listening on port 1234 for a connection. On a second console (or a second machine), connect to the machine and port being listened on:
|
||||
```shell
|
||||
nc -N 127.0.0.1 1234
|
||||
```
|
||||
|
||||
There should now be a connection between the ports. Anything typed at the second console will be concatenated to the first, and vice-versa. After the connection has been set up, `nc` does not really care which side is being used as a ‘server’ and which side is being used as a ‘client’. The connection may be terminated using an `EOF` (`^D`), as the `-N` flag was given.
|
||||
|
||||
### Data Transfer
|
||||
The example in the previous section can be expanded to build a basic data transfer model. Any information input into one end of the connection will be output to the other end, and input and output can be easily captured in order to emulate file transfer.
|
||||
|
||||
Start by using `nc` to listen on a specific port, with output captured into a file:
|
||||
```shell
|
||||
nc -l 1234 > filename.out
|
||||
```
|
||||
|
||||
Using a second machine, connect to the listening `nc` process, feeding it the file which is to be transferred:
|
||||
```shell
|
||||
nc -N host.example.com 1234 < filename.in
|
||||
```
|
||||
|
||||
### Talking to Servers
|
||||
It is sometimes useful to talk to servers “by hand” rather than through a user interface. It can aid in troubleshooting, when it might be necessary to verify what data a server is sending in response to commands issued by the client. For example, to retrieve the home page of a web site:
|
||||
```shell
|
||||
printf "GET / HTTP/1.0\r\n\r\n" | nc host.example.com 80
|
||||
```
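
### Port Scanning
`nc` can also perform simple port scans, as mentioned above. The `-z` (scan without sending data) and `-v` (verbose) flags used below are not listed in the options table but are available in common netcat implementations:

```shell
nc -zv host.example.com 20-80
```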
|
33
technology/applications/cli/netdiscover.md
Normal file
|
@ -0,0 +1,33 @@
|
|||
---
|
||||
obj: application
|
||||
source: https://www.kali.org/tools/netdiscover
|
||||
repo: https://github.com/netdiscover-scanner/netdiscover
|
||||
---
|
||||
|
||||
# netdiscover
|
||||
Netdiscover is an active/passive address reconnaissance tool, mainly developed for wireless networks without a [DHCP](../../internet/DHCP.md) server, such as when you are wardriving. It can also be used on hub/switched networks.
|
||||
|
||||
Built on top of libnet and libpcap, it can passively detect online hosts, or search for them, by actively sending ARP requests.
|
||||
|
||||
Netdiscover can also be used to inspect your network ARP traffic, or find network addresses using auto scan mode, which will scan for common local networks.
|
||||
|
||||
Netdiscover uses the OUI table to show the vendor of each MAC address discovered and is very useful for security checks or in pentests.
|
||||
|
||||
## Options
|
||||
| Option | Description |
|
||||
| ------------ | -------------------------------------------------------------------------------------------- |
|
||||
| ` -i device` | network device used |
|
||||
| ` -r range` | scan a given range instead of auto scan. 192.168.6.0/24,/16,/8 |
|
||||
| ` -l file` | scan the list of ranges contained into the given file |
|
||||
| ` -p` | passive mode, do not send anything, only sniff |
|
||||
| ` -m file` | scan a list of known MACs and host names |
|
||||
| ` -F filter` | customize pcap filter expression (default: "arp") |
|
||||
| ` -s time` | time to sleep between each ARP request (milliseconds) |
|
||||
| ` -c count` | number of times to send each ARP request (for nets with packet loss) |
|
||||
| ` -n node` | last source IP octet used for scanning (from 2 to 253) |
|
||||
| ` -d` | ignore home config files for autoscan and fast mode |
|
||||
| ` -f` | enable fastmode scan, saves a lot of time, recommended for auto |
|
||||
| ` -P` | print results in a format suitable for parsing by another program and stop after active scan |
|
||||
| ` -L` | similar to `-P` but continue listening after the active scan is completed |
|
||||
| ` -N` | Do not print header. Only valid when `-P` or `-L` is enabled. |
|
||||
| ` -S` | enable sleep time suppression between each request (hardcore mode) |
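
For example (interface names and ranges are placeholders):

```shell
# Actively scan a /24 on a given interface
sudo netdiscover -i eth0 -r 192.168.1.0/24

# Passive mode: only sniff ARP traffic, send nothing
sudo netdiscover -p -i wlan0
```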
|
122
technology/applications/cli/nmap.md
Normal file
|
@ -0,0 +1,122 @@
|
|||
---
|
||||
obj: application
|
||||
website: https://nmap.org
|
||||
repo: https://github.com/nmap/nmap
|
||||
---
|
||||
|
||||
# nmap
|
||||
Network exploration tool and security / port scanner
|
||||
|
||||
## Usage
|
||||
Usage: `nmap [Scan Type(s)] [Options] {target specification}`
|
||||
|
||||
### Options
|
||||
#### TARGET SPECIFICATION
|
||||
Can pass hostnames, IP addresses, networks, etc.
|
||||
Ex: scanme.nmap.org, 192.168.0.1; 10.0.0-255.1-254
|
||||
|
||||
| Option | Description |
|
||||
| --------------------------------------- | --------------------------------- |
|
||||
| `-iL <inputfilename>` | Input from list of hosts/networks |
|
||||
| `--exclude <host1[,host2][,host3],...>` | Exclude hosts/networks |
|
||||
| `--excludefile <exclude_file>` | Exclude list from file |
|
||||
|
||||
#### HOST DISCOVERY
|
||||
| Option | Description |
|
||||
| ----------------------------------- | --------------------------------------------------------------------------------------------------- |
|
||||
| `-sL` | List Scan - simply list targets to scan |
|
||||
| `-sn` | Ping Scan - disable port scan |
|
||||
| `-PS/PA/PU/PY[portlist]` | [TCP](../../internet/TCP.md) SYN/ACK, [UDP](../../internet/UDP.md) or SCTP discovery to given ports |
|
||||
| `-PE/PP/PM` | ICMP echo, timestamp, and netmask request discovery probes |
|
||||
| `-n/-R` | Never do [DNS](../../internet/DNS.md) resolution/Always resolve \[default: sometimes] |
|
||||
| `--dns-servers <serv1[,serv2],...>` | Specify custom [DNS](../../internet/DNS.md) servers |
|
||||
| `--traceroute` | Trace hop path to each host |
|
||||
|
||||
#### SCAN TECHNIQUES
|
||||
| Option | Description |
|
||||
| --------------------- | ------------------------------------------------------------------ |
|
||||
| `-sS/sT/sA/sW/sM` | [TCP](../../internet/TCP.md) SYN/Connect()/ACK/Window/Maimon scans |
|
||||
| `-sU` | [UDP](../../internet/UDP.md) Scan |
|
||||
| `-sN/sF/sX` | [TCP](../../internet/TCP.md) Null, FIN, and Xmas scans |
|
||||
| `--scanflags <flags>` | Customize [TCP](../../internet/TCP.md) scan flags |
|
||||
| `-sO` | IP protocol scan |
|
||||
|
||||
#### PORT SPECIFICATION AND SCAN ORDER
|
||||
| Option | Description |
|
||||
| ------------------------------- | --------------------------------------------------------------------------------------------- |
|
||||
| `-p <port ranges>` | Only scan specified ports. Ex: `-p22`; `-p1-65535`; `-p U:53,111,137,T:21-25,80,139,8080,S:9` |
|
||||
| `--exclude-ports <port ranges>` | Exclude the specified ports from scanning |
|
||||
| `-F` | Fast mode - Scan fewer ports than the default scan |
|
||||
| `-r` | Scan ports sequentially - don't randomize |
|
||||
| `--top-ports <number>` | Scan \<number> most common ports |
|
||||
|
||||
#### SERVICE/VERSION DETECTION
|
||||
| Option | Description |
|
||||
| ----------------------------- | -------------------------------------------------- |
|
||||
| `-sV` | Probe open ports to determine service/version info |
|
||||
| `--version-intensity <level>` | Set from 0 (light) to 9 (try all probes) |
|
||||
| `--version-light` | Limit to most likely probes (intensity 2) |
|
||||
| `--version-all` | Try every single probe (intensity 9) |
|
||||
|
||||
#### SCRIPT SCAN
|
||||
| Option | Description |
|
||||
| ------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `-sC` | equivalent to `--script=default` |
|
||||
| `--script=<Lua scripts>` | \<Lua scripts> is a comma separated list of directories, script-files or script-categories. The scripts are commonly found at `/usr/share/nmap/scripts` |
|
||||
| `--script-updatedb` | Update the script database. |
|
||||
|
||||
#### OS DETECTION
|
||||
| Option | Description |
|
||||
| ---------------- | --------------------------------------- |
|
||||
| `-O` | Enable OS detection |
|
||||
| `--osscan-limit` | Limit OS detection to promising targets |
|
||||
| `--osscan-guess` | Guess OS more aggressively |
|
||||
|
||||
#### TIMING AND PERFORMANCE
|
||||
Options which take \<time> are in seconds, or append 'ms' (milliseconds), 's' (seconds), 'm' (minutes), or 'h' (hours) to the value (e.g. 30m).
|
||||
|
||||
| Option | Descriptions |
|
||||
| -------------------------------------------------------------- | ------------------------------------------------ |
|
||||
| `-T<0-5>` | Set timing template (higher is faster) |
|
||||
| `--min-hostgroup/max-hostgroup <size>` | Parallel host scan group sizes |
|
||||
| `--min-parallelism/max-parallelism <numprobes>` | Probe parallelization |
|
||||
| `--min-rtt-timeout/max-rtt-timeout/initial-rtt-timeout <time>` | Specifies probe round trip time. |
|
||||
| `--max-retries <tries>` | Caps number of port scan probe retransmissions. |
|
||||
| `--host-timeout <time>` | Give up on target after this long |
|
||||
| `--scan-delay/--max-scan-delay <time>` | Adjust delay between probes |
|
||||
| `--min-rate <number>` | Send packets no slower than \<number> per second |
|
||||
| `--max-rate <number>` | Send packets no faster than \<number> per second |
|
||||
|
||||
#### FIREWALL/IDS EVASION AND SPOOFING
|
||||
| Option | Description |
|
||||
| ---------------------------------------------- | ------------------------------------------------------------------------------------------------- |
|
||||
| `-f; --mtu <val>` | fragment packets (optionally w/given MTU) |
|
||||
| `-D <decoy1,decoy2[,ME],...>` | Cloak a scan with IP decoys |
|
||||
| `-S <IP_Address>` | Spoof source address |
|
||||
| `-e <iface>` | Use specified interface |
|
||||
| `-g/--source-port <portnum>` | Use given port number |
|
||||
| `--proxies <url1,[url2],...>` | Relay connections through [HTTP](../../internet/HTTP.md)/SOCKS4 proxies |
|
||||
| `--data <hex string>` | Append a custom payload to sent packets |
|
||||
| `--data-string <string>` | Append a custom [ASCII](../../files/ASCII.md) string to sent packets |
|
||||
| `--data-length <num>` | Append random data to sent packets |
|
||||
| `--ip-options <options>` | Send packets with specified ip options |
|
||||
| `--ttl <val>` | Set IP time-to-live field |
|
||||
| `--spoof-mac <mac address/prefix/vendor name>` | Spoof your MAC address |
|
||||
| `--badsum` | Send packets with a bogus [TCP](../../internet/TCP.md)/[UDP](../../internet/UDP.md)/SCTP checksum |
|
||||
|
||||
#### OUTPUT
|
||||
| Option | Description |
|
||||
| ------------------------- | -------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `-oN/-oX/-oS/-oG <file>` | Output scan in normal, [XML](../../files/XML.md), scrIpt kIddi3, and Grepable format, respectively, to the given filename. |
|
||||
| `-oA <basename>` | Output in the three major formats at once |
|
||||
| `-v` | Increase verbosity level (use `-vv` or more for greater effect) |
|
||||
| `--open` | Only show open (or possibly open) ports |
|
||||
| `--append-output` | Append to rather than clobber specified output files |
|
||||
| `--resume <filename>` | Resume an aborted scan |
|
||||
| `--stylesheet <path/URL>` | XSL stylesheet to transform [XML](../../files/XML.md) output to [HTML](../../internet/HTML.md) |
|
||||
| `--webxml` | Reference stylesheet from Nmap.Org for more portable [XML](../../files/XML.md) |
|
||||
| `--no-stylesheet` | Prevent associating of XSL stylesheet w/[XML](../../files/XML.md) output |
|
||||
|
||||
|
||||
|
||||
|
36
technology/applications/cli/p7zip.md
Normal file
36
technology/applications/cli/p7zip.md
Normal file
|
@ -0,0 +1,36 @@
|
|||
---
|
||||
obj: application
|
||||
os: linux
|
||||
aliases: ["7zip", "7z"]
|
||||
---
|
||||
# 7Zip
|
||||
7-Zip is a file archiver with the highest compression ratio. The program supports 7z (that implements LZMA compression algorithm), [ZIP](../../files/ZIP.md), CAB, ARJ, GZIP, BZIP2, [TAR](tar.md), CPIO, RPM and DEB formats. Compression ratio in the new 7z format is 30-50% better than ratio in [ZIP](../../files/ZIP.md) format.
|
||||
|
||||
## Usage
|
||||
Add file/directory to the archive (or create a new one):
|
||||
```shell
|
||||
7z a archive_name file_name
|
||||
7z a archive_name file_name -p -mhe=on # Password Protect
|
||||
```
|
||||
|
||||
Update existing files in the archive or add new ones:
|
||||
```shell
|
||||
7z u archive_name file_name
|
||||
```
|
||||
|
||||
List the content of an archive:
|
||||
```shell
|
||||
7z l archive_name
|
||||
```
|
||||
|
||||
Extract files:
|
||||
```shell
|
||||
7z e archive_name # without using dir names
|
||||
7z x archive_name # extract with full paths
|
||||
7z x -ofolder_name archive_name # extract into folder
|
||||
```
|
||||
|
||||
Check integrity of the archive:
|
||||
```shell
|
||||
7z t archive_name
|
||||
```
|
29
technology/applications/cli/patch.md
Normal file
29
technology/applications/cli/patch.md
Normal file
|
@ -0,0 +1,29 @@
|
|||
---
|
||||
obj: application
|
||||
repo: https://git.savannah.gnu.org/git/patch.git
|
||||
wiki: https://en.wikipedia.org/wiki/Patch_(Unix)
|
||||
---
|
||||
# patch
|
||||
patch takes a patch file patchfile containing a difference listing produced by the [diff](diff.md) program and applies those differences to one or more original files, producing patched versions. Normally the patched versions are put in place of the originals. Backups can be made; see the -b or --backup option. The names of the files to be patched are usually taken from the patch file, but if there's just one file to be patched it can be specified on the command line as originalfile.
|
||||
|
||||
## Usage
|
||||
```shell
|
||||
# To patch one file:
|
||||
patch <file> < <patch-file>
|
||||
|
||||
# To reverse a patch:
|
||||
patch -R <file> < <patch-file>
|
||||
|
||||
# To patch all files in a directory, adding any missing new files:
|
||||
patch -d <dir> -p1 -i <patch-file>   # adjust -p<N> to the path depth used in the patch
|
||||
```
|
||||
|
||||
## Options
|
||||
| Option | Description |
|
||||
| ------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `-b, --backup` | Make backup files. That is, when patching a file, rename or copy the original instead of removing it. When backing up a file that does not exist, an empty, unreadable backup file is created as a placeholder to represent the nonexistent file |
|
||||
| `-d, --directory <dir>` | Change to the directory dir immediately, before doing anything else. |
|
||||
| `--dry-run` | Print the results of applying the patches without actually changing any files |
|
||||
| `-i, --input <patchfile>` | Read the patch from patchfile. If patchfile is -, read from standard input, the default |
|
||||
| `--merge` | Merge a patch file into the original files. If a conflict is found, patch outputs a warning and brackets the conflict with <<<<<<< and >>>>>>> lines. |
|
||||
| `-R, --reverse` | Reverse the patch |
|
159
technology/applications/cli/pueue.md
Normal file
159
technology/applications/cli/pueue.md
Normal file
|
@ -0,0 +1,159 @@
|
|||
---
|
||||
obj: application
|
||||
repo: https://github.com/nukesor/pueue
|
||||
---
|
||||
# pueue
|
||||
Pueue is a command-line task management tool for sequential and parallel execution of long-running tasks.
|
||||
|
||||
Simply put, it's a tool that **p**rocesses a q**ueue** of [shell](Shell.md) commands. On top of that, there are a lot of convenient features and abstractions.
|
||||
|
||||
Since Pueue is not bound to any terminal, you can control your tasks from any terminal on the same machine. The queue will be continuously processed, even if you no longer have any active [ssh](../SSH.md) sessions.
|
||||
|
||||
## Start the Daemon
|
||||
Before you can use the `pueue` client, you have to start the daemon.
|
||||
|
||||
**Local:** The daemon can be run in the current [shell](Shell.md). Just run `pueued` anywhere on your command line. It'll exit if you close the terminal, though.
|
||||
|
||||
**Background:** To fork and run `pueued` into the background, add the `-d` or `--daemonize` flag. E.g. `pueued -d`.
|
||||
The daemon can always be shut down using the client command `pueue shutdown`.
|
||||
|
||||
### Systemd
|
||||
[Systemd](../../linux/Systemd.md) user services allow every user to start/enable their own services on [Linux](../../linux/Linux.md) distributions.
|
||||
|
||||
If you didn't install Pueue with a package manager, follow these instructions first:
|
||||
1. download `pueued.service` from the GitHub Releases page;
|
||||
2. place `pueued.service` in `/etc/systemd/user/` or `~/.config/systemd/user/`;
|
||||
3. make sure the `pueued` binary is placed at `/usr/bin`, which is where `pueued.service` expects it to be.
|
||||
|
||||
Then, regardless of how you installed Pueue, run:
|
||||
1. `systemctl --user start pueued`, to start the `pueued` service;
|
||||
2. `systemctl --user enable pueued`, to run the `pueued` service at system startup;
|
||||
3. `systemctl --user status pueued`, to ensure it is **active (running)**.
|
||||
|
||||
## Using pueue
|
||||
Usage: `pueue <action> <options>`
|
||||
|
||||
### `pueue add`
|
||||
Enqueue a task for execution.
|
||||
Usage: `pueue add <options> <command>`
|
||||
|
||||
#### Options
|
||||
| Option | Description |
|
||||
| --------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `-w, --working-directory <working-directory>` | Specify current working directory |
|
||||
| `-e, --escape` | Escape any special [shell](Shell.md) characters (" ", "&", "!", etc.). Beware: This implicitly disables nearly all [shell](Shell.md) specific syntax ("&&", "&>") |
|
||||
| `-i, --immediate` | Immediately start the task |
|
||||
| `-s, --stashed` | Create the task in Stashed state. Useful to avoid immediate execution if the queue is empty |
|
||||
| `-d, --delay <delay>` | Prevents the task from being enqueued until \<delay> elapses. |
|
||||
| `-g, --group <GROUP>` | Assign the task to a group. Groups kind of act as separate queues. I.e. all groups run in parallel and you can specify the amount of parallel tasks for each group. If no group is specified, the default group will be used |
|
||||
| `-a, --after <after>` | Start the task once all specified tasks have successfully finished. As soon as one of the dependencies fails, this task will fail as well |
|
||||
| `-o, --priority <PRIORITY>` | Start this task with a higher priority. The higher the number, the faster it will be processed |
|
||||
| `-l, --label <LABEL>` | Add some information for yourself. This string will be shown in the "status" table. There's no additional logic connected to it |
|
||||
| `-p, --print-task-id` | Only return the task id instead of a text. This is useful when working with dependencies |
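A short example session (commands and paths are placeholders):

```shell
pueued -d                                  # start the daemon in the background
pueue add -- rsync -a ~/data /mnt/backup   # queue a long-running command
pueue status                               # inspect the queue
pueue follow 0                             # watch the output of task 0
```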
|
||||
|
||||
### `pueue remove`
|
||||
Remove tasks from the list. Running or paused tasks need to be killed first
|
||||
Usage: `pueue remove <TASK_IDS>...`
|
||||
|
||||
### `pueue switch`
|
||||
Switches the queue position of two commands. Only works on queued and stashed commands
|
||||
Usage: `pueue switch <TASK_ID_1> <TASK_ID_2>`
|
||||
|
||||
### `pueue stash`
|
||||
Stashed tasks won't be automatically started. You have to enqueue them or start them by hand
|
||||
Usage: `pueue stash <TASK_IDS>...`
|
||||
|
||||
### `pueue enqueue`
|
||||
Enqueue stashed tasks. They'll be handled normally afterwards
|
||||
Usage: `pueue enqueue [-d, --delay <delay>] [TASK_IDS]...`
|
||||
|
||||
### `pueue start`
|
||||
Resume operation of specific tasks or groups of tasks.
|
||||
By default, this resumes the default group and all its tasks.
|
||||
It can also be used to force-start specific tasks.
|
||||
Usage: `pueue start [-g, --group <GROUP>] [TASK_IDS]...`
|
||||
|
||||
|
||||
### `pueue restart`
Restart failed or successful task(s).
By default, identical tasks will be created and enqueued, but it's possible to restart in-place.
You can also edit a few properties, such as the path and the command, before restarting.
Usage: `pueue restart [OPTIONS] [TASK_IDS]...`
|
||||
|
||||
#### Options
|
||||
| Option | Description |
|
||||
| ----------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `-a, --all-failed` | Restart all failed tasks across all groups.<br>Nice to use in combination with `-i/--in-place` |
|
||||
| `-g, --failed-in-group <FAILED_IN_GROUP>` | Like `--all-failed`, but only restart failed tasks of a specific group. The group will be set to running and its paused tasks will be resumed |
|
||||
| `-k, --start-immediately` | Immediately start the tasks, no matter how many open slots there are. This will ignore any dependencies tasks may have |
|
||||
| `-s, --stashed` | Set the restarted task to a "Stashed" state. Useful to avoid immediate execution |
|
||||
| `-i, --in-place` | Restart the task by reusing the already existing tasks. This will overwrite any previous logs of the restarted tasks |
|
||||
| `--not-in-place` | Restart the task by creating a new identical task. Only applies if you have the restart_in_place configuration set to true |
|
||||
| `-e, --edit` | Edit the tasks' commands before restarting |
|
||||
| `-p, --edit-path` | Edit the tasks' paths before restarting |
|
||||
| `-l, --edit-label` | Edit the tasks' labels before restarting |
|
||||
|
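### `pueue pause`
Pause running tasks or whole groups of tasks.
By default, this pauses the default group and all of its tasks.
Usage: `pueue pause [-g, --group <GROUP>] [TASK_IDS]...`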
||||
### `pueue kill`
|
||||
Kill specific running tasks or whole task groups.
|
||||
Kills all tasks of the default group when no ids or a specific group are provided.
|
||||
Usage: `pueue kill [-g, --group <GROUP>] [TASK_IDS]...`
|
||||
|
||||
### `pueue send`
|
||||
Send something to a task. Useful for sending confirmations such as 'y\n'
|
||||
|
||||
Usage: `pueue send <TASK_ID> <INPUT>`
|
||||
|
||||
### `pueue edit`
|
||||
Edit the command, path or label of a stashed or queued task.
|
||||
By default only the command is edited.
|
||||
Multiple properties can be added in one go.
|
||||
Usage: `pueue edit [OPTIONS] <TASK_ID>`
|
||||
|
||||
#### Options
|
||||
| Option | Description |
|
||||
| --------------- | ----------------------- |
|
||||
| `-c, --command` | Edit the task's command |
|
||||
| `-p, --path` | Edit the task's path |
|
||||
| `-l, --label` | Edit the task's label |
|
||||
| `-h, --help` | Print help |
|
||||
|
||||
### `pueue group`
|
||||
Use this to add or remove groups.
|
||||
By default, this will simply display all known groups.
|
||||
Usage: `pueue group [-j, --json] [COMMAND]`
|
||||
|
||||
Add a group by name:
|
||||
Usage: `pueue group add [-p, --parallel <PARALLEL>] <NAME>`
|
||||
|
||||
Remove a group by name. This will move all tasks in this group to the default group!
|
||||
Usage: `pueue group remove <NAME>`
|
||||
|
||||
### `pueue status`
|
||||
Display the current status of all tasks
|
||||
Usage: `pueue status [-j, --json] [-g, --group <GROUP>] [QUERY]...`
|
||||
|
||||
### `pueue log`
|
||||
Display the log output of finished tasks.
|
||||
Only the last few lines will be shown by default.
|
||||
If you want to follow the output of a task, please use the "follow" subcommand.
|
||||
Usage: `pueue log [-l, --lines <LINES>] [-f, --full] [TASK_IDS]...`
|
||||
|
||||
### `pueue follow`
|
||||
Follow the output of a currently running task. This command works like "tail -f"
|
||||
Usage: `pueue follow [TASK_ID]`
|
||||
|
||||
### `pueue wait`
|
||||
Wait until tasks are finished.
|
||||
By default, this will wait for all tasks in the default group to finish.
|
||||
Note: This will also wait for all tasks that aren't somehow 'Done'.
|
||||
|
||||
Usage: `pueue wait [ -g, --group <GROUP>] [-a, --all] [-q, --quiet] [TASK_IDS]...`
|
||||
|
||||
### `pueue clean`
|
||||
Remove all finished tasks from the list
|
||||
Usage: `pueue clean [-s, --successful-only] [-g, --group <GROUP>]`
|
10
technology/applications/cli/ripgrep.md
Normal file
10
technology/applications/cli/ripgrep.md
Normal file
|
@ -0,0 +1,10 @@
|
|||
---
|
||||
obj: application
|
||||
os:
|
||||
- linux
|
||||
repo: https://github.com/BurntSushi/ripgrep
|
||||
---
|
||||
# ripgrep
|
||||
#refactor
|
||||
[Repo](https://github.com/BurntSushi/ripgrep)
|
||||
ripgrep is a line-oriented search tool that recursively searches the current directory for a regex pattern. By default, ripgrep will respect gitignore rules and automatically skip hidden files/directories and binary files. ripgrep has first class support on Windows, [macOS](../../macos/macOS.md) and [Linux](../../linux/Linux.md) with binary downloads available for [every release](https://github.com/BurntSushi/ripgrep/releases). ripgrep is similar to other popular search tools like The Silver Searcher, ack and grep.
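A few typical searches as a sketch (patterns and paths are examples):

```shell
rg 'TODO'              # search recursively from the current directory
rg -i 'error' src/     # case-insensitive search in a subdirectory
rg -t py 'def main'    # restrict the search to Python files
rg -l 'fixme'          # only list the names of matching files
```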
|
20
technology/applications/cli/rnr.md
Normal file
20
technology/applications/cli/rnr.md
Normal file
|
@ -0,0 +1,20 @@
|
|||
---
|
||||
obj: application
|
||||
os: linux
|
||||
repo: https://github.com/ismaelgv/rnr
|
||||
---
|
||||
# rnr
|
||||
[Repo](https://github.com/ismaelgv/rnr)
|
||||
**RnR** is a command-line tool to **securely rename** multiple files and directories that supports regular expressions.
|
||||
|
||||
## Usage
|
||||
Flags:
|
||||
```shell
|
||||
-n, --dry-run Only show what would be done (default mode)
|
||||
-f, --force Make actual changes to files
|
||||
-x, --hidden Include hidden files and directories
|
||||
-D, --include-dirs Rename matching directories
|
||||
-r, --recursive Recursive mode
|
||||
-s, --silent Do not print any information
|
||||
--no-dump Do not dump operations into a file
|
||||
```
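A minimal sketch (file names and patterns are examples); note that rnr performs a dry run by default and only renames when `-f` is given:

```shell
rnr 'foo' 'bar' *.txt      # show what would be renamed
rnr -f 'foo' 'bar' *.txt   # actually rename the files
```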
|
47
technology/applications/cli/rsync.md
Normal file
47
technology/applications/cli/rsync.md
Normal file
|
@ -0,0 +1,47 @@
|
|||
---
|
||||
obj: application
|
||||
os: linux
|
||||
website: https://rsync.samba.org/
|
||||
repo: https://github.com/WayneD/rsync
|
||||
---
|
||||
# rsync
|
||||
Rsync is an open-source tool that is used for synchronizing and transferring files between two locations. It is widely used by system administrators and developers for efficient and reliable file transfer.
|
||||
|
||||
## Usage
|
||||
To use rsync:
|
||||
```shell
|
||||
rsync [options] source destination
|
||||
```
|
||||
|
||||
Either `source` or `destination` can be a local folder or a remote path (`user@host:path`)
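For example (host and paths are placeholders; all flags used here are listed in the table below):

```shell
# Archive mode, compressed, with progress, to a remote host
rsync -avz --progress ~/documents/ user@host:backup/documents/

# Dry run first to see what would be changed or deleted
rsync -avzn --delete ~/documents/ user@host:backup/documents/
```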
|
||||
|
||||
|
||||
## Options
|
||||
| option | description |
|
||||
| --------------------- | ------------------------------------------- |
|
||||
| --quiet, -q | suppress non-error messages |
|
||||
| --archive, -a | archive mode |
|
||||
| --recursive, -r | recurse into directories |
|
||||
| --update, -u | skip files that are newer on the receiver |
|
||||
| --append | append data onto shorter files |
|
||||
| --verbose, -v | increase verbosity |
|
||||
| --links, -l | copy symlinks as symlinks |
|
||||
| --perms, -p | preserve permissions |
|
||||
| --chmod=CHMOD | affect file and/or directory permissions |
|
||||
| --owner, -o | preserve owner (super-user only) |
|
||||
| --group, -g | preserve group |
|
||||
| --dry-run, -n | perform a trial run with no changes made |
|
||||
| --one-file-system, -x | don't cross filesystem boundaries |
|
||||
| --rsh=COMMAND, -e | specify the remote shell to use |
|
||||
| --delete | delete extraneous files from dest dirs |
|
||||
| --remove-source-files | sender removes synchronized files (non-dir) |
|
||||
| --compress, -z | compress file data during the transfer |
|
||||
| --exclude=PATTERN | exclude files matching PATTERN |
|
||||
| --exclude-from=FILE | read exclude patterns from FILE |
|
||||
| --stats | give some file-transfer stats |
|
||||
| --progress | show progress during transfer |
|
||||
| --human-readable, -h | output numbers in a human-readable format |
|
||||
| --log-file=FILE | log what we're doing to the specified FILE |
|
||||
| --partial | keep partially transferred files |
|
||||
| -P | same as --partial --progress |
|
||||
|
14
technology/applications/cli/sd.md
Normal file
14
technology/applications/cli/sd.md
Normal file
|
@ -0,0 +1,14 @@
|
|||
---
|
||||
obj: application
|
||||
os: linux
|
||||
repo: https://github.com/chmln/sd
|
||||
---
|
||||
# sd
|
||||
[Repo](https://github.com/chmln/sd)
|
||||
`sd` is an intuitive find & replace CLI.
|
||||
|
||||
## Usage
|
||||
```shell
|
||||
sd before after # replace before with after
|
||||
sd -p before after # preview changes without changing
|
||||
```
|
15
technology/applications/cli/skim.md
Normal file
15
technology/applications/cli/skim.md
Normal file
|
@ -0,0 +1,15 @@
|
|||
---
|
||||
obj: application
|
||||
os: linux
|
||||
repo: https://github.com/lotabout/skim
|
||||
---
|
||||
# skim
|
||||
[Repo](https://github.com/lotabout/skim)
|
||||
Skim is a fuzzy finder.
|
||||
#refactor
|
||||
|
||||
## Usage
|
||||
Flags:
|
||||
```shell
|
||||
-m, --multi Enable Multiple Selection
|
||||
```
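A quick sketch of typical use (the binary is `sk`; the input sources are examples):

```shell
sk                       # fuzzy-pick a file below the current directory
find . -type f | sk -m   # multi-select from arbitrary piped input
```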
|
73
technology/applications/cli/smbmap.md
Normal file
73
technology/applications/cli/smbmap.md
Normal file
|
@ -0,0 +1,73 @@
|
|||
---
|
||||
obj: application
|
||||
repo: https://github.com/ShawnDEvans/smbmap
|
||||
source: https://www.kali.org/tools/smbmap
|
||||
---
|
||||
|
||||
# smbmap
|
||||
SMBMap allows users to enumerate [samba](../web/Samba.md) share drives across an entire domain. List share drives, drive permissions, share contents, upload/download functionality, file name auto-download pattern matching, and even execute remote commands. This tool was designed with pen testing in mind, and is intended to simplify searching for potentially sensitive data across large networks.
|
||||
|
||||
## Usage
|
||||
Usage: `smbmap [options]...`
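For example (host and credentials are placeholders):

```shell
# Null session: list shares and permissions
smbmap -H 192.168.1.10

# Authenticated, recursively list the root of all shares
smbmap -H 192.168.1.10 -u user -p 'Password1' -d WORKGROUP -r
```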
|
||||
|
||||
### Options
|
||||
#### Main arguments
|
||||
| Option | Description |
|
||||
| ------------------------ | --------------------------------------------------- |
|
||||
| `-H HOST` | IP of host |
|
||||
| `--host-file FILE` | File containing a list of hosts |
|
||||
| `-u USERNAME` | Username, if omitted null session assumed |
|
||||
| `-p PASSWORD` | Password or NTLM hash |
|
||||
| `--prompt` | Prompt for a password |
|
||||
| `-s SHARE` | Specify a share |
|
||||
| `-d DOMAIN` | Domain name (default WORKGROUP) |
|
||||
| `-P PORT` | SMB port (default 445) |
|
||||
| `-v` | Return the OS version of the remote host |
|
||||
| `--admin` | Just report if the user is an admin |
|
||||
| `--no-banner` | Removes the banner from the top of the output |
|
||||
| `--no-color` | Removes the color from output |
|
||||
| `--no-update` | Removes the "Working on it" message |
|
||||
| `--timeout SCAN_TIMEOUT` | Set port scan socket timeout. Default is .5 seconds |
|
||||
|
||||
#### Command Execution
|
||||
Options for executing commands on the specified host
|
||||
|
||||
| Option | Description |
|
||||
| ---------------- | ---------------------------------------------------------- |
|
||||
| `-x COMMAND` | Execute a command ex. `ipconfig /all` |
|
||||
| `--mode CMDMODE` | Set the execution method, `wmi` or `psexec`, default `wmi` |
|
||||
|
||||
#### Share drive Search
|
||||
Options for searching/enumerating the share of the specified host(s)
|
||||
|
||||
| Option | Description |
|
||||
| ----------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `-L` | List all drives on the specified host, requires ADMIN rights. |
|
||||
| `-r [PATH]` | Recursively list dirs and files (no share\path lists the root of ALL shares), ex. 'email/backup' |
|
||||
| `-A PATTERN` | Define a file name pattern ([regex](../../tools/Regex.md)) that auto downloads a file on a match (requires `-r`), not case sensitive |
|
||||
| `-g FILE` | Output to a file in a grep friendly format, used with `-r` (otherwise it outputs nothing) |
|
||||
| `--csv FILE` | Output to a [CSV](../../files/CSV.md) file |
|
||||
| `--dir-only` | List only directories, omit files. |
|
||||
| `--no-write-check` | Skip check to see if drive grants WRITE access. |
|
||||
| `-q` | Quiet verbose output. Only shows shares you have READ or WRITE on, and suppresses file listing when performing a search. |
|
||||
| `--depth DEPTH` | Traverse a directory tree to a specific depth. (Default: 5) |
|
||||
| `--exclude SHARE [SHARE ...]` | Exclude share(s) from searching and listing |
|
||||
|
||||
#### File Content Search
|
||||
Options for searching the content of files (must run as root), kind of experimental
|
||||
|
||||
| Option | Description |
|
||||
| -------------------------- | ------------------------------------------------------------------------------------------------------------------ |
|
||||
| `-F PATTERN` | File content search, `-F '[Pp]assword'` (requires admin access to execute commands, and PowerShell on victim host) |
|
||||
| `--search-path PATH` | Specify drive/path to search |
|
||||
| `--search-timeout TIMEOUT` | Specify a timeout (in seconds) before the file search job gets killed. Default is 300 seconds. |
|
||||
|
||||
#### Filesystem interaction
|
||||
Options for interacting with the specified host's filesystem
|
||||
|
||||
| Option | Description |
|
||||
| ----------------------- | -------------------------------------- |
|
||||
| `--download PATH` | Download a file from the remote system |
|
||||
| `--upload SRC DST` | Upload a file to the remote system |
|
||||
| `--delete PATH TO FILE` | Delete a remote file |
|
||||
| `--skip` | Skip delete file confirmation prompt |
|
21
technology/applications/cli/tailspin.md
Normal file
21
technology/applications/cli/tailspin.md
Normal file
File diff suppressed because one or more lines are too long
31
technology/applications/cli/tar.md
Normal file
31
technology/applications/cli/tar.md
Normal file
|
@ -0,0 +1,31 @@
|
|||
---
|
||||
obj: application
|
||||
os: linux
|
||||
website: https://savannah.gnu.org/projects/tar
|
||||
repo: https://git.savannah.gnu.org/cgit/tar.git
|
||||
---
|
||||
# Tar
|
||||
Tar is the most widely used command on Unix and Linux-like operating systems for creating an archive of multiple files and folders in a single archive file; that archive can then be further compressed with other compression techniques.
|
||||
|
||||
Creating Archives:
|
||||
```shell
|
||||
tar -cvf myarchive.tar /etc /root
|
||||
```
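The archive can also be compressed on the fly, for example with gzip (`-z`):

```shell
tar -czvf myarchive.tar.gz /etc /root   # create a gzip-compressed archive
tar -xzvf myarchive.tar.gz -C /tmp      # extract it to /tmp
```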
|
||||
|
||||
List contents:
|
||||
```shell
|
||||
tar -tvf myarchive.tar # List contents
|
||||
tar -tvf myarchive.tar path # List contents of path
|
||||
```
|
||||
|
||||
Append or add files:
|
||||
```shell
|
||||
tar -rvf data.tar /etc/fstab
|
||||
```
|
||||
|
||||
Extract files:
|
||||
```shell
|
||||
tar -xvf myarchive.tar # Extract
|
||||
tar -xvf myarchive.tar -C /tmp # Extract to /tmp
|
||||
tar -xvf myarchive.tar /etc/fstab -C /etc # Only extract /etc/fstab to /etc
|
||||
```
|
38
technology/applications/cli/tmux.md
Normal file
38
technology/applications/cli/tmux.md
Normal file
|
@ -0,0 +1,38 @@
|
|||
---
|
||||
obj: application
|
||||
repo: https://github.com/tmux/tmux
|
||||
arch-wiki: https://wiki.archlinux.org/title/tmux
|
||||
wiki: https://en.wikipedia.org/wiki/Tmux
|
||||
---
|
||||
# tmux
|
||||
tmux is a terminal multiplexer: it enables a number of terminals to be created, accessed, and controlled from a single screen. tmux may be detached from a screen and continue running in the background, then later reattached.
|
||||
|
||||
# Usage
|
||||
**New tmux session:**
|
||||
```shell
|
||||
tmux new -s name
|
||||
```
|
||||
|
||||
**List existing sessions:**
|
||||
```shell
|
||||
tmux ls
|
||||
```
|
||||
|
||||
**Attach to a named session:**
|
||||
```shell
|
||||
tmux attach -t name
|
||||
```
|
||||
|
||||
**Kill a session:**
|
||||
```shell
|
||||
tmux kill-session -t name
|
||||
```
|
||||
|
||||
# Keybinds
|
||||
- Switch sessions: `Ctrl-b s`
|
||||
- Detach from a running session: `Ctrl-b d`
|
||||
- Create a new window inside session: `Ctrl-b c`
|
||||
- Go to next window: `Ctrl-b n`
|
||||
- Switch sessions and windows: `Ctrl-b w`
|
||||
- Go to window: `Ctrl-b [0-9]`
|
||||
- Kill the current pane: `Ctrl-b x`
|
18
technology/applications/cli/tokei.md
Normal file
18
technology/applications/cli/tokei.md
Normal file
|
@ -0,0 +1,18 @@
|
|||
---
|
||||
obj: application
|
||||
repo: https://github.com/XAMPPRocky/tokei
|
||||
---
|
||||
# tokei
|
||||
Count your code
|
||||
Usage: `tokei [OPTIONS] [--] [input]...`
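A few example invocations (paths are placeholders):

```shell
tokei                      # count code in the current directory
tokei src/ --files         # per-file statistics for src/
tokei -o json              # machine-readable output
tokei -t=Rust,Markdown     # only count the given languages
```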
|
||||
|
||||
## Options
|
||||
| Option | Description |
|
||||
| ---------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `-e, --exclude <exclude>...` | Ignore all files & directories matching the pattern |
|
||||
| `-f, --files` | Will print out statistics on individual files |
|
||||
| `--hidden` | Count hidden files |
|
||||
| `-l, --languages` | Prints out supported languages and their extensions |
|
||||
| `--no-ignore` | Don't respect ignore files (.gitignore, .ignore, etc.) |
|
||||
| `-o, --output <output>` | Outputs Tokei in a specific format. Compile with additional features for more format support. \[possible values: cbor, json, yaml] |
|
||||
| `-t, --types <types>` | Filters output by language type, separated by a comma. i.e. -t=Rust,Markdown |
|
79
technology/applications/cli/wget.md
Normal file
79
technology/applications/cli/wget.md
Normal file
|
@ -0,0 +1,79 @@
|
|||
---
|
||||
obj: application
|
||||
arch-wiki: https://wiki.archlinux.org/title/Wget
|
||||
wiki: https://en.wikipedia.org/wiki/Wget
|
||||
repo: https://git.savannah.gnu.org/cgit/wget.git
|
||||
---
|
||||
# wget
|
||||
GNU Wget is a free utility for non-interactive download of files from the Web. It supports [HTTP](../../internet/HTTP.md), HTTPS, and [FTP](../../internet/FTP.md) protocols, as well as retrieval through [HTTP](../../internet/HTTP.md) proxies.
|
||||
|
||||
Wget is non-interactive, meaning that it can work in the background, while the user is not logged on. This allows you to start a retrieval and disconnect from the system, letting Wget finish the work. By contrast, most of the Web browsers require constant user's presence, which can be a great hindrance when transferring a lot of data.
|
||||
|
||||
Wget can follow links in [HTML](../../internet/HTML.md), XHTML, and [CSS](../../internet/CSS.md) pages, to create local versions of remote web sites, fully recreating the directory structure of the original site. This is sometimes referred to as "recursive downloading." While doing that, Wget respects the Robot Exclusion Standard (/robots.txt). Wget can be instructed to convert the links in downloaded files to point at the local files, for offline viewing.
|
||||
|
||||
Wget has been designed for robustness over slow or unstable network connections; if a download fails due to a network problem, it will keep retrying until the whole file has been retrieved. If the server supports regetting, it will instruct the server to continue the download from where it left off.
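Some common invocations as a sketch (URLs are placeholders; the flags used here are described in the tables below):

```shell
# Resume a partially downloaded file
wget -c https://example.com/big.iso

# Save under a different name, quietly
wget -q -O latest.tar.gz https://example.com/release.tar.gz

# Mirror a page with everything needed to view it offline
wget -r -l 2 -k -p https://example.com/docs/
```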
|
||||
|
||||
## Options
|
||||
| Option | Description |
|
||||
| ----------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `-b, --background` | Go to background immediately after startup. If no output file is specified via the -o, output is redirected to wget-log. |
|
||||
| `-e, --execute command` | Execute command as if it were a part of .wgetrc. A command thus invoked will be executed after the commands in .wgetrc, thus taking precedence over them. If you need to specify more than one wgetrc command, use multiple instances of -e. |
|
||||
|
||||
### Logging Options
|
||||
| Option | Description |
|
||||
| ----------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `-o, --output-file=logfile` | Log all messages to logfile. The messages are normally reported to standard error. |
|
||||
| `-a, --append-output=logfile` | Append to logfile. This is the same as -o, only it appends to logfile instead of overwriting the old log file. |
|
||||
| `-q, --quiet` | Turn off Wget's output. |
|
||||
| `-i, --input-file=file` | Read URLs from a local or external file. If - is specified as file, URLs are read from the standard input. (Use ./- to read from a file literally named -.). If this function is used, no URLs need be present on the command line. If there are URLs both on the command line and in an input file, those on the command lines will be the first ones to be retrieved. If --force-html is not specified, then file should consist of a series of URLs, one per line. However, if you specify --force-html, the document will be regarded as [html](../../internet/HTML.md). In that case you may have problems with relative links, which you can solve either by adding "\<base href="url">" to the documents or by specifying --base=url on the command line. If the file is an external one, the document will be automatically treated as [html](../../internet/HTML.md) if the Content-Type matches text/html. Furthermore, the file's location will be implicitly used as base href if none was specified. |
|
||||
| `-B, --base=URL` | Resolves relative links using URL as the point of reference, when reading links from an [HTML](../../internet/HTML.md) file specified via the -i/--input-file option (together with --force-html, or when the input file was fetched remotely from a server describing it as [HTML](../../internet/HTML.md)). This is equivalent to the presence of a "BASE" tag in the [HTML](../../internet/HTML.md) input file, with URL as the value for the "href" attribute. |
|
||||
|
||||
### Download Options
|
||||
| Option | Description |
|
||||
| ---------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `-t, --tries=number` | Set number of tries to number. Specify 0 or inf for infinite retrying. The default is to retry 20 times, with the exception of fatal errors like "connection refused" or "not found" (404), which are not retried. |
|
||||
| `-O, --output-document=file` | The documents will not be written to the appropriate files, but all will be concatenated together and written to file. If - is used as file, documents will be printed to standard output, disabling link conversion. (Use ./- to print to a file literally named -.) |
|
||||
| `--backups=backups` | Before (over)writing a file, back up an existing file by adding a .1 suffix (\_1 on VMS) to the file name. Such backup files are rotated to .2, .3, and so on, up to `backups` (and lost beyond that) |
|
||||
| `-c, --continue` | Continue getting a partially-downloaded file. This is useful when you want to finish up a download started by a previous instance of Wget, or by another program. |
|
||||
| `--show-progress` | Force wget to display the progress bar in any verbosity. |
|
||||
| `-T, --timeout=seconds` | Set the network timeout to `seconds` seconds. |
|
||||
| `--limit-rate=amount` | Limit the download speed to amount bytes per second. Amount may be expressed in bytes, kilobytes with the k suffix, or megabytes with the m suffix. For example, --limit-rate=20k will limit the retrieval rate to 20KB/s. This is useful when, for whatever reason, you don't want Wget to consume the entire available bandwidth. |
|
||||
| `-w, --wait=seconds` | Wait the specified number of seconds between the retrievals. Use of this option is recommended, as it lightens the server load by making the requests less frequent. Instead of in seconds, the time can be specified in minutes using the "m" suffix, in hours using "h" suffix, or in days using "d" suffix. |
|
||||
| `--waitretry=seconds` | If you don't want Wget to wait between every retrieval, but only between retries of failed downloads, you can use this option. Wget will use linear backoff, waiting 1 second after the first failure on a given file, then waiting 2 seconds after the second failure on that file, up to the maximum number of seconds you specify. |
|
||||
| `--random-wait` | Some web sites may perform log analysis to identify retrieval programs such as Wget by looking for statistically significant similarities in the time between requests. This option causes the time between requests to vary between 0.5 and 1.5 * wait seconds, where wait was specified using the --wait option, in order to mask Wget's presence from such analysis. |
|
||||
| `--user=user, --password=password` | Specify the username and password for both [FTP](../../internet/FTP.md) and [HTTP](../../internet/HTTP.md) file retrieval. |
|
||||
| `--ask-password` | Prompt for a password for each connection established. |
|
||||
|
||||
### Directory Options
|
||||
| Option | Description |
|
||||
| ------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `-nH, --no-host-directories` | Disable generation of host-prefixed directories. By default, invoking Wget with -r http://fly.srk.fer.hr/ will create a structure of directories beginning with fly.srk.fer.hr/. This option disables such behavior. |
|
||||
| `--cut-dirs=number` | Ignore number directory components. This is useful for getting a fine-grained control over the directory where recursive retrieval will be saved. |
|
||||
| `-P, --directory-prefix=prefix` | Set directory prefix to prefix. The directory prefix is the directory where all other files and subdirectories will be saved to, i.e. the top of the retrieval tree. The default is . (the current directory). |
|
||||
|
||||
### HTTP Options
|
||||
| Option | Description |
|
||||
| ---------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `--no-cookies` | Disable the use of cookies. |
|
||||
| `--load-cookies file` | Load cookies from file before the first [HTTP](../../internet/HTTP.md) retrieval. file is a textual file in the format originally used by Netscape's cookies.txt file. |
|
||||
| `--save-cookies file` | Save cookies to file before exiting. This will not save cookies that have expired or that have no expiry time (so-called "session cookies"), but also see --keep-session-cookies. |
|
||||
| `--keep-session-cookies` | When specified, causes --save-cookies to also save session cookies. Session cookies are normally not saved because they are meant to be kept in memory and forgotten when you exit the browser. Saving them is useful on sites that require you to log in or to visit the home page before you can access some pages. With this option, multiple Wget runs are considered a single browser session as far as the site is concerned. |
|
||||
| `--header=header-line` | Send header-line along with the rest of the headers in each [HTTP](../../internet/HTTP.md) request. The supplied header is sent as-is, which means it must contain name and value separated by colon, and must not contain newlines. |
|
||||
| `--proxy-user=user, --proxy-password=password` | Specify the username user and password password for authentication on a proxy server. Wget will encode them using the "basic" authentication scheme. |
|
||||
| `--referer=url` | Include 'Referer: url' header in [HTTP](../../internet/HTTP.md) request. Useful for retrieving documents with server-side processing that assume they are always being retrieved by interactive web browsers and only come out properly when Referer is set to one of the pages that point to them. |
|
||||
| `-U, --user-agent=agent-string` | Identify as `agent-string` to the [HTTP](../../internet/HTTP.md) server. |
|
||||
|
||||
### HTTPS Options
|
||||
| Option | Description |
|
||||
| -------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| `--no-check-certificate` | Don't check the server certificate against the available certificate authorities. Also don't require the URL host name to match the common name presented by the certificate. |
|
||||
| `--ca-certificate=file` | Use file as the file with the bundle of certificate authorities ("CA") to verify the peers. The certificates must be in PEM format. |
|
||||
| `--ca-directory=directory` | Specifies directory containing CA certificates in PEM format. |
|
||||
|
||||
### Recursive Retrieval Options
|
||||
| Option | Description |
|
||||
| ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| `-r, --recursive` | Turn on recursive retrieving. The default maximum depth is 5. |
|
||||
| `-l, --level=depth` | Set the maximum number of subdirectories that Wget will recurse into to depth. |
|
||||
| `-k, --convert-links` | After the download is complete, convert the links in the document to make them suitable for local viewing. This affects not only the visible hyperlinks, but any part of the document that links to external content, such as embedded images, links to style sheets, hyperlinks to non-[HTML](../../internet/HTML.md) content, etc. |
|
||||
| `-p, --page-requisites` | This option causes Wget to download all the files that are necessary to properly display a given [HTML](../../internet/HTML.md) page. This includes such things as inlined images, sounds, and referenced stylesheets. |
|
13
technology/applications/cli/zsh.md
Normal file
13
technology/applications/cli/zsh.md
Normal file
|
@ -0,0 +1,13 @@
|
|||
---
|
||||
obj: application
|
||||
website: https://zsh.sourceforge.io/
|
||||
wiki: https://en.wikipedia.org/wiki/Z_shell
|
||||
arch-wiki: https://wiki.archlinux.org/title/zsh
|
||||
repo: https://github.com/zsh-users/zsh
|
||||
---
|
||||
|
||||
# zsh
|
||||
Zsh is a powerful [shell](Shell.md) that operates as both an interactive [shell](Shell.md) and as a scripting language interpreter.
|
||||
|
||||
## Configuration
|
||||
`~/.zshrc`: Used for setting user's interactive [shell](Shell.md) configuration and executing commands, will be read when starting as an _**interactive shell**_.
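A minimal `~/.zshrc` sketch (all values are examples, adjust to taste):

```shell
# History
HISTFILE=~/.zsh_history
HISTSIZE=10000
SAVEHIST=10000

# Completion and line editing
autoload -Uz compinit && compinit   # enable tab completion
bindkey -e                          # emacs-style keybindings

# Aliases and prompt
alias ll='ls -l'
PROMPT='%n@%m %~ %# '
```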
|
12
technology/applications/communication/Discord.md
Normal file
12
technology/applications/communication/Discord.md
Normal file
File diff suppressed because one or more lines are too long
81
technology/applications/communication/Element.md
Normal file
81
technology/applications/communication/Element.md
Normal file
File diff suppressed because one or more lines are too long
8
technology/applications/communication/Gmail.md
Normal file
8
technology/applications/communication/Gmail.md
Normal file
|
@ -0,0 +1,8 @@
|
|||
---
|
||||
obj: application
|
||||
android-id: com.google.android.gm
|
||||
website: https://mail.google.com
|
||||
wiki: https://en.wikipedia.org/wiki/Gmail
|
||||
---
|
||||
# Gmail
|
||||
Gmail is a free [email](../../internet/eMail.md) service provided by Google. As of 2019, it had 1.5 billion active users worldwide, making it the largest [email](../../internet/eMail.md) service in the world. It provides a webmail interface, accessible through a web browser, as well as an official mobile application. Google also supports the use of third-party [email](../../internet/eMail.md) clients via the POP and IMAP protocols.
|
9
technology/applications/communication/ProtonMail.md
Normal file
9
technology/applications/communication/ProtonMail.md
Normal file
|
@ -0,0 +1,9 @@
|
|||
---
|
||||
obj: application
|
||||
android-id: ch.protonmail.android
|
||||
website:
|
||||
- https://mail.protonmail.com
|
||||
- https://protonmailrmez3lotccipshtkleegetolb73fuirgj7r4o4vfu7ozyd.onion
|
||||
---
|
||||
# ProtonMail
|
||||
Proton Mail is an encrypted [email](../../internet/eMail.md) service based in Switzerland.
|
12
technology/applications/communication/Thunderbird.md
Normal file
12
technology/applications/communication/Thunderbird.md
Normal file
|
@ -0,0 +1,12 @@
|
|||
---
|
||||
obj: application
|
||||
os: ["linux", "macos", "windows"]
|
||||
website: https://www.thunderbird.net
|
||||
flatpak-id: org.mozilla.Thunderbird
|
||||
---
|
||||
|
||||
# Thunderbird
|
||||
#refactor
|
||||
|
||||
Thunderbird is an email client from Mozilla.
|
||||
Additional features are Contacts based on CardDAV ([WebDAV](../../tools/WebDAV.md)) and Calendars on CalDAV.
|
24
technology/applications/desktops/KDE Plasma.md
Normal file
24
technology/applications/desktops/KDE Plasma.md
Normal file
File diff suppressed because one or more lines are too long
16
technology/applications/desktops/dwm.md
Normal file
16
technology/applications/desktops/dwm.md
Normal file
|
@ -0,0 +1,16 @@
|
|||
---
|
||||
obj: application
|
||||
os:
|
||||
- linux
|
||||
arch-wiki: https://wiki.archlinux.org/title/Dwm
|
||||
website: https://dwm.suckless.org
|
||||
repo: https://git.suckless.org/dwm/
|
||||
---
|
||||
# dwm
|
||||
[Arch Wiki](https://wiki.archlinux.org/title/Dwm)
|
||||
dwm is a dynamic window manager for Xorg. It manages windows in tiled, stacked, and full-screen layouts, as well as many others with the help of optional patches. Layouts can be applied dynamically, optimizing the environment for the application in use and the task being performed. dwm is extremely lightweight and fast, written in C and with a stated design goal of remaining under 2000 source lines of code. dwm can be used with a compositor such as [picom](picom.md).
|
||||
|
||||
## Configuration
|
||||
dwm is configured at compile-time by editing some of its source files, specifically `config.h`. For detailed information on these settings see the included, well-commented `config.def.h` as well as the [customisation section](https://dwm.suckless.org/customisation/) on the dwm website.
|
||||
|
||||
The official website has a number of [patches](https://dwm.suckless.org/patches/) that can add extra functionality to dwm. These patches primarily make changes to the `dwm.c` file but also make changes to the `config.h` file where appropriate.
|
747
technology/applications/desktops/hyprland.md
Normal file
747
technology/applications/desktops/hyprland.md
Normal file
File diff suppressed because one or more lines are too long
16
technology/applications/desktops/picom.md
Normal file
16
technology/applications/desktops/picom.md
Normal file
|
@ -0,0 +1,16 @@
|
|||
---
|
||||
obj: application
|
||||
os: linux
|
||||
---
|
||||
# picom
|
||||
picom is a standalone compositor for Xorg, suitable for use with window managers that do not provide compositing. picom is a fork of compton, which is a fork of xcompmgr-dana, which in turn is a fork of xcompmgr.
|
||||
|
||||
## Configuration
|
||||
The default configuration is available in `/etc/xdg/picom.conf`. For modifications, it can be copied to `~/.config/picom/picom.conf` or `~/.config/picom.conf`.
|
||||
|
||||
## Usage
|
||||
To manually enable default compositing effects during a session, use the following command:
|
||||
`picom &`
|
||||
|
||||
To autostart picom as a background process for a session, the `-b` argument can be used (may cause a display freeze):
|
||||
`picom -b`
|
28
technology/applications/development/DB Browser for SQLite.md
Normal file
28
technology/applications/development/DB Browser for SQLite.md
Normal file
File diff suppressed because one or more lines are too long
8
technology/applications/development/Ghidra.md
Normal file
8
technology/applications/development/Ghidra.md
Normal file
|
@ -0,0 +1,8 @@
|
|||
---
|
||||
obj: application
|
||||
website: https://ghidra-sre.org
|
||||
repo: https://github.com/NationalSecurityAgency/ghidra
|
||||
---
|
||||
|
||||
# Ghidra
|
||||
#wip #🐇 #notnow
|
629
technology/applications/development/GitHub Actions.md
Normal file
629
technology/applications/development/GitHub Actions.md
Normal file
|
@ -0,0 +1,629 @@
|
|||
---
|
||||
obj: concept
|
||||
website: https://github.com/features/actions
|
||||
---
|
||||
|
||||
# GitHub Actions
|
||||
GitHub Actions is a continuous integration and continuous delivery (CI/CD) platform that allows you to automate your build, test, and deployment pipeline. You can create workflows that build and test every pull request to your repository, or deploy merged pull requests to production.
|
||||
|
||||
GitHub Actions goes beyond just DevOps and lets you run workflows when other events happen in your repository. For example, you can run a workflow to automatically add the appropriate labels whenever someone creates a new issue in your repository.
|
||||
|
||||
You can configure a GitHub Actions _workflow_ to be triggered when an _event_ occurs in your repository, such as a pull request being opened or an issue being created. Your workflow contains one or more _jobs_ which can run in sequential order or in parallel. Each job will run inside its own virtual machine _runner_, or inside a container, and has one or more _steps_ that either run a script that you define or run an _action_, which is a reusable extension that can simplify your workflow.
|
||||
|
||||
Example:
|
||||
```yml
|
||||
name: Rust
|
||||
|
||||
on:
|
||||
push:
|
||||
pull_request:
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
|
||||
jobs:
|
||||
build:
|
||||
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Build
|
||||
run: cargo build --verbose
|
||||
- name: Run tests
|
||||
run: cargo test --verbose
|
||||
```
|
||||
|
||||
## Expressions
|
||||
You can use expressions to programmatically set [environment variables](../../linux/Environment%20Variables.md) in workflow files and access contexts. An expression can be any combination of literal values, references to a context, or functions. You can combine literals, context references, and functions using operators.
|
||||
|
||||
Expressions are commonly used with the conditional `if` keyword in a workflow file to determine whether a step should run. When an `if` conditional is `true`, the step will run.
|
||||
|
||||
You need to use specific syntax to tell [GitHub](GitHub.md) to evaluate an expression rather than treat it as a string.
|
||||
`${{ <expression> }}`
|
||||
|
||||
Secrets passed to GitHub Actions can be used:
|
||||
`${{ secrets.MYSECRET }}`
|
||||
|
||||
### Functions
|
||||
#### contains
|
||||
`contains( search, item )`
|
||||
|
||||
Returns `true` if `search` contains `item`. If `search` is an array, this function returns `true` if the `item` is an element in the array. If `search` is a string, this function returns `true` if the `item` is a substring of `search`. This function is not case sensitive. Casts values to a string.
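A hedged example (the step name, script and event field are placeholders):

```yaml
# Run the step only if the commit message contains "deploy"
- name: Deploy
  if: ${{ contains(github.event.head_commit.message, 'deploy') }}
  run: ./deploy.sh
```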
|
||||
|
||||
#### startsWith
`startsWith( searchString, searchValue )`

Returns `true` when `searchString` starts with `searchValue`. This function is not case sensitive. Casts values to a string.

#### endsWith
`endsWith( searchString, searchValue )`

Returns `true` if `searchString` ends with `searchValue`. This function is not case sensitive. Casts values to a string.
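Both functions are handy for branch and tag checks against `github.ref`; a small sketch:
```yaml
steps:
  # github.ref looks like refs/tags/v1.2.3 for tag pushes
  - name: Tag builds only
    if: startsWith(github.ref, 'refs/tags/')
    run: echo "Building a tagged release"
  # github.ref is refs/heads/main for pushes to main
  - name: Default branch only
    if: endsWith(github.ref, '/main')
    run: echo "Running on the default branch"
```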
#### format
`format( string, replaceValue0, replaceValue1, ..., replaceValueN)`

Replaces values in `string` with the variable `replaceValueN`. Variables in `string` are specified using the `{N}` syntax, where `N` is an integer. You must specify at least one `replaceValue` and `string`. There is no maximum for the number of variables (`replaceValueN`) you can use. Escape curly braces using double braces.
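For example, `format('Hello {0} {1} {2}', 'Mona', 'The', 'Octocat')` returns `Hello Mona The Octocat`, and doubled braces are emitted literally:
```yaml
steps:
  - name: Formatted greeting
    # Prints: Hello Mona The Octocat
    run: echo "${{ format('Hello {0} {1} {2}', 'Mona', 'The', 'Octocat') }}"
  - name: Escaped braces
    # Prints: {Hello Mona!}
    run: echo "${{ format('{{Hello {0}!}}', 'Mona') }}"
```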
#### always
Causes the step to always execute, and returns `true`, even when canceled. The `always` expression is best used at the step level or on tasks that you expect to run even when a job is canceled. For example, you can use `always` to send logs even when a job is canceled.

Example of `always`:
```yaml
if: ${{ always() }}
```

#### failure
Returns `true` when any previous step of a job fails. If you have a chain of dependent jobs, `failure()` returns `true` if any ancestor job fails.

Example of `failure`:
```yaml
steps:
  ...
  - name: The job has failed
    if: ${{ failure() }}
```

## Workflows
A workflow is a configurable automated process that will run one or more jobs. Workflows are defined by a [YAML](../../files/YAML.md) file checked in to your repository and will run when triggered by an event in your repository, or they can be triggered manually, or at a defined schedule.

Workflows are defined in the `.github/workflows` directory in a repository, and a repository can have multiple workflows, each of which can perform a different set of tasks. For example, you can have one workflow to build and test pull requests, another workflow to deploy your application every time a release is created, and still another workflow that adds a label every time someone opens a new issue.

### Syntax
#### `name`
The name of the workflow. [GitHub](GitHub.md) displays the names of your workflows under your repository's "Actions" tab. If you omit `name`, [GitHub](GitHub.md) displays the workflow file path relative to the root of the repository.
#### `on`
To automatically trigger a workflow, use `on` to define which events can cause the workflow to run.

You can define single or multiple events that can trigger a workflow, or set a time schedule. You can also restrict the execution of a workflow to only occur for specific files, tags, or branch changes. These options are described in the following sections.

**Using a single event:**
For example, a workflow with the following `on` value will run when a push is made to any branch in the workflow's repository:
```yaml
on: push
```

**Using multiple events:**
You can specify a single event or multiple events. For example, a workflow with the following `on` value will run when a push is made to any branch in the repository or when someone forks the repository:
```yaml
on: [push, fork]
```

If you specify multiple events, only one of those events needs to occur to trigger your workflow. If multiple triggering events for your workflow occur at the same time, multiple workflow runs will be triggered.

**Using activity types:**
Some events have activity types that give you more control over when your workflow should run. Use `on.<event_name>.types` to define the type of event activity that will trigger a workflow run.

For example, the `issue_comment` event has the `created`, `edited`, and `deleted` activity types. If your workflow triggers on the `label` event, it will run whenever a label is created, edited, or deleted. If you specify the `created` activity type for the `label` event, your workflow will run when a label is created but not when a label is edited or deleted.
```yaml
on:
  label:
    types:
      - created
```

If you specify multiple activity types, only one of those event activity types needs to occur to trigger your workflow. If multiple triggering event activity types for your workflow occur at the same time, multiple workflow runs will be triggered. For example, the following workflow triggers when an issue is opened or labeled. If an issue with two labels is opened, three workflow runs will start: one for the issue opened event and two for the two issue labeled events.
```yaml
on:
  issues:
    types:
      - opened
      - labeled
```

**Using filters:**
Some events have filters that give you more control over when your workflow should run.

For example, the `push` event has a `branches` filter that causes your workflow to run only when a push to a branch that matches the `branches` filter occurs, instead of when any push occurs.
```yaml
on:
  push:
    branches:
      - main
      - 'releases/**'
```
#### `on.<event_name>.types`
Use `on.<event_name>.types` to define the type of activity that will trigger a workflow run. Most [GitHub](GitHub.md) events are triggered by more than one type of activity. For example, the `label` event is triggered when a label is `created`, `edited`, or `deleted`. The `types` keyword enables you to narrow down activity that causes the workflow to run. When only one activity type triggers a [webhook](../../internet/Webhook.md) event, the `types` keyword is unnecessary.

```yaml
on:
  label:
    types: [created, edited]
```
#### `on.schedule`
You can use `on.schedule` to define a time schedule for your workflows. You can schedule a workflow to run at specific UTC times using POSIX cron syntax. Scheduled workflows run on the latest commit on the default or base branch. The shortest interval you can run scheduled workflows is once every 5 minutes.

This example triggers the workflow every day at 5:30 and 17:30 UTC:
```yaml
on:
  schedule:
    # * is a special character in YAML so you have to quote this string
    - cron: '30 5,17 * * *'
```

A single workflow can be triggered by multiple `schedule` events. You can access the schedule event that triggered the workflow through the `github.event.schedule` context. This example triggers the workflow to run at 5:30 UTC every Monday-Thursday, but skips the `Not on Monday or Wednesday` step on Monday and Wednesday.
```yaml
on:
  schedule:
    - cron: '30 5 * * 1,3'
    - cron: '30 5 * * 2,4'

jobs:
  test_schedule:
    runs-on: ubuntu-latest
    steps:
      - name: Not on Monday or Wednesday
        if: github.event.schedule != '30 5 * * 1,3'
        run: echo "This step will be skipped on Monday and Wednesday"
      - name: Every time
        run: echo "This step will always run"
```
#### `on.workflow_dispatch`
When using the `workflow_dispatch` event, you can manually run this workflow from the UI.
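Beyond a bare trigger, `workflow_dispatch` can declare typed inputs that are filled in when the run is started manually; a sketch, where the `environment` input and the `deploy` job are made up for illustration:
```yaml
on:
  workflow_dispatch:
    inputs:
      environment:
        description: 'Deployment target'
        required: true
        default: 'staging'

jobs:
  deploy:
    runs-on: ubuntu-latest
    steps:
      # The chosen value is available through the inputs context
      - run: echo "Deploying to ${{ inputs.environment }}"
```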
#### `env`
A `map` of variables that are available to the steps of all jobs in the workflow. You can also set variables that are only available to the steps of a single job or to a single step. For more information, see `jobs.<job_id>.env` and `jobs.<job_id>.steps[*].env`.

Variables in the `env` map cannot be defined in terms of other variables in the map.

**Example of `env`:**
```yaml
env:
  SERVER: production
```

#### `jobs`
A workflow run is made up of one or more `jobs`, which run in parallel by default. To run jobs sequentially, you can define dependencies on other jobs using the `jobs.<job_id>.needs` keyword.

Each job runs in a runner environment specified by `runs-on`.
##### `jobs.<job_id>`
Use `jobs.<job_id>` to give your job a unique identifier. The key `job_id` is a string and its value is a map of the job's configuration data. You must replace `<job_id>` with a string that is unique to the `jobs` object. The `<job_id>` must start with a letter or `_` and contain only alphanumeric characters, `-`, or `_`.

Use `jobs.<job_id>.name` to set a name for the job, which is displayed in the [GitHub](GitHub.md) UI.

**Example: Creating jobs:**
In this example, two jobs have been created, and their `job_id` values are `my_first_job` and `my_second_job`.

```yaml
jobs:
  my_first_job:
    name: My first job
  my_second_job:
    name: My second job
```

##### `jobs.<job_id>.container`
Use `jobs.<job_id>.container` to create a container to run any steps in a job that don't already specify a container. If you have steps that use both script and container actions, the container actions will run as sibling containers on the same network with the same volume mounts.

If you do not set a `container`, all steps will run directly on the host specified by `runs-on` unless a step refers to an action configured to run in a container.

**Example: Running a job within a container:**
```yaml
name: CI
on:
  push:
    branches: [ main ]
jobs:
  container-test-job:
    runs-on: ubuntu-latest
    container:
      image: node:18
      env:
        NODE_ENV: development
      ports:
        - 80
      volumes:
        - my_docker_volume:/volume_mount
      options: --cpus 1
    steps:
      - name: Check for dockerenv file
        run: (ls /.dockerenv && echo Found dockerenv) || (echo No dockerenv)
```

When you only specify a container image, you can omit the `image` keyword.
```yaml
jobs:
  container-test-job:
    runs-on: ubuntu-latest
    container: node:18
```

##### `jobs.<job_id>.needs`
Use `jobs.<job_id>.needs` to identify any jobs that must complete successfully before this job will run. It can be a string or array of strings. If a job fails or is skipped, all jobs that need it are skipped unless the jobs use a conditional expression that causes the job to continue. If a run contains a series of jobs that need each other, a failure or skip applies to all jobs in the dependency chain from the point of failure or skip onwards. If you would like a job to run even if a job it is dependent on did not succeed, use the `always()` conditional expression in `jobs.<job_id>.if`.

**Example: Requiring successful dependent jobs:**
```yaml
jobs:
  job1:
  job2:
    needs: job1
  job3:
    needs: [job1, job2]
```
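To let a downstream job run regardless of whether its dependencies succeeded, combine `needs` with `always()`; a minimal sketch:
```yaml
jobs:
  job1:
  job2:
    needs: job1
  job3:
    # Runs after job1 and job2 even if one of them failed
    if: ${{ always() }}
    needs: [job1, job2]
```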
##### `jobs.<job_id>.if`
You can use the `jobs.<job_id>.if` conditional to prevent a job from running unless a condition is met. You can use any supported context and expression to create a conditional.

When you use expressions in an `if` conditional, you can, optionally, omit the `${{ }}` expression syntax because GitHub Actions automatically evaluates the `if` conditional as an expression. However, this exception does not apply everywhere.

You must always use the `${{ }}` expression syntax or escape with `''`, `""`, or `()` when the expression starts with `!`, since `!` is reserved notation in [YAML](../../files/YAML.md) format. For example:
```yaml
if: ${{ ! startsWith(github.ref, 'refs/tags/') }}
```

For more information, see "Expressions."

##### `jobs.<job_id>.runs-on`
Use `jobs.<job_id>.runs-on` to define the type of machine to run the job on.
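The value can be a single GitHub-hosted runner label or an array of labels that a self-hosted runner must match; a short sketch:
```yaml
jobs:
  hosted-job:
    # GitHub-hosted runner
    runs-on: ubuntu-latest
    steps:
      - run: echo "Runs on an Ubuntu runner"
  self-hosted-job:
    # Self-hosted runner matching all listed labels
    runs-on: [self-hosted, linux, x64]
    steps:
      - run: echo "Runs on a matching self-hosted runner"
```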
##### `jobs.<job_id>.env`
A `map` of variables that are available to all steps in the job. You can set variables for the entire workflow or an individual step.
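For example, a job-level variable is visible to every step of that job, but not to other jobs:
```yaml
jobs:
  job1:
    runs-on: ubuntu-latest
    env:
      FIRST_NAME: Mona
    steps:
      # FIRST_NAME is available in every step of job1
      - run: echo "Hello $FIRST_NAME"
```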
##### `jobs.<job_id>.steps`
A job contains a sequence of tasks called `steps`. Steps can run commands, run setup tasks, or run an action in your repository, a public repository, or an action published in a [Docker](../../tools/Docker.md) registry. Not all steps run actions, but all actions run as a step. Each step runs in its own process in the runner environment and has access to the workspace and filesystem. Because steps run in their own process, changes to [environment variables](../../linux/Environment%20Variables.md) are not preserved between steps. [GitHub](GitHub.md) provides built-in steps to set up and complete a job.

**Example of `jobs.<job_id>.steps`:**
```yaml
name: Greeting from Mona

on: push

jobs:
  my-job:
    name: My Job
    runs-on: ubuntu-latest
    steps:
      - name: Print a greeting
        env:
          MY_VAR: Hi there! My name is
          FIRST_NAME: Mona
          MIDDLE_NAME: The
          LAST_NAME: Octocat
        run: |
          echo $MY_VAR $FIRST_NAME $MIDDLE_NAME $LAST_NAME.
```

- `jobs.<job_id>.steps[*].if`
You can use the `if` conditional to prevent a step from running unless a condition is met. You can use any supported context and expression to create a conditional.
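For example, a step can be limited to a particular event and activity type; a small sketch:
```yaml
steps:
  - name: My first step
    # Only runs when an assignee is removed from a pull request
    if: ${{ github.event_name == 'pull_request' && github.event.action == 'unassigned' }}
    run: echo "This event is a pull request that had an assignee removed."
```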
- `jobs.<job_id>.steps[*].name`
A name for your step to display on [GitHub](GitHub.md).
- `jobs.<job_id>.steps[*].uses`
Selects an action to run as part of a step in your job. An action is a reusable unit of code. You can use an action defined in the same repository as the workflow, a public repository, or in a published [Docker](../../tools/Docker.md) container image.

Some actions require inputs that you must set using the `with` keyword. Review the action's README file to determine the inputs required.

**Example: Using versioned actions**
```yaml
steps:
  # Reference a specific commit
  - uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3
  # Reference the major version of a release
  - uses: actions/checkout@v4
  # Reference a specific version
  - uses: actions/checkout@v4.2.0
  # Reference a branch
  - uses: actions/checkout@main
```
- `jobs.<job_id>.steps[*].run`
Runs command-line programs using the operating system's [shell](../cli/Shell.md). If you do not provide a `name`, the step name will default to the text specified in the `run` command. Commands run using non-login shells by default.

Each `run` keyword represents a new process and [shell](../cli/Shell.md) in the runner environment. When you provide multi-line commands, each line runs in the same [shell](../cli/Shell.md). For example:

A single-line command:
```yaml
- name: Install Dependencies
  run: npm install
```

A multi-line command:
```yaml
- name: Clean install dependencies and build
  run: |
    npm ci
    npm run build
```
- `jobs.<job_id>.steps[*].working-directory`
Using the `working-directory` keyword, you can specify the working directory of where to run the command.

```yaml
- name: Clean temp directory
  run: rm -rf *
  working-directory: ./temp
```
- `jobs.<job_id>.steps[*].with`
A `map` of the input parameters defined by the action. Each input parameter is a key/value pair. Input parameters are set as [environment variables](../../linux/Environment%20Variables.md). The variable is prefixed with `INPUT_` and converted to upper case.

Input parameters defined for a [Docker](../../tools/Docker.md) container must use `args`. For more information, see "`jobs.<job_id>.steps[*].with.args`."

**Example of `jobs.<job_id>.steps[*].with`:**

Defines the three input parameters (`first_name`, `middle_name`, and `last_name`) defined by the `hello_world` action. These input variables will be accessible to the `hello_world` action as `INPUT_FIRST_NAME`, `INPUT_MIDDLE_NAME`, and `INPUT_LAST_NAME` [environment variables](../../linux/Environment%20Variables.md).
```yaml
jobs:
  my_first_job:
    steps:
      - name: My first step
        uses: actions/hello_world@main
        with:
          first_name: Mona
          middle_name: The
          last_name: Octocat
```
- `jobs.<job_id>.steps[*].with.args`
A `string` that defines the inputs for a [Docker](../../tools/Docker.md) container. [GitHub](GitHub.md) passes the `args` to the container's `ENTRYPOINT` when the container starts up. An `array of strings` is not supported by this parameter. A single argument that includes spaces should be surrounded by double quotes `""`.
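A sketch of passing `args` to a container action, where `octo-org/action-name` is a placeholder reference:
```yaml
steps:
  - name: Explain why this job ran
    uses: octo-org/action-name@main
    with:
      entrypoint: /bin/echo
      args: The ${{ github.event_name }} event triggered this step.
```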
- `jobs.<job_id>.steps[*].env`
Sets variables for steps to use in the runner environment. You can also set variables for the entire workflow or a job. For more information, see `env` and `jobs.<job_id>.env`.

When more than one environment variable is defined with the same name, [GitHub](GitHub.md) uses the most specific variable. For example, an environment variable defined in a step will override job and workflow [environment variables](../../linux/Environment%20Variables.md) with the same name, while the step executes. An environment variable defined for a job will override a workflow variable with the same name, while the job executes.

Public actions may specify expected variables in the README file. If you are setting a secret or sensitive value, such as a password or token, you must set secrets using the `secrets` context.

**Example of `jobs.<job_id>.steps[*].env`:**
```yaml
steps:
  - name: My first action
    env:
      GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      FIRST_NAME: Mona
      LAST_NAME: Octocat
```
## Events
An event is a specific activity in a repository that triggers a workflow run. For example, activity can originate from [GitHub](GitHub.md) when someone creates a pull request, opens an issue, or pushes a commit to a repository. You can also trigger a workflow to run on a schedule, by posting to a REST API, or manually.

### `create`
Runs your workflow when someone creates a [Git](../../dev/Git.md) reference ([Git](../../dev/Git.md) branch or tag) in the workflow's repository.

For example, you can run a workflow when the `create` event occurs.
```yaml
on:
  create
```

### `delete`
Runs your workflow when someone deletes a [Git](../../dev/Git.md) reference ([Git](../../dev/Git.md) branch or tag) in the workflow's repository.

For example, you can run a workflow when the `delete` event occurs.
```yaml
on:
  delete
```

### `discussion`
Runs your workflow when a discussion in the workflow's repository is created or modified.

Activity Types:
- `created`
- `edited`
- `deleted`
- `transferred`
- `pinned`
- `unpinned`
- `labeled`
- `unlabeled`
- `locked`
- `unlocked`
- `category_changed`
- `answered`
- `unanswered`

For example, you can run a workflow when a discussion has been `created`, `edited`, or `answered`.
```yaml
on:
  discussion:
    types: [created, edited, answered]
```

### `discussion_comment`
Runs your workflow when a comment on a discussion in the workflow's repository is created or modified.

Activity Types:
- `created`
- `edited`
- `deleted`

For example, you can run a workflow when a discussion comment has been `created` or `deleted`.
```yaml
on:
  discussion_comment:
    types: [created, deleted]
```
### `fork`
Runs your workflow when someone forks a repository.

For example, you can run a workflow when the `fork` event occurs.
```yaml
on:
  fork
```

### `issue_comment`
Runs your workflow when an issue or pull request comment is created, edited, or deleted.

Activity Types:
- `created`
- `edited`
- `deleted`

For example, you can run a workflow when an issue or pull request comment has been `created` or `deleted`.
```yaml
on:
  issue_comment:
    types: [created, deleted]
```

### `issues`
Runs your workflow when an issue in the workflow's repository is created or modified.

Activity Types:
- `opened`
- `edited`
- `deleted`
- `transferred`
- `pinned`
- `unpinned`
- `closed`
- `reopened`
- `assigned`
- `unassigned`
- `labeled`
- `unlabeled`
- `locked`
- `unlocked`
- `milestoned`
- `demilestoned`

For example, you can run a workflow when an issue has been `opened`, `edited`, or `milestoned`.
```yaml
on:
  issues:
    types: [opened, edited, milestoned]
```

### `milestone`
Runs your workflow when a milestone in the workflow's repository is created or modified.

Activity Types:
- `created`
- `closed`
- `opened`
- `edited`
- `deleted`

For example, you can run a workflow when a milestone has been `opened` or `deleted`.
```yaml
on:
  milestone:
    types: [opened, deleted]
```
### `pull_request`
Runs your workflow when activity on a pull request in the workflow's repository occurs. For example, if no activity types are specified, the workflow runs when a pull request is opened or reopened or when the head branch of the pull request is updated.

Activity Types:
- `assigned`
- `unassigned`
- `labeled`
- `unlabeled`
- `opened`
- `edited`
- `closed`
- `reopened`
- `synchronize`
- `converted_to_draft`
- `ready_for_review`
- `locked`
- `unlocked`
- `milestoned`
- `demilestoned`
- `review_requested`
- `review_request_removed`
- `auto_merge_enabled`
- `auto_merge_disabled`

For example, you can run a workflow when a pull request has been opened or reopened.
```yaml
on:
  pull_request:
    types: [opened, reopened]
```

### `push`
Runs your workflow when you push a commit or tag, or when you create a repository from a template.

You can use the `branches` or `branches-ignore` filter to configure your workflow to only run when specific branches are pushed.

You can use the `tags` or `tags-ignore` filter to configure your workflow to only run when specific tags are pushed.

If you use both the `branches` filter and the `paths` filter, the workflow will only run when both filters are satisfied. For example, the following workflow will only run when a push that includes a change to a JavaScript (`.js`) file is made to a branch whose name starts with `releases/`:
```yaml
on:
  push:
    branches:
      - 'releases/**'
    paths:
      - '**.js'
```

For example, you can run a workflow when the `push` event occurs.
```yaml
on:
  push
```

### `release`
Runs your workflow when release activity in your repository occurs.

Activity Types:
- `published`
- `unpublished`
- `created`
- `edited`
- `deleted`
- `prereleased`
- `released`

For example, you can run a workflow when a release has been `published`.
```yaml
on:
  release:
    types: [published]
```

### `schedule`
The `schedule` event allows you to trigger a workflow at a scheduled time.

You can schedule a workflow to run at specific UTC times using POSIX cron syntax. Scheduled workflows run on the latest commit on the default or base branch. The shortest interval you can run scheduled workflows is once every 5 minutes. A single workflow can be triggered by multiple `schedule` events. You can access the schedule event that triggered the workflow through the `github.event.schedule` context.

This example triggers the workflow every day at 5:30 and 17:30 UTC:
```yaml
on:
  schedule:
    # * is a special character in YAML so you have to quote this string
    - cron: '30 5,17 * * *'
```

### `workflow_dispatch`
To enable a workflow to be triggered manually, you need to configure the `workflow_dispatch` event. You can manually trigger a workflow run using the [GitHub](GitHub.md) API, [GitHub](GitHub.md) CLI, or [GitHub](GitHub.md) browser interface.

```yaml
on: workflow_dispatch
```
## Jobs
A job is a set of _steps_ in a workflow that is executed on the same runner. Each step is either a [shell](../cli/Shell.md) script that will be executed, or an _action_ that will be run. Steps are executed in order and are dependent on each other. Since each step is executed on the same runner, you can share data from one step to another. For example, you can have a step that builds your application followed by a step that tests the application that was built.

You can configure a job's dependencies with other jobs; by default, jobs have no dependencies and run in parallel with each other. When a job takes a dependency on another job, it will wait for the dependent job to complete before it can run. For example, you may have multiple build jobs for different architectures that have no dependencies, and a packaging job that is dependent on those jobs. The build jobs will run in parallel, and when they have all completed successfully, the packaging job will run.
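A minimal sketch of that pattern, assuming two independent build jobs and a `make`-based build:
```yaml
jobs:
  build-linux:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      # "make build" stands in for the project's real build command
      - run: make build
  build-macos:
    runs-on: macos-latest
    steps:
      - uses: actions/checkout@v4
      - run: make build
  package:
    # Waits for both builds; runs only if they both succeed
    needs: [build-linux, build-macos]
    runs-on: ubuntu-latest
    steps:
      - run: echo "Packaging artifacts"
```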
## Actions
An _action_ is a custom application for the GitHub Actions platform that performs a complex but frequently repeated task. Use an action to help reduce the amount of repetitive code that you write in your workflow files. An action can pull your [git](../../dev/Git.md) repository from [GitHub](GitHub.md), set up the correct toolchain for your build environment, or set up the authentication to your cloud provider.

You can write your own actions, or you can find actions to use in your workflows in the GitHub Marketplace.
10
technology/applications/development/GitHub Desktop.md
Normal file
@ -0,0 +1,10 @@
---
obj: application
os: ["linux", "macos", "windows"]
website: https://desktop.github.com/
flatpak-id: io.github.shiftey.Desktop
---
# GitHub Desktop
GitHub Desktop is a graphical [git](../../dev/Git.md) client primarily focused on [GitHub](GitHub.md).

#add-image #refactor
12
technology/applications/development/GitHub.md
Normal file
File diff suppressed because one or more lines are too long
417
technology/applications/development/HTTPie.md
Normal file
File diff suppressed because one or more lines are too long
13
technology/applications/development/Podman Desktop.md
Normal file
File diff suppressed because one or more lines are too long
43
technology/applications/development/Visual Studio Code.md
Normal file
File diff suppressed because one or more lines are too long
34
technology/applications/development/xh.md
Normal file
@ -0,0 +1,34 @@
---
obj: application
os:
  - linux
repo: https://github.com/ducaale/xh
---
# xh
#refactor
[Repo](https://github.com/ducaale/xh)
xh is a friendly and fast tool for sending [HTTP](../../internet/HTTP.md) requests. It reimplements as much as possible of [HTTPie](HTTPie.md)'s excellent design, with a focus on improved performance.

## Examples
```sh
# Send a GET request
xh httpbin.org/json

# Send a POST request with body {"name": "ahmed", "age": 24}
xh httpbin.org/post name=ahmed age:=24

# Send a GET request with querystring id=5&sort=true
xh get httpbin.org/json id==5 sort==true

# Send a GET request and include a header named x-api-key with value 12345
xh get httpbin.org/json x-api-key:12345

# Send a PUT request and pipe the result to less
xh put httpbin.org/put id:=49 age:=25 | less

# Download and save to res.json
xh -d httpbin.org/json -o res.json

# Make a request with a custom user agent
xh httpbin.org/get user-agent:foobar
```