Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Update commands from redis-doc #249

Merged
merged 6 commits into from
Nov 23, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions codegen/patches/tests.ts
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ export const fixupGeneratedTests = (filename: string) => (code: string): string
* aren't available in that image
*/
function commentOutFutureReleaseFeatures(filename: string) {
const unsupported = ["lmove", "lpos", "smismember", "zinter", "zmscore", "zunion"];
const unsupported = ["lmove", "lpos", "smismember", "zinter", "zmscore", "zunion", "zdiff", "zdiffstore"];
const match = unsupported.find(u => filename.endsWith(`${u}.md`));
if (match) {
return (code: string) => `// ${match} not supported by node_redis! ${code}`;
Expand All @@ -34,7 +34,7 @@ function fixKeyWeightsOverlyComplexParsingIssue(code: string) {
if (code.match(/(zunionstore|zinterstore).*WEIGHTS/)) {
return [`// @ts-expect-error (not smart enough to deal with numkeys)`, code].join("\n");
}
if (code.match(/(zunion|zinter).*"zset1","zset2"/)) {
if (code.match(/(zunion|zinter|zdiff\b).*"zset1","zset2"/)) {
return code.replace(`"zset1","zset2"`, `["zset1", "zset2"]`);
}
return code;
Expand Down
94 changes: 93 additions & 1 deletion codegen/schema.json
Original file line number Diff line number Diff line change
Expand Up @@ -3889,6 +3889,16 @@
"type": "integer"
}
},
"RESET": {
"summary": "Reset the connection",
"since": "6.2",
"group": "connection",
"arguments": [],
"return": {
"type": "string",
"const": "OK"
}
},
"RESTORE": {
"summary": "Create a key using the provided serialized value, previously obtained using DUMP.",
"complexity": "O(1) to create the new key and additional O(N*M) to reconstruct the serialized value, where N is the number of Redis objects composing the value and M their average size. For small string values the time complexity is thus O(1)+O(1*M) where M is small, so simply O(1). However for sorted set values the complexity is O(N*M*log(N)) because inserting values into sorted sets is O(log(N)).",
Expand Down Expand Up @@ -5397,6 +5407,72 @@
"type": "integer"
}
},
"ZDIFF": {
"summary": "Subtract multiple sorted sets",
"complexity": "O(L + (N-K)log(N)) worst case where L is the total number of elements in all the sets, N is the size of the first set, and K is the size of the result set.",
"arguments": [
{
"name": "numkeys",
"schema": {
"type": "integer"
}
},
{
"name": "key",
"schema": {
"type": "array",
"items": {
"type": "string"
}
}
},
{
"name": "withscores",
"optional": true,
"schema": {
"type": "string",
"enum": ["WITHSCORES"]
}
}
],
"since": "6.2.0",
"group": "sorted_set",
"return": {
"type": "array"
}
},
"ZDIFFSTORE": {
"summary": "Subtract multiple sorted sets and store the resulting sorted set in a new key",
"complexity": "O(L + (N-K)log(N)) worst case where L is the total number of elements in all the sets, N is the size of the first set, and K is the size of the result set.",
"arguments": [
{
"name": "destination",
"schema": {
"type": "string"
}
},
{
"name": "numkeys",
"schema": {
"type": "integer"
}
},
{
"name": "key",
"schema": {
"type": "array",
"items": {
"type": "string"
}
}
}
],
"since": "6.2.0",
"group": "sorted_set",
"return": {
"type": "integer"
}
},
"ZINCRBY": {
"summary": "Increment the score of a member in a sorted set",
"complexity": "O(log(N)) where N is the number of elements in the sorted set.",
Expand Down Expand Up @@ -6905,7 +6981,7 @@
}
},
"XLEN": {
"summary": "Return the number of entires in a stream",
"summary": "Return the number of entries in a stream",
"complexity": "O(1)",
"arguments": [
{
Expand Down Expand Up @@ -7420,6 +7496,22 @@
"schema": {
"type": "string"
}
},
{
"name": "IDLE_min-idle-time",
"optional": true,
"schema": {
"type": "array",
"items": [
{
"type": "string",
"const": "IDLE"
},
{
"type": "integer"
}
]
}
}
],
"since": "5.0.0",
Expand Down
10 changes: 10 additions & 0 deletions docs/redis-doc/clients.json
Original file line number Diff line number Diff line change
Expand Up @@ -1772,6 +1772,16 @@
"active": true
},

{
"name": "NewLife.Redis",
"language": "C#",
"url": "https://github.com/NewLifeX/NewLife.Redis",
"repository": "https://github.com/NewLifeX/NewLife.Redis",
"description": "The high-performance redis client supports .NETCORE/.NET4.0/.NET4.5, which is specially optimized for big data and message queuing. The average daily call volume of single online application is 10 billion",
"authors": [],
"active": true
},

{
"name": "wiredis",
"language": "C++",
Expand Down
59 changes: 58 additions & 1 deletion docs/redis-doc/commands.json
Original file line number Diff line number Diff line change
Expand Up @@ -2580,6 +2580,11 @@
"since": "1.0.0",
"group": "generic"
},
"RESET": {
"summary": "Reset the connection",
"since": "6.2",
"group": "connection"
},
"RESTORE": {
"summary": "Create a key using the provided serialized value, previously obtained using DUMP.",
"complexity": "O(1) to create the new key and additional O(N*M) to reconstruct the serialized value, where N is the number of Redis objects composing the value and M their average size. For small string values the time complexity is thus O(1)+O(1*M) where M is small, so simply O(1). However for sorted set values the complexity is O(N*M*log(N)) because inserting values into sorted sets is O(log(N)).",
Expand Down Expand Up @@ -3547,6 +3552,52 @@
"since": "2.0.0",
"group": "sorted_set"
},
"ZDIFF": {
"summary": "Subtract multiple sorted sets",
"complexity": "O(L + (N-K)log(N)) worst case where L is the total number of elements in all the sets, N is the size of the first set, and K is the size of the result set.",
"arguments": [
{
"name": "numkeys",
"type": "integer"
},
{
"name": "key",
"type": "key",
"multiple": true
},
{
"name": "withscores",
"type": "enum",
"enum": [
"WITHSCORES"
],
"optional": true
}
],
"since": "6.2.0",
"group": "sorted_set"
},
"ZDIFFSTORE": {
"summary": "Subtract multiple sorted sets and store the resulting sorted set in a new key",
"complexity": "O(L + (N-K)log(N)) worst case where L is the total number of elements in all the sets, N is the size of the first set, and K is the size of the result set.",
"arguments": [
{
"name": "destination",
"type": "key"
},
{
"name": "numkeys",
"type": "integer"
},
{
"name": "key",
"type": "key",
"multiple": true
}
],
"since": "6.2.0",
"group": "sorted_set"
},
"ZINCRBY": {
"summary": "Increment the score of a member in a sorted set",
"complexity": "O(log(N)) where N is the number of elements in the sorted set.",
Expand Down Expand Up @@ -4409,7 +4460,7 @@
"group": "stream"
},
"XLEN": {
"summary": "Return the number of entires in a stream",
"summary": "Return the number of entries in a stream",
"complexity": "O(1)",
"arguments": [
{
Expand Down Expand Up @@ -4700,6 +4751,12 @@
"name": "consumer",
"type": "string",
"optional": true
},
{
"command": "IDLE",
"name": "min-idle-time",
"type": "integer",
"optional": true
}
],
"since": "5.0.0",
Expand Down
20 changes: 13 additions & 7 deletions docs/redis-doc/commands/client-kill.md
Original file line number Diff line number Diff line change
@@ -1,25 +1,23 @@
The `CLIENT KILL` command closes a given client connection. Up to Redis 2.8.11 it was possible to close a connection only by client address, using the following form:
The `CLIENT KILL` command closes a given client connection. This command supports two formats, the old format:

CLIENT KILL addr:port

The `ip:port` should match a line returned by the `CLIENT LIST` command (`addr` field).

However starting with Redis 2.8.12 or greater, the command accepts the following
form:
The new format:

CLIENT KILL <filter> <value> ... ... <filter> <value>

With the new form it is possible to kill clients by different attributes
instead of killing just by address. The following filters are available:

* `CLIENT KILL ADDR ip:port`. This is exactly the same as the old three-arguments behavior.
* `CLIENT KILL ID client-id`. Allows to kill a client by its unique `ID` field, which was introduced in the `CLIENT LIST` command starting from Redis 2.8.12.
* `CLIENT KILL TYPE type`, where *type* is one of `normal`, `master`, `slave` and `pubsub` (the `master` type is available from v3.2). This closes the connections of **all the clients** in the specified class. Note that clients blocked into the `MONITOR` command are considered to belong to the `normal` class.
* `CLIENT KILL LADDR ip:port`. Kill all clients connected to specified local (bind) address.
* `CLIENT KILL ID client-id`. Allows killing a client by its unique `ID` field. Client `ID`'s are retrieved using the `CLIENT LIST` command.
* `CLIENT KILL TYPE type`, where *type* is one of `normal`, `master`, `replica` and `pubsub`. This closes the connections of **all the clients** in the specified class. Note that clients blocked into the `MONITOR` command are considered to belong to the `normal` class.
* `CLIENT KILL USER username`. Closes all the connections that are authenticated with the specified [ACL](/topics/acl) username, however it returns an error if the username does not map to an existing ACL user.
* `CLIENT KILL SKIPME yes/no`. By default this option is set to `yes`, that is, the client calling the command will not get killed, however setting this option to `no` will have the effect of also killing the client calling the command.

**Note: starting with Redis 5 the project is no longer using the slave word. You can use `TYPE replica` instead, however the old form is still supported for backward compatibility.**

It is possible to provide multiple filters at the same time. The command will handle multiple filters via logical AND. For example:

CLIENT KILL addr 127.0.0.1:12345 type pubsub
Expand Down Expand Up @@ -53,3 +51,11 @@ When called with the three arguments format:
When called with the filter / value format:

@integer-reply: the number of clients killed.

@history

* `>=2.8.12`: Added new filter format.
* `>=2.8.12`: `ID` option.
* `>=3.2`: Added `master` type for `TYPE` option.
* `>=5`: Replaced `slave` `TYPE` with `replica`. `slave` still supported for backward compatibility.
* `>=6.2`: `LADDR` option.
14 changes: 12 additions & 2 deletions docs/redis-doc/commands/client-list.md
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
The `CLIENT LIST` command returns information and statistics about the client
connections server in a mostly human readable format.

As of v5.0, the optional `TYPE type` subcommand can be used to filter the list by clients' type, where *type* is one of `normal`, `master`, `replica` and `pubsub`. Note that clients blocked into the `MONITOR` command are considered to belong to the `normal` class.
The optional `TYPE type` subcommand can be used to filter the list by clients' type, where *type* is one of `normal`, `master`, `replica` and `pubsub`. Note that clients blocked into the `MONITOR` command are considered to belong to the `normal` class.

@return

Expand All @@ -13,9 +13,10 @@ As of v5.0, the optional `TYPE type` subcommand can be used to filter the list b

Here is the meaning of the fields:

* `id`: an unique 64-bit client ID (introduced in Redis 2.8.12).
* `id`: a unique 64-bit client ID.
* `name`: the name set by the client with `CLIENT SETNAME`
* `addr`: address/port of the client
* `laddr`: address/port of local address client connected to (bind address)
* `fd`: file descriptor corresponding to the socket
* `age`: total duration of the connection in seconds
* `idle`: idle time of the connection in seconds
Expand All @@ -33,6 +34,8 @@ Here is the meaning of the fields:
* `cmd`: last command played
* `argv-mem`: incomplete arguments for the next command (already extracted from query buffer)
* `tot-mem`: total memory consumed by this client in its various buffers
* `redir`: client id of current client tracking redirection
* `user`: the authenticated username of the client

The client flags can be a combination of:

Expand All @@ -53,6 +56,7 @@ U: the client is connected via a Unix domain socket
x: the client is in a MULTI/EXEC context
t: the client enabled keys tracking in order to perform client side caching
R: the client tracking target client is invalid
B: the client enabled broadcast tracking mode
```

The file descriptor events can be:
Expand All @@ -68,3 +72,9 @@ New fields are regularly added for debugging purpose. Some could be removed
in the future. A version safe Redis client using this command should parse
the output accordingly (i.e. handling gracefully missing fields, skipping
unknown fields).

@history

* `>=2.8.12`: Added unique client `id` field.
* `>=5.0`: Added optional `TYPE` filter.
* `>=6.2`: Added `laddr`.
2 changes: 1 addition & 1 deletion docs/redis-doc/commands/cluster-flushslots.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,6 @@ Deletes all slots from a node.

The `CLUSTER FLUSHSLOTS` deletes all information about slots from the connected node. It can only be called when the database is empty.

@reply
@return

@simple-string-reply: `OK`
4 changes: 4 additions & 0 deletions docs/redis-doc/commands/command.md
Original file line number Diff line number Diff line change
Expand Up @@ -104,8 +104,12 @@ Cluster client needs to parse commands marked `movablekeys` to locate all releva
Complete list of commands currently requiring key location parsing:

- `SORT` - optional `STORE` key, optional `BY` weights, optional `GET` keys
- `ZUNION` - keys stop when `WEIGHT` or `AGGREGATE` starts
- `ZUNIONSTORE` - keys stop when `WEIGHT` or `AGGREGATE` starts
- `ZINTER` - keys stop when `WEIGHT` or `AGGREGATE` starts
- `ZINTERSTORE` - keys stop when `WEIGHT` or `AGGREGATE` starts
- `ZDIFF` - keys stop after `numkeys` count arguments
- `ZDIFFSTORE` - keys stop after `numkeys` count arguments
- `EVAL` - keys stop after `numkeys` count arguments
- `EVALSHA` - keys stop after `numkeys` count arguments

Expand Down
15 changes: 15 additions & 0 deletions docs/redis-doc/commands/info.md
Original file line number Diff line number Diff line change
Expand Up @@ -73,6 +73,11 @@ Here is the meaning of all fields in the **clients** section:

* `connected_clients`: Number of client connections (excluding connections
from replicas)
* `cluster_connections`: An approximation of the number of sockets used by the
cluster's bus
* `maxclients`: The value of the `maxclients` configuration directive. This is
the upper limit for the sum of `connected_clients`, `connected_slaves` and
`cluster_connections`.
* `client_longest_output_list`: Longest output list among current client
connections
* `client_biggest_input_buf`: Biggest input buffer among current client
Expand Down Expand Up @@ -195,6 +200,8 @@ If a load operation is on-going, these additional fields will be added:
* `loading_start_time`: Epoch-based timestamp of the start of the load
operation
* `loading_total_bytes`: Total file size
* `loading_rdb_used_mem`: The memory usage of the server that had generated
the RDB file at the time of the file's creation
* `loading_loaded_bytes`: Number of bytes already loaded
* `loading_loaded_perc`: Same value expressed as a percentage
* `loading_eta_seconds`: ETA in seconds for the load to be complete
Expand Down Expand Up @@ -277,7 +284,15 @@ If the instance is a replica, these additional fields are provided:

If a SYNC operation is on-going, these additional fields are provided:

* `master_sync_total_bytes`: Total number of bytes that need to be
transferred. This may be 0 when the size is unknown (for example, when
the `repl-diskless-sync` configuration directive is used)
* `master_sync_read_bytes`: Number of bytes already transferred
* `master_sync_left_bytes`: Number of bytes left before syncing is complete
(may be negative when `master_sync_total_bytes` is 0)
* `master_sync_perc`: The percentage `master_sync_read_bytes` from
`master_sync_total_bytes`, or an approximation that uses
`loading_rdb_used_mem` when `master_sync_total_bytes` is 0
* `master_sync_last_io_seconds_ago`: Number of seconds since last transfer I/O
during a SYNC operation

Expand Down
Loading