diff --git a/.devcontainer/TestConfig.json b/.devcontainer/TestConfig.json index 54ff589ce..2ec990b9f 100644 --- a/.devcontainer/TestConfig.json +++ b/.devcontainer/TestConfig.json @@ -4,7 +4,6 @@ "SecureServer": "redis", "FailoverMasterServer": "redis", "FailoverReplicaServer": "redis", - "RediSearchServer": "redisearch", "IPv4Server": "redis", "RemoteServer": "redis", "SentinelServer": "redis", diff --git a/.devcontainer/docker-compose.yml b/.devcontainer/docker-compose.yml index 2b458776f..a801d6f1e 100644 --- a/.devcontainer/docker-compose.yml +++ b/.devcontainer/docker-compose.yml @@ -12,16 +12,12 @@ services: - ./TestConfig.json:/workspace/tests/StackExchange.Redis.Tests/TestConfig.json:ro depends_on: - redis - - redisearch links: - "redis:redis" - - "redisearch:redisearch" command: /bin/sh -c "while sleep 1000; do :; done" redis: build: context: ../tests/RedisConfigs dockerfile: Dockerfile sysctls : - net.core.somaxconn: '511' - redisearch: - image: redislabs/redisearch:latest \ No newline at end of file + net.core.somaxconn: '511' \ No newline at end of file diff --git a/.editorconfig b/.editorconfig index 162be69f5..eb05866a0 100644 --- a/.editorconfig +++ b/.editorconfig @@ -27,56 +27,187 @@ indent_size = 2 # Dotnet code style settings: [*.{cs,vb}] # Sort using and Import directives with System.* appearing first -dotnet_sort_system_directives_first = true +dotnet_sort_system_directives_first = true:warning # Avoid "this." and "Me." 
if not necessary -dotnet_style_qualification_for_field = false:suggestion -dotnet_style_qualification_for_property = false:suggestion -dotnet_style_qualification_for_method = false:suggestion -dotnet_style_qualification_for_event = false:suggestion +dotnet_style_qualification_for_field = false:warning +dotnet_style_qualification_for_property = false:warning +dotnet_style_qualification_for_method = false:warning +dotnet_style_qualification_for_event = false:warning + +# Modifiers +dotnet_style_require_accessibility_modifiers = for_non_interface_members:warning +dotnet_style_readonly_field = true:warning # Use language keywords instead of framework type names for type references -dotnet_style_predefined_type_for_locals_parameters_members = true:suggestion -dotnet_style_predefined_type_for_member_access = true:suggestion +dotnet_style_predefined_type_for_locals_parameters_members = true:warning +dotnet_style_predefined_type_for_member_access = true:warning # Suggest more modern language features when available -dotnet_style_object_initializer = true:suggestion -dotnet_style_collection_initializer = true:suggestion -dotnet_style_coalesce_expression = true:suggestion -dotnet_style_null_propagation = true:suggestion -dotnet_style_explicit_tuple_names = true:suggestion +dotnet_style_object_initializer = true:warning +dotnet_style_collection_initializer = true:warning +dotnet_style_explicit_tuple_names = true:warning +dotnet_style_null_propagation = true:warning +dotnet_style_coalesce_expression = true:warning +dotnet_style_prefer_is_null_check_over_reference_equality_method = true:warning +dotnet_style_prefer_auto_properties = true:suggestion # Ignore silly if statements -dotnet_style_prefer_conditional_expression_over_return = false:none +dotnet_style_prefer_conditional_expression_over_assignment = true:suggestion +dotnet_style_prefer_conditional_expression_over_return = true:suggestion + +# Don't warn on things that actually need suppressing 
+dotnet_remove_unnecessary_suppression_exclusions = CA1009,CA1063,CA1069,CA1416,CA1816,CA1822,CA2202,CS0618,IDE0060,IDE0062,RCS1047,RCS1085,RCS1090,RCS1194,RCS1231 + +# Style Definitions +dotnet_naming_style.pascal_case_style.capitalization = pascal_case +# Use PascalCase for constant fields +dotnet_naming_rule.constant_fields_should_be_pascal_case.severity = suggestion +dotnet_naming_rule.constant_fields_should_be_pascal_case.symbols = constant_fields +dotnet_naming_rule.constant_fields_should_be_pascal_case.style = pascal_case_style +dotnet_naming_symbols.constant_fields.applicable_kinds = field +dotnet_naming_symbols.constant_fields.applicable_accessibilities = * +dotnet_naming_symbols.constant_fields.required_modifiers = const # CSharp code style settings: [*.cs] # Prefer method-like constructs to have a expression-body -csharp_style_expression_bodied_methods = true:none -csharp_style_expression_bodied_constructors = true:none -csharp_style_expression_bodied_operators = true:none +csharp_style_expression_bodied_constructors = true:silent +csharp_style_expression_bodied_methods = true:silent +csharp_style_expression_bodied_operators = true:warning # Prefer property-like constructs to have an expression-body -csharp_style_expression_bodied_properties = true:none -csharp_style_expression_bodied_indexers = true:none -csharp_style_expression_bodied_accessors = true:none +csharp_style_expression_bodied_accessors = true:warning +csharp_style_expression_bodied_indexers = true:warning +csharp_style_expression_bodied_properties = true:warning +csharp_style_expression_bodied_lambdas = true:warning +csharp_style_expression_bodied_local_functions = true:silent -# Suggest more modern language features when available -csharp_style_pattern_matching_over_is_with_cast_check = true:suggestion -csharp_style_pattern_matching_over_as_with_null_check = true:suggestion -csharp_style_inlined_variable_declaration = true:suggestion -csharp_style_throw_expression = true:suggestion 
-csharp_style_conditional_delegate_call = true:suggestion +# Pattern matching preferences +csharp_style_pattern_matching_over_is_with_cast_check = true:warning +csharp_style_pattern_matching_over_as_with_null_check = true:warning + +# Null-checking preferences +csharp_style_throw_expression = true:warning +csharp_style_conditional_delegate_call = true:warning + +# Modifier preferences +csharp_preferred_modifier_order = public,private,protected,internal,static,extern,new,virtual,abstract,sealed,override,readonly,volatile,async:suggestion + +# Expression-level preferences +csharp_prefer_braces = true:silent +csharp_style_deconstructed_variable_declaration = true:suggestion +csharp_prefer_simple_default_expression = true:silent +csharp_style_pattern_local_over_anonymous_function = true:suggestion +csharp_style_inlined_variable_declaration = true:warning +csharp_prefer_simple_using_statement = true:silent +csharp_style_prefer_not_pattern = true:warning +csharp_style_prefer_switch_expression = true:warning -# Newline settings +# Disable range operator suggestions +csharp_style_prefer_range_operator = false:none +csharp_style_prefer_index_operator = false:none + +# New line preferences csharp_new_line_before_open_brace = all csharp_new_line_before_else = true csharp_new_line_before_catch = true csharp_new_line_before_finally = true csharp_new_line_before_members_in_object_initializers = true csharp_new_line_before_members_in_anonymous_types = true +csharp_new_line_between_query_expression_clauses = true + +# Indentation preferences +csharp_indent_case_contents = true +csharp_indent_switch_labels = true +csharp_indent_labels = flush_left + +# Space preferences +csharp_space_after_cast = false +csharp_space_after_keywords_in_control_flow_statements = true:warning +csharp_space_between_method_call_parameter_list_parentheses = false +csharp_space_between_method_declaration_parameter_list_parentheses = false +csharp_space_between_parentheses = false 
+csharp_space_before_colon_in_inheritance_clause = true +csharp_space_after_colon_in_inheritance_clause = true +csharp_space_around_binary_operators = before_and_after +csharp_space_between_method_declaration_empty_parameter_list_parentheses = false +csharp_space_between_method_call_name_and_opening_parenthesis = false +csharp_space_between_method_call_empty_parameter_list_parentheses = false + +# Wrapping preferences +csharp_preserve_single_line_statements = true +csharp_preserve_single_line_blocks = true + +# Help Link: https://learn.microsoft.com/dotnet/fundamentals/code-analysis/quality-rules/ca1852 +# Tags : Telemetry, EnabledRuleInAggressiveMode, CompilationEnd +dotnet_diagnostic.CA1852.severity = warning + +# IDE preferences +dotnet_diagnostic.IDE0090.severity = silent # IDE0090: Use 'new(...)' + +#Roslynator preferences +dotnet_diagnostic.RCS1037.severity = error # RCS1037: Remove trailing white-space. +dotnet_diagnostic.RCS1098.severity = none # RCS1098: Constant values should be placed on right side of comparisons. + +dotnet_diagnostic.RCS1194.severity = none # RCS1194: Implement exception constructors. +dotnet_diagnostic.RCS1229.severity = none # RCS1229: Use async/await when necessary. +dotnet_diagnostic.RCS1233.severity = none # RCS1233: Use short-circuiting operator. +dotnet_diagnostic.RCS1234.severity = none # RCS1234: Duplicate enum value. 
+ +# StyleCop preferences +dotnet_diagnostic.SA0001.severity = none # SA0001: XML comment analysis is disabled + +dotnet_diagnostic.SA1101.severity = none # SA1101: Prefix local calls with this +dotnet_diagnostic.SA1108.severity = none # SA1108: Block statements should not contain embedded comments +dotnet_diagnostic.SA1122.severity = none # SA1122: Use string.Empty for empty strings +dotnet_diagnostic.SA1127.severity = none # SA1127: Generic type constraints should be on their own line +dotnet_diagnostic.SA1128.severity = none # SA1128: Put constructor initializers on their own line +dotnet_diagnostic.SA1132.severity = none # SA1132: Do not combine fields +dotnet_diagnostic.SA1133.severity = none # SA1133: Do not combine attributes + +dotnet_diagnostic.SA1200.severity = none # SA1200: Using directives should be placed correctly +dotnet_diagnostic.SA1201.severity = none # SA1201: Elements should appear in the correct order +dotnet_diagnostic.SA1202.severity = none # SA1202: Elements should be ordered by access +dotnet_diagnostic.SA1203.severity = none # SA1203: Constants should appear before fields + +dotnet_diagnostic.SA1306.severity = none # SA1306: Field names should begin with lower-case letter +dotnet_diagnostic.SA1309.severity = none # SA1309: Field names should not begin with underscore +dotnet_diagnostic.SA1310.severity = silent # SA1310: Field names should not contain underscore +dotnet_diagnostic.SA1311.severity = none # SA1311: Static readonly fields should begin with upper-case letter +dotnet_diagnostic.SA1312.severity = none # SA1312: Variable names should begin with lower-case letter + +dotnet_diagnostic.SA1401.severity = silent # SA1401: Fields should be private +dotnet_diagnostic.SA1402.severity = suggestion # SA1402: File may only contain a single type + +dotnet_diagnostic.SA1503.severity = silent # SA1503: Braces should not be omitted +dotnet_diagnostic.SA1516.severity = silent # SA1516: Elements should be separated by blank line + 
+dotnet_diagnostic.SA1600.severity = none # SA1600: Elements should be documented +dotnet_diagnostic.SA1601.severity = none # SA1601: Partial elements should be documented +dotnet_diagnostic.SA1602.severity = none # SA1602: Enumeration items should be documented +dotnet_diagnostic.SA1615.severity = none # SA1615: Element return value should be documented +dotnet_diagnostic.SA1623.severity = none # SA1623: Property summary documentation should match accessors +dotnet_diagnostic.SA1633.severity = none # SA1633: File should have header +dotnet_diagnostic.SA1642.severity = none # SA1642: Constructor summary documentation should begin with standard text +dotnet_diagnostic.SA1643.severity = none # SA1643: Destructor summary documentation should begin with standard text + + +# To Fix: +dotnet_diagnostic.SA1204.severity = none # SA1204: Static elements should appear before instance elements +dotnet_diagnostic.SA1214.severity = none # SA1214: Readonly fields should appear before non-readonly fields +dotnet_diagnostic.SA1304.severity = none # SA1304: Non-private readonly fields should begin with upper-case letter +dotnet_diagnostic.SA1307.severity = none # SA1307: Accessible fields should begin with upper-case letter +dotnet_diagnostic.SA1308.severity = suggestion # SA1308: Variable names should not be prefixed +dotnet_diagnostic.SA1131.severity = none # SA1131: Use readable conditions +dotnet_diagnostic.SA1405.severity = none # SA1405: Debug.Assert should provide message text +dotnet_diagnostic.SA1501.severity = none # SA1501: Statement should not be on a single line +dotnet_diagnostic.SA1502.severity = suggestion # SA1502: Element should not be on a single line +dotnet_diagnostic.SA1513.severity = none # SA1513: Closing brace should be followed by blank line +dotnet_diagnostic.SA1515.severity = none # SA1515: Single-line comment should be preceded by blank line +dotnet_diagnostic.SA1611.severity = suggestion # SA1611: Element parameters should be documented 
+dotnet_diagnostic.SA1649.severity = suggestion # SA1649: File name should match first type name + + + -# Space settings -csharp_space_after_keywords_in_control_flow_statements = true:suggestion -# Language settings -csharp_prefer_simple_default_expression = false:none \ No newline at end of file diff --git a/.github/.github.csproj b/.github/.github.csproj index 5a3b2f1f1..008099327 100644 --- a/.github/.github.csproj +++ b/.github/.github.csproj @@ -1,5 +1,5 @@ - + - netcoreapp3.1 + net6.0 \ No newline at end of file diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index d3a01e707..0b62bc917 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -1,121 +1,152 @@ -name: CI Builds +name: CI on: pull_request: push: - branches: - - main + branches: [ 'main' ] paths: - - '*' - - '!/docs/*' # Don't run workflow when files are only in the /docs directory + - '**' + - '!/docs/*' # Don't run workflow when files are only in the /docs directory + workflow_dispatch: jobs: main: name: StackExchange.Redis (Ubuntu) runs-on: ubuntu-latest + env: + DOTNET_SYSTEM_CONSOLE_ALLOW_ANSI_COLOR_REDIRECTION: "1" # Enable color output, even though the console output is redirected in Actions + TERM: xterm # Enable color output in GitHub Actions steps: - - name: Checkout code - uses: actions/checkout@v1 - - name: Setup .NET Core 3.x - uses: actions/setup-dotnet@v1 - with: - dotnet-version: '3.1.x' - - name: Setup .NET 5.x - uses: actions/setup-dotnet@v1 - with: - dotnet-version: '5.0.x' - - name: .NET Build - run: dotnet build Build.csproj -c Release /p:CI=true - - name: Start Redis Services (docker-compose) - working-directory: ./tests/RedisConfigs - run: docker-compose -f docker-compose.yml up -d - - name: StackExchange.Redis.Tests - run: dotnet test tests/StackExchange.Redis.Tests/StackExchange.Redis.Tests.csproj -c Release --logger trx --results-directory ./test-results/ /p:CI=true - - uses: dorny/test-reporter@v1 - continue-on-error: true - if: success() 
|| failure() - with: - name: StackExchange.Redis.Tests (Ubuntu) - Results - path: 'test-results/*.trx' - reporter: dotnet-trx - - name: .NET Lib Pack - run: dotnet pack src/StackExchange.Redis/StackExchange.Redis.csproj --no-build -c Release /p:Packing=true /p:PackageOutputPath=%CD%\.nupkgs /p:CI=true - - nredisearch: - name: NRediSearch (Ubuntu) - runs-on: ubuntu-latest - services: - redisearch: - image: redislabs/redisearch:latest - ports: - - 6385:6379 - steps: - - name: Checkout code - uses: actions/checkout@v1 - - name: Setup .NET Core 3.x - uses: actions/setup-dotnet@v1 - with: - dotnet-version: '3.1.x' - - name: Setup .NET 5.x - uses: actions/setup-dotnet@v1 - with: - dotnet-version: '5.0.x' - - name: .NET Build - run: dotnet build Build.csproj -c Release /p:CI=true - - name: NRedisSearch.Tests - run: dotnet test tests/NRediSearch.Test/NRediSearch.Test.csproj -c Release --logger trx --results-directory ./test-results/ /p:CI=true - - uses: dorny/test-reporter@v1 - continue-on-error: true - if: success() || failure() - with: - name: NRedisSearch.Tests - Results - path: 'test-results/*.trx' - reporter: dotnet-trx - - name: .NET Lib Pack - run: dotnet pack src/NRediSearch/NRediSearch.csproj --no-build -c Release /p:Packing=true /p:PackageOutputPath=%CD%\.nupkgs /p:CI=true + - name: Checkout code + uses: actions/checkout@v4 + with: + fetch-depth: 0 # Fetch the full history + - name: Start Redis Services (docker-compose) + working-directory: ./tests/RedisConfigs + run: docker compose -f docker-compose.yml up -d --wait + - name: Install .NET SDK + uses: actions/setup-dotnet@v4 + with: + dotnet-version: | + 6.0.x + 8.0.x + 10.0.x + - name: .NET Build + run: dotnet build Build.csproj -c Release /p:CI=true + - name: StackExchange.Redis.Tests + run: dotnet test tests/StackExchange.Redis.Tests/StackExchange.Redis.Tests.csproj -c Release --logger trx --logger GitHubActions --results-directory ./test-results/ /p:CI=true + - uses: dorny/test-reporter@v1 + 
continue-on-error: true + if: success() || failure() + with: + name: Test Results - Ubuntu + path: 'test-results/*.trx' + reporter: dotnet-trx + - name: .NET Lib Pack + run: dotnet pack src/StackExchange.Redis/StackExchange.Redis.csproj --no-build -c Release /p:Packing=true /p:PackageOutputPath=%CD%\.nupkgs /p:CI=true windows: - name: StackExchange.Redis (Windows Server 2019) - runs-on: windows-2019 + name: StackExchange.Redis (Windows Server 2022) + runs-on: windows-2022 + env: + NUGET_CERT_REVOCATION_MODE: offline # Disabling signing because of massive perf hit, see https://github.com/NuGet/Home/issues/11548 + DOTNET_SYSTEM_CONSOLE_ALLOW_ANSI_COLOR_REDIRECTION: "1" # Note this doesn't work yet for Windows - see https://github.com/dotnet/runtime/issues/68340 + TERM: xterm + DOCKER_BUILDKIT: 1 steps: - - name: Checkout code - uses: actions/checkout@v1 - - name: Setup .NET Core 3.x - uses: actions/setup-dotnet@v1 - with: - dotnet-version: '3.1.x' - - name: Setup .NET 5.x - uses: actions/setup-dotnet@v1 - with: - dotnet-version: '5.0.x' - - name: .NET Build - run: dotnet build Build.csproj -c Release /p:CI=true - - name: Start Redis Services (v3.0.503) - working-directory: .\tests\RedisConfigs\3.0.503 - run: | - .\redis-server.exe --service-install --service-name "redis-6379" "..\Basic\master-6379.conf" - .\redis-server.exe --service-install --service-name "redis-6380" "..\Basic\replica-6380.conf" - .\redis-server.exe --service-install --service-name "redis-6381" "..\Basic\secure-6381.conf" - .\redis-server.exe --service-install --service-name "redis-6382" "..\Failover\master-6382.conf" - .\redis-server.exe --service-install --service-name "redis-6383" "..\Failover\replica-6383.conf" - .\redis-server.exe --service-install --service-name "redis-7000" "..\Cluster\cluster-7000.conf" --dir "..\Cluster" - .\redis-server.exe --service-install --service-name "redis-7001" "..\Cluster\cluster-7001.conf" --dir "..\Cluster" - .\redis-server.exe --service-install --service-name 
"redis-7002" "..\Cluster\cluster-7002.conf" --dir "..\Cluster" - .\redis-server.exe --service-install --service-name "redis-7003" "..\Cluster\cluster-7003.conf" --dir "..\Cluster" - .\redis-server.exe --service-install --service-name "redis-7004" "..\Cluster\cluster-7004.conf" --dir "..\Cluster" - .\redis-server.exe --service-install --service-name "redis-7005" "..\Cluster\cluster-7005.conf" --dir "..\Cluster" - .\redis-server.exe --service-install --service-name "redis-7010" "..\Sentinel\redis-7010.conf" - .\redis-server.exe --service-install --service-name "redis-7011" "..\Sentinel\redis-7011.conf" - .\redis-server.exe --service-install --service-name "redis-26379" "..\Sentinel\sentinel-26379.conf" --sentinel - .\redis-server.exe --service-install --service-name "redis-26380" "..\Sentinel\sentinel-26380.conf" --sentinel - .\redis-server.exe --service-install --service-name "redis-26381" "..\Sentinel\sentinel-26381.conf" --sentinel - Start-Service redis-* - - name: StackExchange.Redis.Tests - run: dotnet test tests/StackExchange.Redis.Tests/StackExchange.Redis.Tests.csproj -c Release --logger trx --results-directory ./test-results/ /p:CI=true - - uses: dorny/test-reporter@v1 - continue-on-error: true - if: success() || failure() - with: - name: StackExchange.Redis.Tests (Windows Server 2019) - Results - path: 'test-results/*.trx' - reporter: dotnet-trx + - name: Checkout code + uses: actions/checkout@v4 + with: + fetch-depth: 0 # Fetch the full history + - uses: Vampire/setup-wsl@v2 + with: + distribution: Ubuntu-22.04 + - name: Install Redis + shell: wsl-bash {0} + working-directory: ./tests/RedisConfigs + run: | + apt-get update + apt-get install curl gpg lsb-release libgomp1 jq -y + curl -fsSL https://packages.redis.io/gpg | gpg --dearmor -o /usr/share/keyrings/redis-archive-keyring.gpg + chmod 644 /usr/share/keyrings/redis-archive-keyring.gpg + echo "deb [signed-by=/usr/share/keyrings/redis-archive-keyring.gpg] https://packages.redis.io/deb $(lsb_release -cs) 
main" | tee /etc/apt/sources.list.d/redis.list + apt-get update + apt-get install -y redis + mkdir redis + - name: Run redis-server + shell: wsl-bash {0} + working-directory: ./tests/RedisConfigs/redis + run: | + pwd + ls . + # Run each server instance in order + redis-server ../Basic/primary-6379.conf & + redis-server ../Basic/replica-6380.conf & + redis-server ../Basic/secure-6381.conf & + redis-server ../Failover/primary-6382.conf & + redis-server ../Failover/replica-6383.conf & + redis-server ../Cluster/cluster-7000.conf --dir ../Cluster & + redis-server ../Cluster/cluster-7001.conf --dir ../Cluster & + redis-server ../Cluster/cluster-7002.conf --dir ../Cluster & + redis-server ../Cluster/cluster-7003.conf --dir ../Cluster & + redis-server ../Cluster/cluster-7004.conf --dir ../Cluster & + redis-server ../Cluster/cluster-7005.conf --dir ../Cluster & + redis-server ../Sentinel/redis-7010.conf & + redis-server ../Sentinel/redis-7011.conf & + redis-server ../Sentinel/sentinel-26379.conf --sentinel & + redis-server ../Sentinel/sentinel-26380.conf --sentinel & + redis-server ../Sentinel/sentinel-26381.conf --sentinel & + # Wait for server instances to get ready + sleep 5 + echo "Checking redis-server version with port 6379" + redis-cli -p 6379 INFO SERVER | grep redis_version || echo "Failed to get version for port 6379" + echo "Checking redis-server version with port 6380" + redis-cli -p 6380 INFO SERVER | grep redis_version || echo "Failed to get version for port 6380" + echo "Checking redis-server version with port 6381" + redis-cli -p 6381 INFO SERVER | grep redis_version || echo "Failed to get version for port 6381" + echo "Checking redis-server version with port 6382" + redis-cli -p 6382 INFO SERVER | grep redis_version || echo "Failed to get version for port 6382" + echo "Checking redis-server version with port 6383" + redis-cli -p 6383 INFO SERVER | grep redis_version || echo "Failed to get version for port 6383" + echo "Checking redis-server version with 
port 7000" + redis-cli -p 7000 INFO SERVER | grep redis_version || echo "Failed to get version for port 7000" + echo "Checking redis-server version with port 7001" + redis-cli -p 7001 INFO SERVER | grep redis_version || echo "Failed to get version for port 7001" + echo "Checking redis-server version with port 7002" + redis-cli -p 7002 INFO SERVER | grep redis_version || echo "Failed to get version for port 7002" + echo "Checking redis-server version with port 7003" + redis-cli -p 7003 INFO SERVER | grep redis_version || echo "Failed to get version for port 7003" + echo "Checking redis-server version with port 7004" + redis-cli -p 7004 INFO SERVER | grep redis_version || echo "Failed to get version for port 7004" + echo "Checking redis-server version with port 7005" + redis-cli -p 7005 INFO SERVER | grep redis_version || echo "Failed to get version for port 7005" + echo "Checking redis-server version with port 7010" + redis-cli -p 7010 INFO SERVER | grep redis_version || echo "Failed to get version for port 7010" + echo "Checking redis-server version with port 7011" + redis-cli -p 7011 INFO SERVER | grep redis_version || echo "Failed to get version for port 7011" + echo "Checking redis-server version with port 26379" + redis-cli -p 26379 INFO SERVER | grep redis_version || echo "Failed to get version for port 26379" + echo "Checking redis-server version with port 26380" + redis-cli -p 26380 INFO SERVER | grep redis_version || echo "Failed to get version for port 26380" + echo "Checking redis-server version with port 26381" + redis-cli -p 26381 INFO SERVER | grep redis_version || echo "Failed to get version for port 26381" + continue-on-error: true + + - name: .NET Build + run: dotnet build Build.csproj -c Release /p:CI=true + - name: StackExchange.Redis.Tests + run: dotnet test tests/StackExchange.Redis.Tests/StackExchange.Redis.Tests.csproj -c Release --logger trx --logger GitHubActions --results-directory ./test-results/ /p:CI=true + - uses: dorny/test-reporter@v1 
+ continue-on-error: true + if: success() || failure() + with: + name: Tests Results - Windows Server 2022 + path: 'test-results/*.trx' + reporter: dotnet-trx + # Package and upload to MyGet only on pushes to main, not on PRs + - name: .NET Pack + if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && github.ref == 'refs/heads/main' + run: dotnet pack Build.csproj --no-build -c Release /p:PackageOutputPath=${env:GITHUB_WORKSPACE}\.nupkgs /p:CI=true + - name: Upload to MyGet + if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && github.ref == 'refs/heads/main' + run: dotnet nuget push ${env:GITHUB_WORKSPACE}\.nupkgs\*.nupkg -s https://www.myget.org/F/stackoverflow/api/v2/package -k ${{ secrets.MYGET_API_KEY }} diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml new file mode 100644 index 000000000..a03767211 --- /dev/null +++ b/.github/workflows/codeql.yml @@ -0,0 +1,63 @@ +name: "CodeQL" + +on: + push: + branches: [ 'main' ] + pull_request: + # The branches below must be a subset of the branches above + branches: [ 'main' ] + workflow_dispatch: + + schedule: + - cron: '8 9 * * 1' + +jobs: + analyze: + name: Analyze + runs-on: ubuntu-latest + permissions: + actions: read + contents: read + security-events: write + + strategy: + fail-fast: false + matrix: + language: [ 'csharp' ] + # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] + # Use only 'java' to analyze code written in Java, Kotlin or both + # Use only 'javascript' to analyze code written in JavaScript, TypeScript or both + # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Setup .NET + uses: actions/setup-dotnet@v4 + with: + dotnet-version: | + 10.0.x + + # Initializes the CodeQL tools for scanning. 
+ - name: Initialize CodeQL + uses: github/codeql-action/init@v4 + with: + languages: ${{ matrix.language }} + # If you wish to specify custom queries, you can do so here or in a config file. + # By default, queries listed here will override any specified in a config file. + # Prefix the list here with "+" to use these queries and those in the config file. + + # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs + # queries: security-extended,security-and-quality + + - if: matrix.language == 'csharp' + name: .NET Build + run: dotnet build Build.csproj -c Release /p:CI=true + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v4 + with: + category: "/language:${{matrix.language}}" diff --git a/.gitignore b/.gitignore index 3d1821b36..c0024fb1f 100644 --- a/.gitignore +++ b/.gitignore @@ -27,4 +27,5 @@ t8.shakespeare.txt launchSettings.json *.vsp *.diagsession -TestResults/ \ No newline at end of file +TestResults/ +BenchmarkDotNet.Artifacts/ diff --git a/Build.csproj b/Build.csproj index 3e16e801c..41fb15b0c 100644 --- a/Build.csproj +++ b/Build.csproj @@ -1,5 +1,6 @@ + diff --git a/Directory.Build.props b/Directory.Build.props index d43bc25dd..273acae25 100644 --- a/Directory.Build.props +++ b/Directory.Build.props @@ -6,16 +6,16 @@ $(MSBuildThisFileDirectory)StackExchange.Redis.snk $(AssemblyName) strict - Stack Exchange, Inc.; marc.gravell + Stack Exchange, Inc.; Marc Gravell; Nick Craver true $(MSBuildThisFileDirectory)Shared.ruleset NETSDK1069 - NU5105 + $(NoWarn);NU5105;NU1507;SER001;SER002;SER003;SER004;SER005 https://stackexchange.github.io/StackExchange.Redis/ReleaseNotes - https://github.com/StackExchange/StackExchange.Redis/ + https://stackexchange.github.io/StackExchange.Redis/ MIT - 8.0 + 14 git https://github.com/StackExchange/StackExchange.Redis/ @@ -25,16 +25,27 
@@ false true false + true + 00240000048000009400000006020000002400005253413100040000010001007791a689e9d8950b44a9a8886baad2ea180e7a8a854f158c9b98345ca5009cdd2362c84f368f1c3658c132b3c0f74e44ff16aeb2e5b353b6e0fe02f923a050470caeac2bde47a2238a9c7125ed7dab14f486a5a64558df96640933b9f2b6db188fc4a820f96dce963b662fa8864adbff38e5b4542343f162ecdc6dad16912fff true true true + + - + + + + + + + diff --git a/Directory.Build.targets b/Directory.Build.targets index ac2529fe4..687e19684 100644 --- a/Directory.Build.targets +++ b/Directory.Build.targets @@ -1,30 +1,5 @@ - - - $([System.IO.Path]::Combine('$(IntermediateOutputPath)','$(TargetFrameworkMoniker).AssemblyAttributes$(DefaultLanguageSourceExtension)')) - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/Directory.Packages.props b/Directory.Packages.props new file mode 100644 index 000000000..9767a0ab1 --- /dev/null +++ b/Directory.Packages.props @@ -0,0 +1,43 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/LICENSE b/LICENSE index e32afb858..db4620c99 100644 --- a/LICENSE +++ b/LICENSE @@ -24,11 +24,11 @@ SOFTWARE. Third Party Licenses: -The Redis project (http://redis.io/) is independent of this client library, and +The Redis project (https://redis.io/) is independent of this client library, and is licensed separately under the three clause BSD license. The full license -information can be viewed here: http://redis.io/topics/license +information can be viewed here: https://redis.io/topics/license -This tool makes use of the "redis-doc" library from http://redis.io/documentation +This tool makes use of the "redis-doc" library from https://redis.io/documentation in the intellisense comments, which is licensed under the Creative Commons Attribution-ShareAlike 4.0 International license; full details are available here: @@ -43,5 +43,5 @@ This tool is not used in the release binaries. 
The development solution uses the BookSleeve package from nuget (https://code.google.com/p/booksleeve/) by Marc Gravell. This is licensed under the Apache 2.0 license; full details are available here: -http://www.apache.org/licenses/LICENSE-2.0 +https://www.apache.org/licenses/LICENSE-2.0 This tool is not used in the release binaries. \ No newline at end of file diff --git a/README.md b/README.md index 6d1263678..5da32c6ce 100644 --- a/README.md +++ b/README.md @@ -1,11 +1,13 @@ StackExchange.Redis =================== +StackExchange.Redis is a .NET client for communicating with RESP servers such as [Redis](https://redis.io/), [Azure Managed Redis](https://azure.microsoft.com/products/managed-redis), [Garnet](https://microsoft.github.io/garnet/), [Valkey](https://valkey.io/), [AWS ElastiCache](https://aws.amazon.com/elasticache/), and a wide range of other Redis-like servers. We do not maintain a list of compatible servers, but if the server has a Redis-like API: it will *probably* work fine. If not: log an issue with details! + For all documentation, [see here](https://stackexchange.github.io/StackExchange.Redis/). 
#### Build Status -[![Build status](https://ci.appveyor.com/api/projects/status/2o3frasprum8mbaj/branch/master?svg=true)](https://ci.appveyor.com/project/StackExchange/stackexchange-redis/branch/master) +[![CI](https://github.com/StackExchange/StackExchange.Redis/actions/workflows/CI.yml/badge.svg)](https://github.com/StackExchange/StackExchange.Redis/actions/workflows/CI.yml) #### Package Status @@ -13,5 +15,6 @@ MyGet Pre-release feed: https://www.myget.org/gallery/stackoverflow | Package | NuGet Stable | NuGet Pre-release | Downloads | MyGet | | ------- | ------------ | ----------------- | --------- | ----- | -| [StackExchange.Redis](https://www.nuget.org/packages/StackExchange.Redis/) | [![StackExchange.Redis](https://img.shields.io/nuget/v/StackExchange.Redis.svg)](https://www.nuget.org/packages/StackExchange.Redis/) | [![StackExchange.Redis](https://img.shields.io/nuget/vpre/StackExchange.Redis.svg)](https://www.nuget.org/packages/StackExchange.Redis/) | [![StackExchange.Redis](https://img.shields.io/nuget/dt/StackExchange.Redis.svg)](https://www.nuget.org/packages/StackExchange.Redis/) | [![StackExchange.Redis MyGet](https://img.shields.io/myget/stackoverflow/vpre/StackExchange.Redis.svg)](https://www.myget.org/feed/stackoverflow/package/nuget/StackExchange.Redis) | -| [NRediSearch](https://www.nuget.org/packages/NRediSearch/) | [![NRediSearch](https://img.shields.io/nuget/v/NRediSearch.svg)](https://www.nuget.org/packages/NRediSearch/) | [![NRediSearch](https://img.shields.io/nuget/vpre/NRediSearch.svg)](https://www.nuget.org/packages/NRediSearch/) | [![NRediSearch](https://img.shields.io/nuget/dt/NRediSearch.svg)](https://www.nuget.org/packages/NRediSearch/) | [![NRediSearch MyGet](https://img.shields.io/myget/stackoverflow/vpre/NRediSearch.svg)](https://www.myget.org/feed/stackoverflow/package/nuget/NRediSearch) | +| [StackExchange.Redis](https://www.nuget.org/packages/StackExchange.Redis/) | 
[![StackExchange.Redis](https://img.shields.io/nuget/v/StackExchange.Redis.svg)](https://www.nuget.org/packages/StackExchange.Redis/) | [![StackExchange.Redis](https://img.shields.io/nuget/vpre/StackExchange.Redis.svg)](https://www.nuget.org/packages/StackExchange.Redis/absoluteLatest) | [![StackExchange.Redis](https://img.shields.io/nuget/dt/StackExchange.Redis.svg)](https://www.nuget.org/packages/StackExchange.Redis/) | [![StackExchange.Redis MyGet](https://img.shields.io/myget/stackoverflow/vpre/StackExchange.Redis.svg)](https://www.myget.org/feed/stackoverflow/package/nuget/StackExchange.Redis) | + +Release notes at: https://stackexchange.github.io/StackExchange.Redis/ReleaseNotes diff --git a/StackExchange.Redis.sln b/StackExchange.Redis.sln index ca575d8c9..4d275ad4f 100644 --- a/StackExchange.Redis.sln +++ b/StackExchange.Redis.sln @@ -1,6 +1,6 @@ Microsoft Visual Studio Solution File, Format Version 12.00 -# Visual Studio Version 16 -VisualStudioVersion = 16.0.28531.58 +# Visual Studio Version 17 +VisualStudioVersion = 17.0.31808.319 MinimumVisualStudioVersion = 10.0.40219.1 Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{3AD17044-6BFF-4750-9AC2-2CA466375F2A}" ProjectSection(SolutionItems) = preProject @@ -9,14 +9,19 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution build.cmd = build.cmd Build.csproj = Build.csproj build.ps1 = build.ps1 + .github\workflows\CI.yml = .github\workflows\CI.yml Directory.Build.props = Directory.Build.props Directory.Build.targets = Directory.Build.targets + Directory.Packages.props = Directory.Packages.props + tests\RedisConfigs\docker-compose.yml = tests\RedisConfigs\docker-compose.yml global.json = global.json NuGet.Config = NuGet.Config README.md = README.md docs\ReleaseNotes.md = docs\ReleaseNotes.md Shared.ruleset = Shared.ruleset version.json = version.json + tests\RedisConfigs\.docker\Redis\Dockerfile = tests\RedisConfigs\.docker\Redis\Dockerfile + 
.github\workflows\codeql.yml = .github\workflows\codeql.yml EndProjectSection EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "RedisConfigs", "RedisConfigs", "{96E891CD-2ED7-4293-A7AB-4C6F5D8D2B05}" @@ -41,10 +46,6 @@ Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "StackExchange.Redis.Tests", EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "BasicTest", "tests\BasicTest\BasicTest.csproj", "{939FA5F7-16AA-4847-812B-6EBC3748A86D}" EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "NRediSearch", "src\NRediSearch\NRediSearch.csproj", "{71455B07-E628-4F3A-9FFF-9EC63071F78E}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "NRediSearch.Test", "tests\NRediSearch.Test\NRediSearch.Test.csproj", "{94D233F5-2400-4542-98B9-BA72005C57DC}" -EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Sentinel", "Sentinel", "{36255A0A-89EC-43C8-A642-F4C1ACAEF5BC}" ProjectSection(SolutionItems) = preProject tests\RedisConfigs\Sentinel\redis-7010.conf = tests\RedisConfigs\Sentinel\redis-7010.conf @@ -72,9 +73,10 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Cluster", "Cluster", "{A3B4 EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Basic", "Basic", "{38BDEEED-7BEB-4B1F-9CE0-256D63F9C502}" ProjectSection(SolutionItems) = preProject - tests\RedisConfigs\Basic\master-6379.conf = tests\RedisConfigs\Basic\master-6379.conf + tests\RedisConfigs\Basic\primary-6379.conf = tests\RedisConfigs\Basic\primary-6379.conf tests\RedisConfigs\Basic\replica-6380.conf = tests\RedisConfigs\Basic\replica-6380.conf tests\RedisConfigs\Basic\secure-6381.conf = tests\RedisConfigs\Basic\secure-6381.conf + tests\RedisConfigs\Basic\tls-ciphers-6384.conf = tests\RedisConfigs\Basic\tls-ciphers-6384.conf EndProjectSection EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "BasicTestBaseline", "tests\BasicTestBaseline\BasicTestBaseline.csproj", "{8FDB623D-779B-4A84-BC6B-75106E41D8A4}" @@ -83,7 +85,7 @@ 
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TestConsole", "toys\TestCon EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Failover", "Failover", "{D082703F-1652-4C35-840D-7D377F6B9979}" ProjectSection(SolutionItems) = preProject - tests\RedisConfigs\Failover\master-6382.conf = tests\RedisConfigs\Failover\master-6382.conf + tests\RedisConfigs\Failover\primary-6382.conf = tests\RedisConfigs\Failover\primary-6382.conf tests\RedisConfigs\Failover\replica-6383.conf = tests\RedisConfigs\Failover\replica-6383.conf EndProjectSection EndProject @@ -93,7 +95,7 @@ Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "KestrelRedisServer", "toys\ EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "tests", "tests", "{73A5C363-CA1F-44C4-9A9B-EF791A76BA6A}" ProjectSection(SolutionItems) = preProject - tests\Directory.Build.props = tests\Directory.Build.props + tests\.editorconfig = tests\.editorconfig tests\Directory.Build.targets = tests\Directory.Build.targets EndProjectSection EndProject @@ -104,29 +106,6 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "src", "src", "{00CA0876-DA9 EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "toys", "toys", "{E25031D3-5C64-430D-B86F-697B66816FD8}" EndProject -Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "docs", "docs", "{153A10E4-E668-41AD-9E0F-6785CE7EED66}" - ProjectSection(SolutionItems) = preProject - docs\Basics.md = docs\Basics.md - docs\Configuration.md = docs\Configuration.md - docs\Events.md = docs\Events.md - docs\ExecSync.md = docs\ExecSync.md - docs\index.md = docs\index.md - docs\KeysScan.md = docs\KeysScan.md - docs\KeysValues.md = docs\KeysValues.md - docs\PipelinesMultiplexers.md = docs\PipelinesMultiplexers.md - docs\Profiling.md = docs\Profiling.md - docs\Profiling_v1.md = docs\Profiling_v1.md - docs\Profiling_v2.md = docs\Profiling_v2.md - docs\PubSubOrder.md = docs\PubSubOrder.md - docs\ReleaseNotes.md = docs\ReleaseNotes.md - docs\Scripting.md = 
docs\Scripting.md - docs\Server.md = docs\Server.md - docs\Testing.md = docs\Testing.md - docs\ThreadTheft.md = docs\ThreadTheft.md - docs\Timeouts.md = docs\Timeouts.md - docs\Transactions.md = docs\Transactions.md - EndProjectSection -EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TestConsoleBaseline", "toys\TestConsoleBaseline\TestConsoleBaseline.csproj", "{D58114AE-4998-4647-AFCA-9353D20495AE}" EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = ".github", ".github\.github.csproj", "{8FB98E7D-DAE2-4465-BD9A-104000E0A2D4}" @@ -137,11 +116,21 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Docker", "Docker", "{A9F81D tests\RedisConfigs\Docker\supervisord.conf = tests\RedisConfigs\Docker\supervisord.conf EndProjectSection EndProject -Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "RediSearch", "RediSearch", "{3FA2A7C6-DA16-4DEF-ACE0-34573A4AD430}" - ProjectSection(SolutionItems) = preProject - tests\RedisConfigs\RediSearch\redisearch-6385.conf = tests\RedisConfigs\RediSearch\redisearch-6385.conf - tests\RedisConfigs\RediSearch\redisearch.md = tests\RedisConfigs\RediSearch\redisearch.md - EndProjectSection +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "ConsoleTest", "tests\ConsoleTest\ConsoleTest.csproj", "{A0F89B8B-32A3-4C28-8F1B-ADE343F16137}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "ConsoleTestBaseline", "tests\ConsoleTestBaseline\ConsoleTestBaseline.csproj", "{69A0ACF2-DF1F-4F49-B554-F732DCA938A3}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "docs", "docs\docs.csproj", "{1DC43E76-5372-4C7F-A433-0602273E87FC}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StackExchange.Redis.Benchmarks", "tests\StackExchange.Redis.Benchmarks\StackExchange.Redis.Benchmarks.csproj", "{59889284-FFEE-82E7-94CB-3B43E87DA6CF}" +EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "eng", "eng", "{5FA0958E-6EBD-45F4-808E-3447A293F96F}" +EndProject 
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StackExchange.Redis.Build", "eng\StackExchange.Redis.Build\StackExchange.Redis.Build.csproj", "{190742E1-FA50-4E36-A8C4-88AE87654340}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "RESPite", "src\RESPite\RESPite.csproj", "{05761CF5-CC46-43A6-814B-6BD2ECC1F0ED}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "RESPite.Tests", "tests\RESPite.Tests\RESPite.Tests.csproj", "{CA67D8CA-6CC9-40E2-8CAC-F0B1401BEF7B}" EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution @@ -161,14 +150,6 @@ Global {939FA5F7-16AA-4847-812B-6EBC3748A86D}.Debug|Any CPU.Build.0 = Debug|Any CPU {939FA5F7-16AA-4847-812B-6EBC3748A86D}.Release|Any CPU.ActiveCfg = Release|Any CPU {939FA5F7-16AA-4847-812B-6EBC3748A86D}.Release|Any CPU.Build.0 = Release|Any CPU - {71455B07-E628-4F3A-9FFF-9EC63071F78E}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {71455B07-E628-4F3A-9FFF-9EC63071F78E}.Debug|Any CPU.Build.0 = Debug|Any CPU - {71455B07-E628-4F3A-9FFF-9EC63071F78E}.Release|Any CPU.ActiveCfg = Release|Any CPU - {71455B07-E628-4F3A-9FFF-9EC63071F78E}.Release|Any CPU.Build.0 = Release|Any CPU - {94D233F5-2400-4542-98B9-BA72005C57DC}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {94D233F5-2400-4542-98B9-BA72005C57DC}.Debug|Any CPU.Build.0 = Debug|Any CPU - {94D233F5-2400-4542-98B9-BA72005C57DC}.Release|Any CPU.ActiveCfg = Release|Any CPU - {94D233F5-2400-4542-98B9-BA72005C57DC}.Release|Any CPU.Build.0 = Release|Any CPU {8FDB623D-779B-4A84-BC6B-75106E41D8A4}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {8FDB623D-779B-4A84-BC6B-75106E41D8A4}.Debug|Any CPU.Build.0 = Debug|Any CPU {8FDB623D-779B-4A84-BC6B-75106E41D8A4}.Release|Any CPU.ActiveCfg = Release|Any CPU @@ -193,6 +174,34 @@ Global {8FB98E7D-DAE2-4465-BD9A-104000E0A2D4}.Debug|Any CPU.Build.0 = Debug|Any CPU {8FB98E7D-DAE2-4465-BD9A-104000E0A2D4}.Release|Any CPU.ActiveCfg = Release|Any CPU {8FB98E7D-DAE2-4465-BD9A-104000E0A2D4}.Release|Any CPU.Build.0 = 
Release|Any CPU + {A0F89B8B-32A3-4C28-8F1B-ADE343F16137}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {A0F89B8B-32A3-4C28-8F1B-ADE343F16137}.Debug|Any CPU.Build.0 = Debug|Any CPU + {A0F89B8B-32A3-4C28-8F1B-ADE343F16137}.Release|Any CPU.ActiveCfg = Release|Any CPU + {A0F89B8B-32A3-4C28-8F1B-ADE343F16137}.Release|Any CPU.Build.0 = Release|Any CPU + {69A0ACF2-DF1F-4F49-B554-F732DCA938A3}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {69A0ACF2-DF1F-4F49-B554-F732DCA938A3}.Debug|Any CPU.Build.0 = Debug|Any CPU + {69A0ACF2-DF1F-4F49-B554-F732DCA938A3}.Release|Any CPU.ActiveCfg = Release|Any CPU + {69A0ACF2-DF1F-4F49-B554-F732DCA938A3}.Release|Any CPU.Build.0 = Release|Any CPU + {1DC43E76-5372-4C7F-A433-0602273E87FC}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {1DC43E76-5372-4C7F-A433-0602273E87FC}.Debug|Any CPU.Build.0 = Debug|Any CPU + {1DC43E76-5372-4C7F-A433-0602273E87FC}.Release|Any CPU.ActiveCfg = Release|Any CPU + {1DC43E76-5372-4C7F-A433-0602273E87FC}.Release|Any CPU.Build.0 = Release|Any CPU + {59889284-FFEE-82E7-94CB-3B43E87DA6CF}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {59889284-FFEE-82E7-94CB-3B43E87DA6CF}.Debug|Any CPU.Build.0 = Debug|Any CPU + {59889284-FFEE-82E7-94CB-3B43E87DA6CF}.Release|Any CPU.ActiveCfg = Release|Any CPU + {59889284-FFEE-82E7-94CB-3B43E87DA6CF}.Release|Any CPU.Build.0 = Release|Any CPU + {190742E1-FA50-4E36-A8C4-88AE87654340}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {190742E1-FA50-4E36-A8C4-88AE87654340}.Debug|Any CPU.Build.0 = Debug|Any CPU + {190742E1-FA50-4E36-A8C4-88AE87654340}.Release|Any CPU.ActiveCfg = Release|Any CPU + {190742E1-FA50-4E36-A8C4-88AE87654340}.Release|Any CPU.Build.0 = Release|Any CPU + {05761CF5-CC46-43A6-814B-6BD2ECC1F0ED}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {05761CF5-CC46-43A6-814B-6BD2ECC1F0ED}.Debug|Any CPU.Build.0 = Debug|Any CPU + {05761CF5-CC46-43A6-814B-6BD2ECC1F0ED}.Release|Any CPU.ActiveCfg = Release|Any CPU + {05761CF5-CC46-43A6-814B-6BD2ECC1F0ED}.Release|Any CPU.Build.0 = Release|Any CPU + 
{CA67D8CA-6CC9-40E2-8CAC-F0B1401BEF7B}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {CA67D8CA-6CC9-40E2-8CAC-F0B1401BEF7B}.Debug|Any CPU.Build.0 = Debug|Any CPU + {CA67D8CA-6CC9-40E2-8CAC-F0B1401BEF7B}.Release|Any CPU.ActiveCfg = Release|Any CPU + {CA67D8CA-6CC9-40E2-8CAC-F0B1401BEF7B}.Release|Any CPU.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE @@ -202,8 +211,6 @@ Global {EF84877F-59BE-41BE-9013-E765AF0BB72E} = {00CA0876-DA9F-44E8-B0DC-A88716BF347A} {3B8BD8F1-8BFC-4D8C-B4DA-25FFAF3D1DBE} = {73A5C363-CA1F-44C4-9A9B-EF791A76BA6A} {939FA5F7-16AA-4847-812B-6EBC3748A86D} = {73A5C363-CA1F-44C4-9A9B-EF791A76BA6A} - {71455B07-E628-4F3A-9FFF-9EC63071F78E} = {00CA0876-DA9F-44E8-B0DC-A88716BF347A} - {94D233F5-2400-4542-98B9-BA72005C57DC} = {73A5C363-CA1F-44C4-9A9B-EF791A76BA6A} {36255A0A-89EC-43C8-A642-F4C1ACAEF5BC} = {96E891CD-2ED7-4293-A7AB-4C6F5D8D2B05} {A3B4B972-5BD2-4D90-981F-7E51E350E628} = {96E891CD-2ED7-4293-A7AB-4C6F5D8D2B05} {38BDEEED-7BEB-4B1F-9CE0-256D63F9C502} = {96E891CD-2ED7-4293-A7AB-4C6F5D8D2B05} @@ -212,10 +219,14 @@ Global {D082703F-1652-4C35-840D-7D377F6B9979} = {96E891CD-2ED7-4293-A7AB-4C6F5D8D2B05} {8375813E-FBAF-4DA3-A2C7-E4645B39B931} = {E25031D3-5C64-430D-B86F-697B66816FD8} {3DA1EEED-E9FE-43D9-B293-E000CFCCD91A} = {E25031D3-5C64-430D-B86F-697B66816FD8} - {153A10E4-E668-41AD-9E0F-6785CE7EED66} = {3AD17044-6BFF-4750-9AC2-2CA466375F2A} {D58114AE-4998-4647-AFCA-9353D20495AE} = {E25031D3-5C64-430D-B86F-697B66816FD8} {A9F81DA3-DA82-423E-A5DD-B11C37548E06} = {96E891CD-2ED7-4293-A7AB-4C6F5D8D2B05} - {3FA2A7C6-DA16-4DEF-ACE0-34573A4AD430} = {96E891CD-2ED7-4293-A7AB-4C6F5D8D2B05} + {A0F89B8B-32A3-4C28-8F1B-ADE343F16137} = {73A5C363-CA1F-44C4-9A9B-EF791A76BA6A} + {69A0ACF2-DF1F-4F49-B554-F732DCA938A3} = {73A5C363-CA1F-44C4-9A9B-EF791A76BA6A} + {59889284-FFEE-82E7-94CB-3B43E87DA6CF} = {73A5C363-CA1F-44C4-9A9B-EF791A76BA6A} + {190742E1-FA50-4E36-A8C4-88AE87654340} = 
{5FA0958E-6EBD-45F4-808E-3447A293F96F} + {05761CF5-CC46-43A6-814B-6BD2ECC1F0ED} = {00CA0876-DA9F-44E8-B0DC-A88716BF347A} + {CA67D8CA-6CC9-40E2-8CAC-F0B1401BEF7B} = {73A5C363-CA1F-44C4-9A9B-EF791A76BA6A} EndGlobalSection GlobalSection(ExtensibilityGlobals) = postSolution SolutionGuid = {193AA352-6748-47C1-A5FC-C9AA6B5F000B} diff --git a/StackExchange.Redis.sln.DotSettings b/StackExchange.Redis.sln.DotSettings index 165f8337f..8dd9095d9 100644 --- a/StackExchange.Redis.sln.DotSettings +++ b/StackExchange.Redis.sln.DotSettings @@ -1,3 +1,29 @@  OK - PONG \ No newline at end of file + PONG + True + True + True + True + True + True + True + True + True + True + True + True + True + True + True + True + True + True + True + True + True + True + True + True + True + True \ No newline at end of file diff --git a/appveyor.yml b/appveyor.yml index 2cbf38d46..678032414 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -6,17 +6,17 @@ init: install: - cmd: >- - choco install dotnet-sdk --version 5.0.100 + choco install dotnet-9.0-sdk cd tests\RedisConfigs\3.0.503 - redis-server.exe --service-install --service-name "redis-6379" "..\Basic\master-6379.conf" + redis-server.exe --service-install --service-name "redis-6379" "..\Basic\primary-6379-3.0.conf" redis-server.exe --service-install --service-name "redis-6380" "..\Basic\replica-6380.conf" redis-server.exe --service-install --service-name "redis-6381" "..\Basic\secure-6381.conf" - redis-server.exe --service-install --service-name "redis-6382" "..\Failover\master-6382.conf" + redis-server.exe --service-install --service-name "redis-6382" "..\Failover\primary-6382.conf" redis-server.exe --service-install --service-name "redis-6383" "..\Failover\replica-6383.conf" @@ -48,6 +48,10 @@ install: Start-Service redis-* } +branches: + only: + - main + skip_branch_with_pr: true skip_tags: true skip_commits: @@ -63,11 +67,12 @@ nuget: disable_publish_on_pr: true build_script: -- ps: .\build.ps1 -PullRequestNumber 
"$env:APPVEYOR_PULL_REQUEST_NUMBER" -CreatePackages ($env:OS -eq "Windows_NT") +- ps: .\build.ps1 -PullRequestNumber "$env:APPVEYOR_PULL_REQUEST_NUMBER" -CreatePackages ($env:OS -eq "Windows_NT") -NetCoreOnlyTests test: off artifacts: - path: .\.nupkgs\*.nupkg +- path: '**\*.trx' deploy: - provider: NuGet diff --git a/build.ps1 b/build.ps1 index 24152baab..3ace75a06 100644 --- a/build.ps1 +++ b/build.ps1 @@ -3,7 +3,8 @@ param( [bool] $CreatePackages, [switch] $StartServers, [bool] $RunTests = $true, - [string] $PullRequestNumber + [string] $PullRequestNumber, + [switch] $NetCoreOnlyTests ) Write-Host "Run Parameters:" -ForegroundColor Cyan @@ -29,7+30,11 @@ if ($RunTests) { Write-Host "Servers Started." -ForegroundColor "Green" } Write-Host "Running tests: Build.csproj traversal (all frameworks)" -ForegroundColor "Magenta" - dotnet test ".\Build.csproj" -c Release --no-build --logger trx + if ($NetCoreOnlyTests) { + dotnet test ".\Build.csproj" -c Release -f net8.0 --no-build --logger trx + } else { + dotnet test ".\Build.csproj" -c Release --no-build --logger trx + } if ($LastExitCode -ne 0) { Write-Host "Error with tests, aborting build." -Foreground "Red" Exit 1 diff --git a/docs/AsyncTimeouts.md b/docs/AsyncTimeouts.md new file mode 100644 index 000000000..04892d59a --- /dev/null +++ b/docs/AsyncTimeouts.md @@ -0,0 +1,79 @@ +# Async timeouts and cancellation + +StackExchange.Redis directly supports timeout of *synchronous* operations, but for *asynchronous* operations, it is recommended +to use the inbuilt framework support for cancellation and timeouts, i.e. the [WaitAsync](https://learn.microsoft.com/dotnet/api/system.threading.tasks.task.waitasync) +family of methods. This allows the caller to control timeout (via `TimeSpan`), cancellation (via `CancellationToken`), or both. + +Note that it is possible that operations will still be buffered and may still be issued to the server *after* timeout/cancellation, meaning +that the caller isn't observing the result. 
+ +## Usage + +### Timeout + +Timeouts are probably the most common cancellation scenario: + +```csharp +var timeout = TimeSpan.FromSeconds(5); +await database.StringSetAsync("key", "value").WaitAsync(timeout); +var value = await database.StringGetAsync("key").WaitAsync(timeout); +``` + +### Cancellation + +You can also use `CancellationToken` to drive cancellation, identically: + +```csharp +CancellationToken token = ...; // for example, from HttpContext.RequestAborted +await database.StringSetAsync("key", "value").WaitAsync(token); +var value = await database.StringGetAsync("key").WaitAsync(token); +``` +### Combined Cancellation and Timeout + +These two concepts can be combined so that if either cancellation or timeout occur, the caller's +operation is cancelled: + +```csharp +var timeout = TimeSpan.FromSeconds(5); +CancellationToken token = ...; // for example, from HttpContext.RequestAborted +await database.StringSetAsync("key", "value").WaitAsync(timeout, token); +var value = await database.StringGetAsync("key").WaitAsync(timeout, token); +``` + +### Creating a timeout for multiple operations + +If you want a timeout to apply to a *group* of operations rather than individually, then you +can using `CancellationTokenSource` to create a `CancellationToken` that is cancelled after a +specified timeout. 
For example: + +```csharp +var timeout = TimeSpan.FromSeconds(5); +using var cts = new CancellationTokenSource(timeout); +await database.StringSetAsync("key", "value").WaitAsync(cts.Token); +var value = await database.StringGetAsync("key").WaitAsync(cts.Token); +``` + +This can additionally be combined with one-or-more cancellation tokens: + +```csharp +var timeout = TimeSpan.FromSeconds(5); +CancellationToken token = ...; // for example, from HttpContext.RequestAborted +using var cts = CancellationTokenSource.CreateLinkedTokenSource(token); // or multiple tokens +cts.CancelAfter(timeout); +await database.StringSetAsync("key", "value").WaitAsync(cts.Token); +var value = await database.StringGetAsync("key").WaitAsync(cts.Token); +``` + +### Cancelling keys enumeration + +Keys being enumerated (via `SCAN`) can *also* be cancelled, using the inbuilt `.WithCancellation(...)` method: + +```csharp +CancellationToken token = ...; // for example, from HttpContext.RequestAborted +await foreach (var key in server.KeysAsync(pattern: "*foo*").WithCancellation(token)) +{ + ... +} +``` + +To use a timeout instead, you can use the `CancellationTokenSource` approach shown above. \ No newline at end of file diff --git a/docs/Authentication.md b/docs/Authentication.md new file mode 100644 index 000000000..15a673d19 --- /dev/null +++ b/docs/Authentication.md @@ -0,0 +1,129 @@ +# Authentication + +There are multiple ways of connecting to a Redis server, depending on the authentication model. The simplest +(but least secure) approach is to use the `default` user, with no authentication, and no transport security. +This is as simple as: + +``` csharp +var muxer = await ConnectionMultiplexer.ConnectAsync("myserver"); // or myserver:1241 to use a custom port +``` + +This approach is often used for local transient servers - it is simple, but insecure. But from there, +we can get more complex! + +## TLS + +If your server has TLS enabled, SE.Redis can be instructed to use it. 
In some cases (Azure Managed Redis, etc), the +library will recognize the endpoint address, meaning: *you do not need to do anything*. To +*manually* enable TLS, the `ssl` token can be used: + +``` csharp +var muxer = await ConnectionMultiplexer.ConnectAsync("myserver,ssl=true"); +``` + +This will work fine if the server is using a server-certificate that is already trusted by the local +machine. If this is *not* the case, we need to tell the library about the server. This requires +the `ConfigurationOptions` type: + +``` csharp +var options = ConfigurationOptions.Parse("myserver,ssl=true"); +// or: var options = new ConfigurationOptions { Endpoints = { "myserver" }, Ssl = true }; +// TODO configure +var muxer = await ConnectionMultiplexer.ConnectAsync(options); +``` + +If we have a local *issuer* public certificate (commonly `ca.crt`), we can use: + +``` csharp +options.TrustIssuer(caPath); +``` + +Alternatively, in advanced scenarios: to provide your own custom server validation, the `options.CertificateValidation` callback +can be used; this uses the normal [`RemoteCertificateValidationCallback`](https://learn.microsoft.com/dotnet/api/system.net.security.remotecertificatevalidationcallback) +API. + +## Usernames and Passwords + +Usernames and passwords can be specified with the `user` and `password` tokens, respectively: + +``` csharp +var muxer = await ConnectionMultiplexer.ConnectAsync("myserver,ssl=true,user=myuser,password=mypassword"); +``` + +If no `user` is provided, the `default` user is assumed. In some cases, an authentication-token can be +used in place of a classic password. + +## Managed identities + +If the server is an Azure Managed Redis resource, connections can be secured using Microsoft Entra ID authentication. Use the [Microsoft.Azure.StackExchangeRedis](https://github.com/Azure/Microsoft.Azure.StackExchangeRedis) extension package to handle the authentication using tokens retrieved from Microsoft Entra. 
The package integrates via the ConfigurationOptions class, and can use various types of identities for token retrieval. For example with a user-assigned managed identity: + +```csharp +var options = ConfigurationOptions.Parse("mycache.region.redis.azure.net:10000"); +await options.ConfigureForAzureWithUserAssignedManagedIdentityAsync(managedIdentityClientId); +``` + +For details and samples see [https://github.com/Azure/Microsoft.Azure.StackExchangeRedis](https://github.com/Azure/Microsoft.Azure.StackExchangeRedis) + +## Client certificates + +If the server is configured to require a client certificate, this can be supplied in multiple ways. +If you have a local public / private key pair (such as `MyUser2.crt` and `MyUser2.key`), the +`options.SetUserPemCertificate(...)` method can be used: + +``` csharp +options.SetUserPemCertificate( + userCertificatePath: userCrtPath, + userKeyPath: userKeyPath +); +``` + +If you have a single `pfx` file that contains the public / private pair, the `options.SetUserPfxCertificate(...)` +method can be used: + +``` csharp +options.SetUserPfxCertificate( + userCertificatePath: userCrtPath, + password: filePassword // optional +); +``` + +Alternatively, in advanced scenarios: to provide your own custom client-certificate lookup, the `options.CertificateSelection` callback +can be used; this uses the normal +[`LocalCertificateSelectionCallback`](https://learn.microsoft.com/dotnet/api/system.net.security.localcertificateselectioncallback) +API. + +## User certificates with implicit user authentication + +Historically, the client certificate only provided access to the server, but as the `default` user. From 8.6, +the server can be configured to use client certificates to provide user identity. This replaces the +usage of passwords, and requires: + +- An 8.6+ server, configured to use TLS with client certificates mapped - typically using the `CN` of the certificate as the user. 
+- A matching `ACL` user account configured on the server, that is enabled (`on`) - i.e. the `ACL LIST` command should + display something like `user MyUser2 on sanitize-payload ~* &* +@all` (the details will vary depending on the user permissions). +- At the client: access to the client certificate pair. + +For example: + +``` csharp +string certRoot = // some path to a folder with ca.crt, MyUser2.crt and MyUser2.key + +var options = ConfigurationOptions.Parse("myserver:6380"); +options.SetUserPemCertificate(// automatically enables TLS + userCertificatePath: Path.Combine(certRoot, "MyUser2.crt"), + userKeyPath: Path.Combine(certRoot, "MyUser2.key")); +options.TrustIssuer(Path.Combine(certRoot, "ca.crt")); +await using var conn = await ConnectionMultiplexer.ConnectAsync(options); + +// prove we are connected as MyUser2 +var user = (string?)await conn.GetDatabase().ExecuteAsync("acl", "whoami"); +Console.WriteLine(user); // writes "MyUser2" +``` + +## More info + +For more information: + +- [Redis Security](https://redis.io/docs/latest/operate/oss_and_stack/management/security/) + - [ACL](https://redis.io/docs/latest/operate/oss_and_stack/management/security/acl/) + - [TLS](https://redis.io/docs/latest/operate/oss_and_stack/management/security/encryption/) diff --git a/docs/Basics.md b/docs/Basics.md index 860658458..4d843cb3a 100644 --- a/docs/Basics.md +++ b/docs/Basics.md @@ -12,18 +12,18 @@ ConnectionMultiplexer redis = ConnectionMultiplexer.Connect("localhost"); Note that `ConnectionMultiplexer` implements `IDisposable` and can be disposed when no longer required. This is deliberately not showing `using` statement usage, because it is exceptionally rare that you would want to use a `ConnectionMultiplexer` briefly, as the idea is to re-use this object. 
-A more complicated scenario might involve a master/replica setup; for this usage, simply specify all the desired nodes that make up that logical redis tier (it will automatically identify the master): +A more complicated scenario might involve a primary/replica setup; for this usage, simply specify all the desired nodes that make up that logical redis tier (it will automatically identify the primary): ```csharp ConnectionMultiplexer redis = ConnectionMultiplexer.Connect("server1:6379,server2:6379"); ``` -If it finds both nodes are masters, a tie-breaker key can optionally be specified that can be used to resolve the issue, however such a condition is fortunately very rare. +If it finds both nodes are primaries, a tie-breaker key can optionally be specified that can be used to resolve the issue, however such a condition is fortunately very rare. Once you have a `ConnectionMultiplexer`, there are 3 main things you might want to do: - access a redis database (note that in the case of a cluster, a single logical database may be spread over multiple nodes) -- make use of the [pub/sub](http://redis.io/topics/pubsub) features of redis +- make use of the [pub/sub](https://redis.io/topics/pubsub) features of redis - access an individual server for maintenance / monitoring purposes Using a redis database @@ -43,7 +43,7 @@ object asyncState = ... IDatabase db = redis.GetDatabase(databaseNumber, asyncState); ``` -Once you have the `IDatabase`, it is simply a case of using the [redis API](http://redis.io/commands). Note that all methods have both synchronous and asynchronous implementations. In line with Microsoft's naming guidance, the asynchronous methods all end `...Async(...)`, and are fully `await`-able etc. +Once you have the `IDatabase`, it is simply a case of using the [redis API](https://redis.io/commands). Note that all methods have both synchronous and asynchronous implementations. 
In line with Microsoft's naming guidance, the asynchronous methods all end `...Async(...)`, and are fully `await`-able etc. The simplest operation would be to store and retrieve a value: @@ -55,7 +55,7 @@ string value = db.StringGet("mykey"); Console.WriteLine(value); // writes: "abcdefg" ``` -Note that the `String...` prefix here denotes the [String redis type](http://redis.io/topics/data-types), and is largely separate to the [.NET String type][3], although both can store text data. However, redis allows raw binary data for both keys and values - the usage is identical: +Note that the `String...` prefix here denotes the [String redis type](https://redis.io/topics/data-types), and is largely separate to the [.NET String type][3], although both can store text data. However, redis allows raw binary data for both keys and values - the usage is identical: ```csharp byte[] key = ..., value = ...; @@ -64,18 +64,18 @@ db.StringSet(key, value); byte[] value = db.StringGet(key); ``` -The entire range of [redis database commands](http://redis.io/commands) covering all redis data types is available for use. +The entire range of [redis database commands](https://redis.io/commands) covering all redis data types is available for use. Using redis pub/sub ---- -Another common use of redis is as a [pub/sub message](http://redis.io/topics/pubsub) distribution tool; this is also simple, and in the event of connection failure, the `ConnectionMultiplexer` will handle all the details of re-subscribing to the requested channels. +Another common use of redis is as a [pub/sub message](https://redis.io/topics/pubsub) distribution tool; this is also simple, and in the event of connection failure, the `ConnectionMultiplexer` will handle all the details of re-subscribing to the requested channels. ```csharp ISubscriber sub = redis.GetSubscriber(); ``` -Again, the object returned from `GetSubscriber` is a cheap pass-thru object that does not need to be stored. 
The pub/sub API has no concept of databases, but as before we can optionally provide an async-state. Note that all subscriptions are global: they are not scoped to the lifetime of the `ISubscriber` instance. The pub/sub features in redis use named "channels"; channels do not need to be defined in advance on the server (an interesting use here is things like per-user notification channels, which is what drives parts of the realtime updates on [Stack Overflow](http://stackoverflow.com)). As is common in .NET, subscriptions take the form of callback delegates which accept the channel-name and the message: +Again, the object returned from `GetSubscriber` is a cheap pass-thru object that does not need to be stored. The pub/sub API has no concept of databases, but as before we can optionally provide an async-state. Note that all subscriptions are global: they are not scoped to the lifetime of the `ISubscriber` instance. The pub/sub features in redis use named "channels"; channels do not need to be defined in advance on the server (an interesting use here is things like per-user notification channels, which is what drives parts of the realtime updates on [Stack Overflow](https://stackoverflow.com)). As is common in .NET, subscriptions take the form of callback delegates which accept the channel-name and the message: ```csharp sub.Subscribe("messages", (channel, message) => { @@ -118,13 +118,13 @@ For maintenance purposes, it is sometimes necessary to issue server-specific com IServer server = redis.GetServer("localhost", 6379); ``` -The `GetServer` method will accept an [`EndPoint`](http://msdn.microsoft.com/en-us/library/system.net.endpoint(v=vs.110).aspx) or the name/value pair that uniquely identify the server. As before, the object returned from `GetServer` is a cheap pass-thru object that does not need to be stored, and async-state can be optionally specified. 
Note that the set of available endpoints is also available: +The `GetServer` method will accept an [`EndPoint`](https://docs.microsoft.com/en-us/dotnet/api/system.net.endpoint) or the name/value pair that uniquely identify the server. As before, the object returned from `GetServer` is a cheap pass-thru object that does not need to be stored, and async-state can be optionally specified. Note that the set of available endpoints is also available: ```csharp EndPoint[] endpoints = redis.GetEndPoints(); ``` -From the `IServer` instance, the [Server commands](http://redis.io/commands#server) are available; for example: +From the `IServer` instance, the [Server commands](https://redis.io/commands#server) are available; for example: ```csharp DateTime lastSave = server.LastSave(); @@ -139,7 +139,7 @@ There are 3 primary usage mechanisms with StackExchange.Redis: - Synchronous - where the operation completes before the methods returns to the caller (note that while this may block the caller, it absolutely **does not** block other threads: the key idea in StackExchange.Redis is that it aggressively shares the connection between concurrent callers) - Asynchronous - where the operation completes some time in the future, and a `Task` or `Task` is returned immediately, which can later: - be `.Wait()`ed (blocking the current thread until the response is available) - - have a continuation callback added ([`ContinueWith`](http://msdn.microsoft.com/en-us/library/system.threading.tasks.task.continuewith(v=vs.110).aspx) in the TPL) + - have a continuation callback added ([`ContinueWith`](https://docs.microsoft.com/en-us/dotnet/api/system.threading.tasks.task.continuewith) in the TPL) - be *awaited* (which is a language-level feature that simplifies the latter, while also continuing immediately if the reply is already known) - Fire-and-Forget - where you really aren't interested in the reply, and are happy to continue irrespective of the response @@ -161,9 +161,6 @@ The fire-and-forget 
usage is accessed by the optional `CommandFlags flags` param db.StringIncrement(pageKey, flags: CommandFlags.FireAndForget); ``` - - - - [1]: http://msdn.microsoft.com/en-us/library/dd460717%28v=vs.110%29.aspx - [2]: http://msdn.microsoft.com/en-us/library/system.threading.tasks.task.asyncstate(v=vs.110).aspx - [3]: http://msdn.microsoft.com/en-us/library/system.string(v=vs.110).aspx + [1]: https://docs.microsoft.com/en-us/dotnet/standard/parallel-programming/task-parallel-library-tpl + [2]: https://docs.microsoft.com/en-us/dotnet/api/system.threading.tasks.task.asyncstate + [3]: https://docs.microsoft.com/en-us/dotnet/api/system.string diff --git a/docs/CompareAndSwap.md b/docs/CompareAndSwap.md new file mode 100644 index 000000000..7d79d42a0 --- /dev/null +++ b/docs/CompareAndSwap.md @@ -0,0 +1,321 @@ +# Compare-And-Swap / Compare-And-Delete (CAS/CAD) + +Redis 8.4 introduces atomic Compare-And-Swap (CAS) and Compare-And-Delete (CAD) operations, allowing you to conditionally modify +or delete values based on their current state. SE.Redis exposes these features through the `ValueCondition` abstraction. + +## Prerequisites + +- Redis 8.4.0 or later + +## Overview + +Traditional Redis operations like `SET NX` (set if not exists) and `SET XX` (set if exists) only check for key existence. +CAS/CAD operations go further by allowing you to verify the **actual value** before making changes, enabling true atomic +compare-and-swap semantics, without requiring Lua scripts or complex `MULTI`/`WATCH`/`EXEC` usage. 
+ +The `ValueCondition` struct supports several condition types: + +- **Existence checks**: `Always`, `Exists`, `NotExists` (equivalent to the traditional `When` enum) +- **Value equality**: `Equal(value)`, `NotEqual(value)` - compare the full value (uses `IFEQ`/`IFNE`) +- **Digest equality**: `DigestEqual(value)`, `DigestNotEqual(value)` - compare XXH3 64-bit hash (uses `IFDEQ`/`IFDNE`) + +## Basic Value Equality Checks + +Use value equality when you need to verify the exact current value before updating or deleting: + +```csharp +var db = connection.GetDatabase(); +var key = "user:session:12345"; + +// Set a value only if it currently equals a specific value +var currentToken = "old-token-abc"; +var newToken = "new-token-xyz"; + +var wasSet = await db.StringSetAsync( + key, + newToken, + when: ValueCondition.Equal(currentToken) +); + +if (wasSet) +{ + Console.WriteLine("Token successfully rotated"); +} +else +{ + Console.WriteLine("Token mismatch - someone else updated it"); +} +``` + +### Conditional Delete + +Delete a key only if it contains a specific value: + +```csharp +var lockToken = "my-unique-lock-token"; + +// Only delete if the lock still has our token +var wasDeleted = await db.StringDeleteAsync( + "resource:lock", + when: ValueCondition.Equal(lockToken) +); + +if (wasDeleted) +{ + Console.WriteLine("Lock released successfully"); +} +else +{ + Console.WriteLine("Lock was already released or taken by someone else"); +} +``` + +(see also the [Lock Operations section](#lock-operations) below) + +## Digest-Based Checks + +For large values, comparing the full value can be inefficient. 
Digest-based checks use XXH3 64-bit hashing to compare values efficiently: + +```csharp +var key = "document:content"; +var largeDocument = GetLargeDocumentBytes(); // e.g., 10MB + +// Calculate digest locally +var expectedDigest = ValueCondition.CalculateDigest(largeDocument); + +// Update only if the document hasn't changed +var newDocument = GetUpdatedDocumentBytes(); +var wasSet = await db.StringSetAsync( + key, + newDocument, + when: expectedDigest +); +``` + +### Retrieving Server-Side Digests + +You can retrieve the digest of a value stored in Redis without fetching the entire value: + +```csharp +// Get the digest of the current value +var digest = await db.StringDigestAsync(key); + +if (digest.HasValue) +{ + Console.WriteLine($"Current digest: {digest.Value}"); + + // Later, use this digest for conditional operations + var wasDeleted = await db.StringDeleteAsync(key, when: digest.Value); +} +else +{ + Console.WriteLine("Key does not exist"); +} +``` + +## Negating Conditions + +Use the `!` operator to negate any condition: + +```csharp +var expectedValue = "old-value"; + +// Set only if the value is NOT equal to expectedValue +var wasSet = await db.StringSetAsync( + key, + "new-value", + when: !ValueCondition.Equal(expectedValue) +); + +// Equivalent to: +var wasSet2 = await db.StringSetAsync( + key, + "new-value", + when: ValueCondition.NotEqual(expectedValue) +); +``` + +## Converting Between Value and Digest Conditions + +Convert a value condition to a digest condition for efficiency: + +```csharp +var valueCondition = ValueCondition.Equal("some-value"); + +// Convert to digest-based check +var digestCondition = valueCondition.AsDigest(); + +// Now uses IFDEQ instead of IFEQ +var wasSet = await db.StringSetAsync(key, "new-value", when: digestCondition); +``` + +## Parsing Digests + +If you receive a XXH3 digest as a hex string (e.g., from external systems), you can parse it: + +```csharp +// Parse from hex string +var digestCondition = 
ValueCondition.ParseDigest("e34615aade2e6333"); + +// Use in conditional operations +var wasSet = await db.StringSetAsync(key, newValue, when: digestCondition); +``` + +## Lock Operations + +StackExchange.Redis automatically uses CAS/CAD for lock operations when Redis 8.4+ is available, providing better performance and atomicity: + +```csharp +var lockKey = "resource:lock"; +var lockToken = Guid.NewGuid().ToString(); +var lockExpiry = TimeSpan.FromSeconds(30); + +// Take a lock (uses NX internally) +if (await db.LockTakeAsync(lockKey, lockToken, lockExpiry)) +{ + try + { + // Do work while holding the lock + + // Extend the lock (uses CAS internally on Redis 8.4+) + if (!(await db.LockExtendAsync(lockKey, lockToken, lockExpiry))) + { + // Failed to extend the lock - it expired, or was forcibly taken against our will + throw new InvalidOperationException("Lock extension failed - check expiry duration is appropriate."); + } + + // Do more work... + } + finally + { + // Release the lock (uses CAD internally on Redis 8.4+) + await db.LockReleaseAsync(lockKey, lockToken); + } +} +``` + +On Redis 8.4+, `LockExtend` uses `SET` with `IFEQ` and `LockRelease` uses `DELEX` with `IFEQ`, eliminating +the need for transactions. 
+ +## Common Patterns + +### Optimistic Locking + +Implement optimistic concurrency control for updating data: + +```csharp +async Task UpdateUserProfileAsync(string userId, Func updateFunc) +{ + var key = $"user:profile:{userId}"; + + // Read current value + var currentJson = await db.StringGetAsync(key); + if (currentJson.IsNull) + { + return false; // User doesn't exist + } + + var currentProfile = JsonSerializer.Deserialize(currentJson!); + var updatedProfile = updateFunc(currentProfile); + var updatedJson = JsonSerializer.Serialize(updatedProfile); + + // Attempt to update only if value hasn't changed + var wasSet = await db.StringSetAsync( + key, + updatedJson, + when: ValueCondition.Equal(currentJson) + ); + + return wasSet; // Returns false if someone else modified it +} + +// Usage with retry logic +int maxRetries = 10; +for (int i = 0; i < maxRetries; i++) +{ + if (await UpdateUserProfileAsync(userId, profile => + { + profile.LastLogin = DateTime.UtcNow; + return profile; + })) + { + break; // Success + } + + // Retry with exponential backoff + await Task.Delay(TimeSpan.FromMilliseconds(Math.Pow(2, i) * 10)); +} +``` + +### Session Token Rotation + +Safely rotate session tokens with atomic verification: + +```csharp +async Task RotateSessionTokenAsync(string sessionId, string expectedToken) +{ + var key = $"session:{sessionId}"; + var newToken = GenerateSecureToken(); + + // Only rotate if the current token matches + var wasRotated = await db.StringSetAsync( + key, + newToken, + expiry: TimeSpan.FromHours(24), + when: ValueCondition.Equal(expectedToken) + ); + + return wasRotated; +} +``` + +### Large Document Updates with Digest + +For large documents, use digests to avoid transferring the full value: + +```csharp +async Task UpdateLargeDocumentAsync(string docId, byte[] newContent) +{ + var key = $"document:{docId}"; + + // Get just the digest, not the full document + var currentDigest = await db.StringDigestAsync(key); + + if (!currentDigest.HasValue) 
+ { + return false; // Document doesn't exist + } + + // Update only if digest matches (document unchanged) + var wasSet = await db.StringSetAsync( + key, + newContent, + when: currentDigest.Value + ); + + return wasSet; +} +``` + +## Performance Considerations + +### Value vs. Digest Checks + +- **Value equality** (`IFEQ`/`IFNE`): Best for small values (< 1KB). Sends the full value to Redis for comparison. +- **Digest equality** (`IFDEQ`/`IFDNE`): Best for large values. Only sends a 16-character hex digest (8 bytes). + +```csharp +// For small values (session tokens, IDs, etc.) +var condition = ValueCondition.Equal(smallValue); + +// For large values (documents, images, etc.) +var condition = ValueCondition.DigestEqual(largeValue); +// or +var condition = ValueCondition.CalculateDigest(largeValueBytes); +``` + +## See Also + +- [Transactions](Transactions.md) - For multi-key atomic operations +- [Keys and Values](KeysValues.md) - Understanding Redis data types +- [Redis CAS/CAD Documentation](https://redis.io/docs/latest/commands/set/) - Redis 8.4 SET command with IFEQ/IFNE/IFDEQ/IFDNE modifiers diff --git a/docs/Configuration.md b/docs/Configuration.md index f73122608..96e4b5bae 100644 --- a/docs/Configuration.md +++ b/docs/Configuration.md @@ -1,6 +1,7 @@ -Configuration +# Configuration === +When connecting to Redis version 6 or above with an ACL configured, your ACL user needs to at least have permissions to run the ECHO command. We run this command to verify that we have a valid connection to the Redis service. Because there are lots of different ways to configure redis, StackExchange.Redis offers a rich configuration model, which is invoked when calling `Connect` (or `ConnectAsync`): ```csharp @@ -14,7 +15,7 @@ The `configuration` here can be either: The latter is *basically* a tokenized form of the former. 
-Basic Configuration Strings +## Basic Configuration Strings - The *simplest* configuration example is just the host name: @@ -30,11 +31,11 @@ var conn = ConnectionMultiplexer.Connect("redis0:6380,redis1:6380,allowAdmin=tru ``` If you specify a serviceName in the connection string, it will trigger sentinel mode. This example will connect to a sentinel server on the local machine -using the default sentinel port (26379), discover the current master server for the `mymaster` service and return a managed connection -pointing to that master server that will automatically be updated if the master changes: +using the default sentinel port (26379), discover the current primary server for the `myprimary` service and return a managed connection +pointing to that primary server that will automatically be updated if the primary changes: ```csharp -var conn = ConnectionMultiplexer.Connect("localhost,serviceName=mymaster"); +var conn = ConnectionMultiplexer.Connect("localhost,serviceName=myprimary"); ``` An overview of mapping between the `string` and `ConfigurationOptions` representation is shown below, but you can switch between them trivially: @@ -65,7 +66,7 @@ Microsoft Azure Redis example with password var conn = ConnectionMultiplexer.Connect("contoso5.redis.cache.windows.net,ssl=true,password=..."); ``` -Configuration Options +## Configuration Options --- The `ConfigurationOptions` object has a wide range of properties, all of which are fully documented in intellisense. 
Some of the more common options to use include: @@ -75,36 +76,64 @@ The `ConfigurationOptions` object has a wide range of properties, all of which a | abortConnect={bool} | `AbortOnConnectFail` | `true` (`false` on Azure) | If true, `Connect` will not create a connection while no servers are available | | allowAdmin={bool} | `AllowAdmin` | `false` | Enables a range of commands that are considered risky | | channelPrefix={string} | `ChannelPrefix` | `null` | Optional channel prefix for all pub/sub operations | -| checkCertificateRevocation={bool} | `CheckCertificateRevocation` | `true` | A Boolean value that specifies whether the certificate revocation list is checked during authentication. | +| checkCertificateRevocation={bool} | `CheckCertificateRevocation` | `true` | A Boolean value that specifies whether the certificate revocation list is checked during authentication. | | connectRetry={int} | `ConnectRetry` | `3` | The number of times to repeat connect attempts during initial `Connect` | | connectTimeout={int} | `ConnectTimeout` | `5000` | Timeout (ms) for connect operations | | configChannel={string} | `ConfigurationChannel` | `__Booksleeve_MasterChanged` | Broadcast channel name for communicating configuration changes | -| configCheckSeconds={int} | `ConfigCheckSeconds` | `60` | Time (seconds) to check configuration. This serves as a keep-alive for interactive sockets, if it is supported. | +| configCheckSeconds={int} | `ConfigCheckSeconds` | `60` | Time (seconds) to check configuration. This serves as a keep-alive for interactive sockets, if it is supported. 
| | defaultDatabase={int} | `DefaultDatabase` | `null` | Default database index, from `0` to `databases - 1` | | keepAlive={int} | `KeepAlive` | `-1` | Time (seconds) at which to send a message to help keep sockets alive (60 sec default) | | name={string} | `ClientName` | `null` | Identification for the connection within redis | | password={string} | `Password` | `null` | Password for the redis server | | user={string} | `User` | `null` | User for the redis server (for use with ACLs on redis 6 and above) | -| proxy={proxy type} | `Proxy` | `Proxy.None` | Type of proxy in use (if any); for example "twemproxy" | +| proxy={proxy type} | `Proxy` | `Proxy.None` | Type of proxy in use (if any); for example "twemproxy/envoyproxy" | | resolveDns={bool} | `ResolveDns` | `false` | Specifies that DNS resolution should be explicit and eager, rather than implicit | -| serviceName={string} | `ServiceName` | `null` | Used for connecting to a sentinel master service | +| serviceName={string} | `ServiceName` | `null` | Used for connecting to a sentinel primary service | | ssl={bool} | `Ssl` | `false` | Specifies that SSL encryption should be used | | sslHost={string} | `SslHost` | `null` | Enforces a particular SSL host identity on the server's certificate | | sslProtocols={enum} | `SslProtocols` | `null` | Ssl/Tls versions supported when using an encrypted connection. Use '\|' to provide multiple values. 
| | syncTimeout={int} | `SyncTimeout` | `5000` | Time (ms) to allow for synchronous operations | -| asyncTimeout={int} | `AsyncTimeout` | `SyncTimeout` | Time (ms) to allow for asynchronous operations | -| tiebreaker={string} | `TieBreaker` | `__Booksleeve_TieBreak` | Key to use for selecting a server in an ambiguous master scenario | -| version={string} | `DefaultVersion` | (`3.0` in Azure, else `2.0`) | Redis version level (useful when the server does not make this available) | -| | `CheckCertificateRevocation` | `true` | A Boolean value that specifies whether the certificate revocation list is checked during authentication. | +| asyncTimeout={int} | `AsyncTimeout` | `SyncTimeout` | Time (ms) to allow for asynchronous operations | +| tiebreaker={string} | `TieBreaker` | `__Booksleeve_TieBreak` | Key to use for selecting a server in an ambiguous primary scenario | +| version={string} | `DefaultVersion` | (`4.0` in Azure, else `2.0`) | Redis version level (useful when the server does not make this available) | +| tunnel={string} | `Tunnel` | `null` | Tunnel for connections (use `http:{proxy url}` for "connect"-based proxy server) | +| setlib={bool} | `SetClientLibrary` | `true` | Whether to attempt to use `CLIENT SETINFO` to set the library name/version on the connection | +| protocol={string} | `Protocol` | `null` | Redis protocol to use; see section below | +| highIntegrity={bool} | `HighIntegrity` | `false` | High integrity (incurs overhead) sequence checking on every command; see section below | Additional code-only options: -- ReconnectRetryPolicy (`IReconnectRetryPolicy`) - Default: `ReconnectRetryPolicy = LinearRetry(ConnectTimeout);` +- LoggerFactory (`ILoggerFactory`) - Default: `null` + - The logger to use for connection events (not per command), e.g. connection log, disconnects, reconnects, server errors. 
+- ReconnectRetryPolicy (`IReconnectRetryPolicy`) - Default: `ReconnectRetryPolicy = ExponentialRetry(ConnectTimeout / 2);` + - Determines how often a multiplexer will try to reconnect after a failure +- BacklogPolicy - Default: `BacklogPolicy = BacklogPolicy.Default;` + - Determines how commands will be queued (or not) during a disconnect, for sending when it's available again +- BeforeSocketConnect - Default: `null` + - Allows modifying a `Socket` before connecting (for advanced scenarios) +- SslClientAuthenticationOptions (`netcoreapp3.1`/`net5.0` and higher) - Default: `null` + - Allows specifying exact options for SSL/TLS authentication against a server (e.g. cipher suites, protocols, etc.) - overrides all other SSL configuration options. This is a `Func` which receives the host (or `SslHost` if set) to get the options for. If `null` is returned from the `Func`, it's the same as this property not being set at all when connecting. +- SocketManager - Default: `SocketManager.Shared`: + - The thread pool to use for scheduling work to and from the socket connected to Redis, one of... + - `SocketManager.Shared`: Use a shared dedicated thread pool for _all_ multiplexers (defaults to 10 threads) - best balance for most scenarios. + - `SocketManager.ThreadPool`: Use the build-in .NET thread pool for scheduling. This can perform better for very small numbers of cores or with large apps on large machines that need to use more than 10 threads (total, across all multiplexers) under load. **Important**: this option isn't the default because it's subject to thread pool growth/starvation and if for example synchronous calls are waiting on a redis command to come back to unblock other threads, stalls/hangs can result. Use with caution, especially if you have sync-over-async work in play. +- HighIntegrity - Default: `false` + - This enables sending a sequence check command after _every single command_ sent to Redis. 
This is an opt-in option that incurs overhead to add this integrity check which isn't in the Redis protocol (RESP2/3) itself. The impact on this for a given workload depends on the number of commands, size of payloads, etc. as to how proportionately impactful it will be - you should test with your workloads to assess this. + - This is especially relevant if your primary use case is all strings (e.g. key/value caching) where the protocol would otherwise not error. + - Intended for cases where network drops (e.g. bytes from the Redis stream, not packet loss) are suspected and integrity of responses is critical. +- HeartbeatConsistencyChecks - Default: `false` + - Allows _always_ sending keepalive checks even if a connection isn't idle. This trades extra commands (per `HeartbeatInterval` - default 1 second) to check the network stream for consistency. If any data was lost, the result won't be as expected and the connection will be terminated ASAP. This is a check to react to any data loss at the network layer as soon as possible. +- HeartbeatInterval - Default: `1000ms` + - Allows running the heartbeat more often which importantly includes timeout evaluation for async commands. For example if you have a 50ms async command timeout, we're only actually checking it during the heartbeat (once per second by default), so it's possible 50-1050ms pass _before we notice it timed out_. If you want more fidelity in that check and to observe that a server failed faster, you can lower this to run the heartbeat more often to achieve that. + - **Note: heartbeats are not free and that's why the default is 1 second. 
There is additional overhead to running this more often simply because it does some work each time it fires.** +- LibraryName - Default: `SE.Redis` (unless a `DefaultOptionsProvider` specifies otherwise) + - The library name to use with `CLIENT SETINFO` when setting the library name/version on the connection Tokens in the configuration string are comma-separated; any without an `=` sign are assumed to be redis server endpoints. Endpoints without an explicit port will use 6379 if ssl is not enabled, and 6380 if ssl is enabled. Tokens starting with `$` are taken to represent command maps, for example: `$config=cfg`. -Obsolete Configuration Options +## Obsolete Configuration Options --- + These options are parsed in connection strings for backwards compatibility (meaning they do not error as invalid), but no longer have any effect. | Configuration string | `ConfigurationOptions` | Previous Default | Previous Meaning | @@ -112,10 +141,10 @@ These options are parsed in connection strings for backwards compatibility (mean | responseTimeout={int} | `ResponseTimeout` | `SyncTimeout` | Time (ms) to decide whether the socket is unhealthy | | writeBuffer={int} | `WriteBuffer` | `4096` | Size of the output buffer | -Automatic and Manual Configuration +## Automatic and Manual Configuration --- -In many common scenarios, StackExchange.Redis will automatically configure a lot of settings, including the server type and version, connection timeouts, and master/replica relationships. Sometimes, though, the commands for this have been disabled on the redis server. In this case, it is useful to provide more information: +In many common scenarios, StackExchange.Redis will automatically configure a lot of settings, including the server type and version, connection timeouts, and primary/replica relationships. Sometimes, though, the commands for this have been disabled on the redis server. 
In this case, it is useful to provide more information: ```csharp ConfigurationOptions config = new ConfigurationOptions @@ -141,7 +170,8 @@ Which is equivalent to the command string: ```config redis0:6379,redis1:6380,keepAlive=180,version=2.8.8,$CLIENT=,$CLUSTER=,$CONFIG=,$ECHO=,$INFO=,$PING= ``` -Renaming Commands + +## Renaming Commands --- A slightly unusual feature of redis is that you can disable and/or rename individual commands. As per the previous example, this is done via the `CommandMap`, but instead of passing a `HashSet` to `Create()` (to indicate the available or unavailable commands), you pass a `Dictionary`. All commands not mentioned in the dictionary are assumed to be enabled and not renamed. A `null` or blank value records that the command is disabled. For example: @@ -164,10 +194,32 @@ The above is equivalent to (in the connection string): $INFO=,$SELECT=use ``` -Twemproxy +## Redis Server Permissions --- -[Twemproxy](https://github.com/twitter/twemproxy) is a tool that allows multiple redis instances to be used as though it were a single server, with inbuilt sharding and fault tolerance (much like redis cluster, but implemented separately). The feature-set available to Twemproxy is reduced. To avoid having to configure this manually, the `Proxy` option can be used: +If the user you're connecting to Redis with is limited, it still needs to have certain commands enabled for the StackExchange.Redis to succeed in connecting. The client uses: +- `AUTH` to authenticate +- `CLIENT` to set the client name +- `INFO` to understand server topology/settings +- `ECHO` for heartbeat. 
+- (Optional) `SUBSCRIBE` to observe change events +- (Optional) `CONFIG` to get/understand settings +- (Optional) `CLUSTER` to get cluster nodes +- (Optional) `SENTINEL` only for Sentinel servers +- (Optional) `GET` to determine tie breakers +- (Optional) `SET` (_only_ if `INFO` is disabled) to see if we're writable + +For example, a common _very_ minimal configuration ACL on the server (non-cluster) would be: +```bash +-@all +@pubsub +@read +echo +info +``` + +Note that if you choose to disable access to the above commands, it needs to be done via the `CommandMap` and not only the ACL on the server (otherwise we'll attempt the command and fail the handshake). Also, if any of the these commands are disabled, some functionality may be diminished or broken. + +## twemproxy +--- + +[twemproxy](https://github.com/twitter/twemproxy) is a tool that allows multiple redis instances to be used as though it were a single server, with inbuilt sharding and fault tolerance (much like redis cluster, but implemented separately). The feature-set available to Twemproxy is reduced. To avoid having to configure this manually, the `Proxy` option can be used: ```csharp var options = new ConfigurationOptions @@ -177,21 +229,34 @@ var options = new ConfigurationOptions }; ``` -Tiebreakers and Configuration Change Announcements +##envoyproxy +--- + +[Envoyproxy](https://github.com/envoyproxy/envoy) is a tool that allows to front a redis cluster with a set of proxies, with inbuilt discovery and fault tolerance. The feature-set available to Envoyproxy is reduced. To avoid having to configure this manually, the `Proxy` option can be used: +```csharp +var options = new ConfigurationOptions+{ + EndPoints = { "my-proxy1", "my-proxy2", "my-proxy3" }, + Proxy = Proxy.Envoyproxy +}; +``` + + +## Tiebreakers and Configuration Change Announcements --- -Normally StackExchange.Redis will resolve master/replica nodes automatically. 
However, if you are not using a management tool such as redis-sentinel or redis cluster, there is a chance that occasionally you will get multiple master nodes (for example, while resetting a node for maintenance it may reappear on the network as a master). To help with this, StackExchange.Redis can use the notion of a *tie-breaker* - which is only used when multiple masters are detected (not including redis cluster, where multiple masters are *expected*). For compatibility with BookSleeve, this defaults to the key named `"__Booksleeve_TieBreak"` (always in database 0). This is used as a crude voting mechanism to help determine the *preferred* master, so that work is routed correctly. +Normally StackExchange.Redis will resolve primary/replica nodes automatically. However, if you are not using a management tool such as redis-sentinel or redis cluster, there is a chance that occasionally you will get multiple primary nodes (for example, while resetting a node for maintenance it may reappear on the network as a primary). To help with this, StackExchange.Redis can use the notion of a *tie-breaker* - which is only used when multiple primaries are detected (not including redis cluster, where multiple primaries are *expected*). For compatibility with BookSleeve, this defaults to the key named `"__Booksleeve_TieBreak"` (always in database 0). This is used as a crude voting mechanism to help determine the *preferred* primary, so that work is routed correctly. -Likewise, when the configuration is changed (especially the master/replica configuration), it will be important for connected instances to make themselves aware of the new situation (via `INFO`, `CONFIG`, etc - where available). StackExchange.Redis does this by automatically subscribing to a pub/sub channel upon which such notifications may be sent. For similar reasons, this defaults to `"__Booksleeve_MasterChanged"`. 
+Likewise, when the configuration is changed (especially the primary/replica configuration), it will be important for connected instances to make themselves aware of the new situation (via `INFO`, `CONFIG`, etc - where available). StackExchange.Redis does this by automatically subscribing to a pub/sub channel upon which such notifications may be sent. For similar reasons, this defaults to `"__Booksleeve_MasterChanged"`. Both options can be customized or disabled (set to `""`), via the `.ConfigurationChannel` and `.TieBreaker` configuration properties. -These settings are also used by the `IServer.MakeMaster()` method, which can set the tie-breaker in the database and broadcast the configuration change message. The configuration message can also be used separately to master/replica changes simply to request all nodes to refresh their configurations, via the `ConnectionMultiplexer.PublishReconfigure` method. +These settings are also used by the `IServer.MakeMaster()` method, which can set the tie-breaker in the database and broadcast the configuration change message. The configuration message can also be used separately to primary/replica changes simply to request all nodes to refresh their configurations, via the `ConnectionMultiplexer.PublishReconfigure` method. -ReconnectRetryPolicy +## ReconnectRetryPolicy --- + StackExchange.Redis automatically tries to reconnect in the background when the connection is lost for any reason. It keeps retrying until the connection has been restored. It would use ReconnectRetryPolicy to decide how long it should wait between the retries. -ReconnectRetryPolicy can be linear (default), exponential or a custom retry policy. +ReconnectRetryPolicy can be exponential (default), linear or a custom retry policy. 
Examples: @@ -214,3 +279,16 @@ config.ReconnectRetryPolicy = new LinearRetry(5000); //5 5000 //6 5000 ``` + +## Redis protocol + +Without specific configuration, StackExchange.Redis will use the RESP2 protocol; this means that pub/sub requires a separate connection to the server. RESP3 is a newer protocol +(usually, but not always, available on v6 servers and above) which allows (among other changes) pub/sub messages to be communicated on the *same* connection - which can be very +desirable in servers with a large number of clients. The protocol handshake needs to happen very early in the connection, so *by default* the library does not attempt a RESP3 connection +unless it has reason to expect it to work. + +The library determines whether to use RESP3 by: +- The `HELLO` command has been disabled: RESP2 is used +- A protocol *other than* `resp3` or `3` is specified: RESP2 is used +- A protocol of `resp3` or `3` is specified: RESP3 is attempted (with fallback if it fails) +- In all other scenarios: RESP2 is used diff --git a/docs/ExecSync.md b/docs/ExecSync.md index 2b3409fa2..e4a09ec95 100644 --- a/docs/ExecSync.md +++ b/docs/ExecSync.md @@ -1,5 +1,5 @@ The Dangers of Synchronous Continuations === -Once, there was more content here; then [a suitably evil workaround was found](http://stackoverflow.com/a/22588431/23354). This page is not +Once, there was more content here; then [a suitably evil workaround was found](https://stackoverflow.com/a/22588431/23354). This page is not listed in the index, but remains for your curiosity. \ No newline at end of file diff --git a/docs/HotKeys.md b/docs/HotKeys.md new file mode 100644 index 000000000..5ac7c86f9 --- /dev/null +++ b/docs/HotKeys.md @@ -0,0 +1,71 @@ +Hot Keys +=== + +The `HOTKEYS` command allows for server-side profiling of CPU and network usage by key. It is available in Redis 8.6 and later. + +This command is available via the `IServer.HotKeys*` methods: + +``` c# +// Get the server instance. 
+IConnectionMultiplexer muxer = ... // connect to Redis 8.6 or later
+var server = muxer.GetServer(endpoint); // or muxer.GetServer(key)
+
+// Start the capture; you can specify a duration, or manually use the HotKeysStop[Async] method; specifying
+// a duration is recommended, so that the profiler will not be left running in the case of failure.
+// Optional parameters allow you to specify the metrics to capture, the sample ratio, and the key slots to include;
+// by default, all metrics are captured, every command is sampled, and all key slots are included.
+await server.HotKeysStartAsync(duration: TimeSpan.FromSeconds(30));
+
+// Now either do some work ourselves, or wait for some other activity to happen:
+await Task.Delay(TimeSpan.FromSeconds(35)); // whatever happens: happens
+
+// Fetch the results; note that this does not stop the capture, and you can fetch the results multiple times
+// either while it is running, or after it has completed - but only a single capture can be active at a time.
+var result = await server.HotKeysGetAsync();
+
+// ...investigate the results...
+
+// Optional: discard the active capture data at the server, if any.
+await server.HotKeysResetAsync();
+```
+
+The `HotKeysResult` class (our `result` value above) contains the following properties:
+
+- `Metrics`: The metrics captured during this profiling session.
+- `TrackingActive`: Indicates whether the capture is currently active.
+- `SampleRatio`: Profiling frequency; effectively: measure every Nth command. (also: `IsSampled`)
+- `SelectedSlots`: The key slots active for this profiling session.
+- `CollectionStartTime`: The start time of the capture.
+- `CollectionDuration`: The duration of the capture.
+- `AllCommandsAllSlotsTime`: The total CPU time measured for all commands in all slots, without any sampling or filtering applied.
+- `AllCommandsAllSlotsNetworkBytes`: The total network usage measured for all commands in all slots, without any sampling or filtering applied. 
+ +When slot filtering is used, the following properties are also available: + +- `AllCommandsSelectedSlotsTime`: The total CPU time measured for all commands in the selected slots. +- `AllCommandsSelectedSlotsNetworkBytes`: The total network usage measured for all commands in the selected slots. + +When slot filtering *and* sampling is used, the following properties are also available: + +- `SampledCommandsSelectedSlotsTime`: The total CPU time measured for the sampled commands in the selected slots. +- `SampledCommandsSelectedSlotsNetworkBytes`: The total network usage measured for the sampled commands in the selected slots. + +If CPU metrics were captured, the following properties are also available: + +- `TotalCpuTimeUser`: The total user CPU time measured in the profiling session. +- `TotalCpuTimeSystem`: The total system CPU time measured in the profiling session. +- `TotalCpuTime`: The total CPU time measured in the profiling session. +- `CpuByKey`: Hot keys, as measured by CPU activity; for each: + - `Key`: The key observed. + - `Duration`: The time taken. + +If network metrics were captured, the following properties are also available: + +- `TotalNetworkBytes`: The total network data measured in the profiling session. +- `NetworkBytesByKey`: Hot keys, as measured by network activity; for each: + - `Key`: The key observed. + - `Bytes`: The network activity, in bytes. + +Note: to use slot-based filtering, you must be connected to a Redis Cluster instance. The +`IConnectionMultiplexer.HashSlot(RedisKey)` method can be used to determine the slot for a given key. The key +can also be used in place of an endpoint when using `GetServer(...)` to get the `IServer` instance for a given key. 
diff --git a/docs/KeysValues.md b/docs/KeysValues.md index 0860ef37c..0a414ce21 100644 --- a/docs/KeysValues.md +++ b/docs/KeysValues.md @@ -1,9 +1,9 @@ Keys, Values and Channels === -In dealing with redis, there is quite an important distinction between *keys* and *everything else*. A key is the unique name of a piece of data (which could be a String, a List, Hash, or any of the other [redis data types](http://redis.io/topics/data-types)) within a database. Keys are never interpreted as... well, anything: they are simply inert names. Further - when dealing with clustered or sharded systems, it is the key that defines the node (or nodes if there are replicas) that contain this data - so keys are crucial for routing commands. +In dealing with redis, there is quite an important distinction between *keys* and *everything else*. A key is the unique name of a piece of data (which could be a String, a List, Hash, or any of the other [redis data types](https://redis.io/topics/data-types)) within a database. Keys are never interpreted as... well, anything: they are simply inert names. Further - when dealing with clustered or sharded systems, it is the key that defines the node (or nodes if there are replicas) that contain this data - so keys are crucial for routing commands. -This contrasts with *values*; values are the *things that you store* against keys - either individually (for String data) or as groups. Values do not affect command routing (caveat: except for [the `SORT` command](http://redis.io/commands/sort) when `BY` or `GET` is specified, but that is *really* complicated to explain). Likewise, values are often *interpreted* by redis for the purposes of an operation: +This contrasts with *values*; values are the *things that you store* against keys - either individually (for String data) or as groups. 
Values do not affect command routing (caveat: except for [the `SORT` command](https://redis.io/commands/sort) when `BY` or `GET` is specified, but that is *really* complicated to explain). Likewise, values are often *interpreted* by redis for the purposes of an operation: - `incr` (and the various similar commands) interpret String values as numeric data - sorting can interpret values using either numeric or unicode rules @@ -90,7 +90,7 @@ Channel names for pub/sub are represented by the `RedisChannel` type; this is la Scripting --- -[Lua scripting in redis](http://redis.io/commands/EVAL) has two notable features: +[Lua scripting in redis](https://redis.io/commands/EVAL) has two notable features: - the inputs must keep keys and values separate (which inside the script become `KEYS` and `ARGV`, respectively) - the return format is not defined in advance: it is specific to your script diff --git a/docs/KeyspaceNotifications.md b/docs/KeyspaceNotifications.md new file mode 100644 index 000000000..d9c4f26a1 --- /dev/null +++ b/docs/KeyspaceNotifications.md @@ -0,0 +1,213 @@ +# Redis Keyspace Notifications + +Redis keyspace notifications let you monitor operations happening on your Redis keys in real-time. StackExchange.Redis provides a strongly-typed API for subscribing to and consuming these events. +This could be used for example to implement a cache invalidation strategy. + +## Prerequisites + +### Redis Configuration + +You must [enable keyspace notifications](https://redis.io/docs/latest/develop/pubsub/keyspace-notifications/#configuration) in your Redis server config, +for example: + +``` conf +notify-keyspace-events AKE +``` + +- **A** - All event types +- **K** - Keyspace notifications (`__keyspace@__:`) +- **E** - Keyevent notifications (`__keyevent@__:`) + +The two types of event (keyspace and keyevent) encode the same information, but in different formats. 
+To simplify consumption, StackExchange.Redis provides a unified API for both types of event, via the `KeyNotification` type. + +### Event Broadcasting in Redis Cluster + +Importantly, in Redis Cluster, keyspace notifications are **not** broadcast to all nodes - they are only received by clients connecting to the +individual node where the keyspace notification originated, i.e. where the key was modified. +This is different to how regular pub/sub events are handled, where a subscription to a channel on one node will receive events published on any node. +Clients must explicitly subscribe to the same channel on each node they wish to receive events from, which typically means: every primary node in the cluster. +To make this easier, StackExchange.Redis provides dedicated APIs for subscribing to keyspace and keyevent notifications that handle this for you. + +## Quick Start + +As an example, we'll subscribe to all keys with a specific prefix, and print out the key and event type for each notification. First, +we need to create a `RedisChannel`: + +```csharp +// this will subscribe to __keyspace@0__:user:*, including supporting Redis Cluster +var channel = RedisChannel.KeySpacePrefix(prefix: "user:"u8, database: 0); +``` + +Note that there are a range of other `KeySpace...` and `KeyEvent...` methods for different scenarios, including: + +- `KeySpaceSingleKey` - subscribe to notifications for a single key in a specific database +- `KeySpacePattern` - subscribe to notifications for a key pattern, optionally in a specific database +- `KeySpacePrefix` - subscribe to notifications for all keys with a specific prefix, optionally in a specific database +- `KeyEvent` - subscribe to notifications for a specific event type, optionally in a specific database + +The `KeySpace*` methods are similar, and are presented separately to make the intent clear. 
For example, `KeySpacePattern("foo*")` is equivalent to `KeySpacePrefix("foo")`, and will subscribe to all keys beginning with `"foo"`. + +Next, we subscribe to the channel and process the notifications using the normal pub/sub subscription API; there are two +main approaches: queue-based and callback-based. + +Queue-based: + +```csharp +var queue = await sub.SubscribeAsync(channel); +_ = Task.Run(async () => +{ + await foreach (var msg in queue) + { + if (msg.TryParseKeyNotification(out var notification)) + { + Console.WriteLine($"Key: {notification.GetKey()}"); + Console.WriteLine($"Type: {notification.Type}"); + Console.WriteLine($"Database: {notification.Database}"); + } + } +}); +``` + +Callback-based: + +```csharp +sub.Subscribe(channel, (recvChannel, recvValue) => +{ + if (KeyNotification.TryParse(recvChannel, recvValue, out var notification)) + { + Console.WriteLine($"Key: {notification.GetKey()}"); + Console.WriteLine($"Type: {notification.Type}"); + Console.WriteLine($"Database: {notification.Database}"); + } +}); +``` + +Note that the channels created by the `KeySpace...` and `KeyEvent...` methods cannot be used to manually *publish* events, +only to subscribe to them. The events are published automatically by the Redis server when keys are modified. If you +want to simulate keyspace notifications by publishing events manually, you should use regular pub/sub channels that avoid +the `__keyspace@` and `__keyevent@` prefixes. + +## Performance considerations for KeyNotification + +The `KeyNotification` struct provides parsed notification data, including (as already shown) the key, event type, +database, etc. Note that using `GetKey()` will allocate a copy of the key bytes; to avoid allocations, +you can use `TryCopyKey()` to copy the key bytes into a provided buffer (potentially with `GetKeyByteCount()`, +`GetKeyMaxCharCount()`, etc in order to size the buffer appropriately). 
Similarly, `KeyStartsWith()` can be used to
+efficiently check the key prefix without allocating a string. This approach is designed to be efficient for high-volume
+notification processing, and in particular: for use with the alt-lookup (span) APIs that are slowly being introduced
+in various .NET APIs.
+
+For example, with a `ConcurrentDictionary<string, T>` (for some `T`), you can use `GetAlternateLookup<ReadOnlySpan<char>>()`
+to get an alternate lookup API that takes a `ReadOnlySpan<char>` instead of a `string`, and then use `TryCopyKey()` to copy
+the key bytes into a buffer, and then use the alt-lookup API to find the value. This means that we avoid allocating a string
+for the key entirely, and instead just copy the bytes into a buffer. If we consider that commonly a local cache will *not*
+contain the key for the majority of notifications (since they are for cache invalidation), this can be a significant
+performance win.
+
+## Considerations when using database isolation
+
+Database isolation is controlled either via the `ConfigurationOptions.DefaultDatabase` option when connecting to Redis,
+or by using the `GetDatabase(int? db = null)` method to get a specific database instance. Note that the
+`KeySpace...` and `KeyEvent...` APIs may optionally take a database. When a database is specified, subscription will only
+respond to notifications for keys in that database. If a database is not specified, the subscription will respond to
+notifications for keys in all databases. Often, you will want to pass `db.Database` from the `IDatabase` instance you are
+using for your application logic, to ensure that you are monitoring the correct database. When using Redis Cluster,
+this usually means database `0`, since Redis Cluster does not usually support multiple databases. 
+
+For example:
+
+- `RedisChannel.KeySpaceSingleKey("foo", 0)` maps to `SUBSCRIBE __keyspace@0__:foo`
+- `RedisChannel.KeySpacePrefix("foo", 0)` maps to `PSUBSCRIBE __keyspace@0__:foo*`
+- `RedisChannel.KeySpacePrefix("foo")` maps to `PSUBSCRIBE __keyspace@*__:foo*`
+- `RedisChannel.KeyEvent(KeyNotificationType.Set, 0)` maps to `SUBSCRIBE __keyevent@0__:set`
+- `RedisChannel.KeyEvent(KeyNotificationType.Set)` maps to `PSUBSCRIBE __keyevent@*__:set`
+
+Additionally, note that while most of these examples require multi-node subscriptions on Redis Cluster, `KeySpaceSingleKey`
+is an exception, and will only subscribe to the single node that owns the key `foo`.
+
+When subscribing without specifying a database (i.e. listening to changes in all databases), the database relating
+to the notification can be fetched via `KeyNotification.Database`:
+
+``` c#
+var channel = RedisChannel.KeySpacePrefix("foo");
+sub.SubscribeAsync(channel, (recvChannel, recvValue) =>
+{
+    if (KeyNotification.TryParse(recvChannel, recvValue, out var notification))
+    {
+        var key = notification.GetKey();
+        var db = notification.Database;
+        // ...
+    }
+});
+```
+
+## Considerations when using keyspace or channel isolation
+
+StackExchange.Redis supports the concept of keyspace and channel (pub/sub) isolation.
+
+Channel isolation is controlled using the `ConfigurationOptions.ChannelPrefix` option when connecting to Redis.
+Intentionally, this feature *is ignored* by the `KeySpace...` and `KeyEvent...` APIs, because they are designed to
+subscribe to specific (server-defined) channels that are outside the control of the client.
+
+Keyspace isolation is controlled using the `WithKeyPrefix` extension method on `IDatabase`. This is *not* used
+by the `KeySpace...` and `KeyEvent...` APIs. Since the database and pub/sub APIs are independent, keyspace isolation
+*is not applied* (and cannot be; consuming code could have zero, one, or multiple databases with different prefixes). 
+The caller is responsible for ensuring that the prefix is applied appropriately when constructing the `RedisChannel`. + +By default, key-related features of `KeyNotification` will return the full key reported by the server, +including any prefix. However, the `TryParseKeyNotification` and `TryParse` methods can optionally be passed a +key prefix, which will be used both to filter unwanted notifications and strip the prefix from the key when reading. +It is *possible* to handle keyspace isolation manually by checking the key with `KeyNotification.KeyStartsWith` and +manually trimming the prefix, but it is *recommended* to do this via `TryParseKeyNotification` and `TryParse`. + +As an example, with a multi-tenant scenario using keyspace isolation, we might have in the database code: + +``` c# +// multi-tenant scenario using keyspace isolation +byte[] keyPrefix = Encoding.UTF8.GetBytes("client1234:"); +var db = conn.GetDatabase().WithKeyPrefix(keyPrefix); + +// we will later commit order data for example: +await db.StringSetAsync("order/123", "ISBN 9789123684434"); +``` + +To observe this, we could use: + +``` c# +var sub = conn.GetSubscriber(); + +// subscribe to the specific tenant as a prefix: +var channel = RedisChannel.KeySpacePrefix("client1234:order/", db.Database); + +sub.SubscribeAsync(channel, (recvChannel, recvValue) => +{ + // by including prefix in the TryParse, we filter out notifications that are not for this client + // *and* the key is sliced internally to remove this prefix when reading + if (KeyNotification.TryParse(keyPrefix, recvChannel, recvValue, out var notification)) + { + // if we get here, the key prefix was a match + var key = notification.GetKey(); // "order/123" - note no prefix + // ... 
+ } + + /* + // for contrast only: this is *not* usually the recommended approach when using keyspace isolation + if (KeyNotification.TryParse(recvChannel, recvValue, out var notification) + && notification.KeyStartsWith(keyPrefix)) + { + var key = notification.GetKey(); // "client1234:order/123" - note prefix is included + // ... + } + */ +}); + +``` + +Alternatively, if we wanted a single handler that observed *all* tenants, we could use: + +``` c# +var channel = RedisChannel.KeySpacePattern("client*:order/*", db.Database); +``` + +with similar code, parsing the client from the key manually, using the full key length. \ No newline at end of file diff --git a/docs/PipelinesMultiplexers.md b/docs/PipelinesMultiplexers.md index aa47b2a50..b1711531f 100644 --- a/docs/PipelinesMultiplexers.md +++ b/docs/PipelinesMultiplexers.md @@ -69,7 +69,7 @@ Multiplexing Pipelining is all well and good, but often any single block of code only wants a single value (or maybe wants to perform a few operations, but which depend on each-other). This means that we still have the problem that we spend most of our time waiting for data to transfer between client and server. Now consider a busy application, perhaps a web-server. Such applications are generally inherently concurrent, so if you have 20 parallel application requests all requiring data, you might think of spinning up 20 connections, or you could synchronize access to a single connection (which would mean the last caller would need to wait for the latency of all the other 19 before it even got started). Or as a compromise, perhaps a pool of 5 connections which are leased - no matter how you are doing it, there is going to be a lot of waiting. **StackExchange.Redis does not do this**; instead, it does a *lot* of work for you to make effective use of all this idle time by *multiplexing* a single connection. 
When used concurrently by different callers, it **automatically pipelines the separate requests**, so regardless of whether the requests use blocking or asynchronous access, the work is all pipelined. So we could have 10 or 20 of our "get a and b" scenario from earlier (from different application requests), and they would all get onto the connection as soon as possible. Essentially, it fills the `waiting` time with work from other callers. -For this reason, the only redis features that StackExchange.Redis does not offer (and *will not ever offer*) are the "blocking pops" ([BLPOP](http://redis.io/commands/blpop), [BRPOP](http://redis.io/commands/brpop) and [BRPOPLPUSH](http://redis.io/commands/brpoplpush)) - because this would allow a single caller to stall the entire multiplexer, blocking all other callers. The only other time that StackExchange.Redis needs to hold work is when verifying pre-conditions for a transaction, which is why StackExchange.Redis encapsulates such conditions into internally managed `Condition` instances. [Read more about transactions here](Transactions). If you feel you want "blocking pops", then I strongly suggest you consider pub/sub instead: +For this reason, the only redis features that StackExchange.Redis does not offer (and *will not ever offer*) are the "blocking pops" ([BLPOP](https://redis.io/commands/blpop), [BRPOP](https://redis.io/commands/brpop) and [BRPOPLPUSH](https://redis.io/commands/brpoplpush)) - because this would allow a single caller to stall the entire multiplexer, blocking all other callers. The only other time that StackExchange.Redis needs to hold work is when verifying pre-conditions for a transaction, which is why StackExchange.Redis encapsulates such conditions into internally managed `Condition` instances. [Read more about transactions here](Transactions). 
If you feel you want "blocking pops", then I strongly suggest you consider pub/sub instead: ```csharp sub.Subscribe(channel, delegate { @@ -105,6 +105,6 @@ if (value == null) { return value; ``` - [1]: http://msdn.microsoft.com/en-us/library/dd460717(v=vs.110).aspx - [2]: http://msdn.microsoft.com/en-us/library/system.threading.tasks.task(v=vs.110).aspx - [3]: http://msdn.microsoft.com/en-us/library/dd321424(v=vs.110).aspx + [1]: https://docs.microsoft.com/en-us/dotnet/standard/parallel-programming/task-parallel-library-tpl + [2]: https://docs.microsoft.com/en-us/dotnet/api/system.threading.tasks.task + [3]: https://docs.microsoft.com/en-us/dotnet/api/system.threading.tasks.task-1 diff --git a/docs/ReleaseNotes.md b/docs/ReleaseNotes.md index f6ef64307..c4cf50c32 100644 --- a/docs/ReleaseNotes.md +++ b/docs/ReleaseNotes.md @@ -1,244 +1,628 @@ # Release Notes +Current package versions: + +| NuGet Stable | NuGet Pre-release | MyGet | +| ------------ | ----------------- | ----- | +| [![StackExchange.Redis](https://img.shields.io/nuget/v/StackExchange.Redis.svg)](https://www.nuget.org/packages/StackExchange.Redis/) | [![StackExchange.Redis](https://img.shields.io/nuget/vpre/StackExchange.Redis.svg)](https://www.nuget.org/packages/StackExchange.Redis/) | [![StackExchange.Redis MyGet](https://img.shields.io/myget/stackoverflow/vpre/StackExchange.Redis.svg)](https://www.myget.org/feed/stackoverflow/package/nuget/StackExchange.Redis) | + +## Unreleased + +- (none) + +## 2.12.1 + +- Add missing `LCS` outputs and missing `RedisType.VectorSet` ([#3028 by @mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/3028)) +- Track and report multiplexer count ([#3030 by @mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/3030)) +- (docs) Add Entra ID authentication docs ([#3023 by @philon-msft](https://github.com/StackExchange/StackExchange.Redis/pull/3023)) +- (eng) Improve test infrastructure (toy-server) ([#3021 by 
@mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/3021), [#3022 by @mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/3022), [#3027 by @mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/3027), [#3028 by @mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/3028)) +- (eng) Pre-V2 work: bring RESPite down, toy-server, migrate to `AsciiHash` ([#3028 by @mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/3028)) + +## 2.11.8 + +* Handle `-MOVED` error pointing to same endpoint. ([#3003 by @barshaul](https://github.com/StackExchange/StackExchange.Redis/pull/3003)) +* fix time conversion error in `HOTKEYS` ([#3017 by @mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/3017)) + +- Add support for `VRANGE` ([#3011 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/3011)) +- Add defensive code in azure-maintenance-events handling ([#3013 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/3013)) + +## 2.11.3 + +- Add support for `VRANGE` ([#3011 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/3011)) +- Add defensive code in azure-maintenance-events handling ([#3013 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/3013)) + +## 2.11.0 + +- Add support for `HOTKEYS` ([#3008 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/3008)) +- Add support for keyspace notifications ([#2995 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2995)) +- Add support for idempotent stream entry (`XADD IDMP[AUTO]`) support ([#3006 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/3006)) +- (internals) split AMR out to a separate options provider ([#2986 by NickCraver and philon-msft](https://github.com/StackExchange/StackExchange.Redis/pull/2986)) + +## 2.10.14 + +- Fix bug with connection startup failing in low-memory scenarios ([#3002 by 
nathan-miller23](https://github.com/StackExchange/StackExchange.Redis/pull/3002)) +- Fix under-count of `TotalOutstanding` in server-counters ([#2996 by nathan-miller23](https://github.com/StackExchange/StackExchange.Redis/pull/2996)) +- Fix incorrect debug assertion in `HGETEX` (no impact to release library) ([#2999 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2999)) + + +## 2.10.1 + +- Support Redis 8.4 CAS/CAD operations (`DIGEST`, and the `IFEQ`, `IFNE`, `IFDEQ`, `IFDNE` modifiers on `SET` / `DEL`) + via the new `ValueCondition` abstraction, and use CAS/CAD operations for `Lock*` APIs when possible ([#2978 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2978)) + - **note**: overload resolution for `StringSet[Async]` may be impacted in niche cases, requiring trivial build changes (there are no runtime-breaking changes such as missing methods) +- Support `XREADGROUP CLAIM` ([#2972 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2972)) +- Support `MSETEX` (Redis 8.4.0) for multi-key operations with expiration ([#2977 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2977)) + +## 2.9.32 + +- Fix `SSUBSCRIBE` routing during slot migrations ([#2969 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2969)) + +## 2.9.25 + +- (build) Fix SNK on non-Windows builds ([#2963 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2963)) + +## 2.9.24 + +- Fix [#2951](https://github.com/StackExchange/StackExchange.Redis/issues/2951) - sentinel reconnection failure ([#2956 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2956)) +- Mitigate [#2955](https://github.com/StackExchange/StackExchange.Redis/issues/2955) (unbalanced pub/sub routing) / add `RedisValue.WithKeyRouting()` ([#2958 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2958)) +- Fix envoyproxy command exclusions ([#2957 by 
sshumakov](https://github.com/StackExchange/StackExchange.Redis/pull/2957)) +- Restrict `RedisValue` hex fallback (`string` conversion) to encoding failures ([2954 by jcaspes](https://github.com/StackExchange/StackExchange.Redis/pull/2954)) +- (internals) prefer `Volatile.Read` over `Thread.VolatileRead` ([2960 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2960)) + +## 2.9.17 + +- Add vector-set support ([#2939 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2939)) +- Fix `RedisValue` special-value (NaN, Inf, etc) handling when casting from raw/string values to `double` ([#2950 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2950)) +- Internals: + - Use `sealed` classes where possible ([#2942 by Henr1k80](https://github.com/StackExchange/StackExchange.Redis/pull/2942)) + - Add overlapped flushing in `LoggingTunnel` and avoid double-lookups ([#2943 by Henr1k80](https://github.com/StackExchange/StackExchange.Redis/pull/2943)) + +## 2.9.11 + +- Add `HGETDEL`, `HGETEX` and `HSETEX` support ([#2863 by atakavci](https://github.com/StackExchange/StackExchange.Redis/pull/2863)) +- Fix key-prefix omission in `SetIntersectionLength` and `SortedSet{Combine[WithScores]|IntersectionLength}` ([#2863 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2863)) +- Add `Condition.SortedSet[Not]ContainsStarting` condition for transactions ([#2638 by ArnoKoll](https://github.com/StackExchange/StackExchange.Redis/pull/2638)) +- Add support for XPENDING Idle time filter ([#2822 by david-brink-talogy](https://github.com/StackExchange/StackExchange.Redis/pull/2822)) +- Improve `double` formatting performance on net8+ ([#2928 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2928)) +- Add `GetServer(RedisKey, ...)` API ([#2936 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2936)) +- Fix error constructing `StreamAdd` message ([#2941 by 
mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2941)) + +## 2.8.58 + +- Fix [#2679](https://github.com/StackExchange/StackExchange.Redis/issues/2679) - blocking call in long-running connects ([#2680 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2680)) +- Support async cancellation of `SCAN` enumeration ([#2911 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2911)) +- Add `XTRIM MINID` support ([#2842 by kijanawoodard](https://github.com/StackExchange/StackExchange.Redis/pull/2842)) +- Add new CE 8.2 stream support - `XDELEX`, `XACKDEL`, `{XADD|XTRIM} [KEEPREF|DELREF|ACKED]` ([#2912 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2912)) +- Fix `ZREVRANGEBYLEX` open-ended commands ([#2636 by ArnoKoll](https://github.com/StackExchange/StackExchange.Redis/pull/2636)) +- Fix `StreamGroupInfo.Lag` when `null` ([#2902 by robhop](https://github.com/StackExchange/StackExchange.Redis/pull/2902)) +- Internals + - Logging improvements ([#2903 by Meir017](https://github.com/StackExchange/StackExchange.Redis/pull/2903) and [#2917 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2917)) + - Update tests to xUnit v3 ([#2907 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2907)) + - Avoid `CLIENT PAUSE` in CI tests ([#2916 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2916)) + +## 2.8.47 + +- Add support for new `BITOP` operations in CE 8.2 ([#2900 by atakavci](https://github.com/StackExchange/StackExchange.Redis/pull/2900)) +- Package updates ([#2906 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2906)) +- Docs: added [guidance on async timeouts](https://stackexchange.github.io/StackExchange.Redis/AsyncTimeouts) ([#2910 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2910)) +- Fix handshake error with `CLIENT ID` ([#2909 by 
mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2909)) + +## 2.8.41 + +- Add support for sharded pub/sub via `RedisChannel.Sharded` - ([#2887 by vandyvilla, atakavci and mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2887)) + +## 2.8.37 + +- Add `ConfigurationOptions.SetUserPemCertificate(...)` and `ConfigurationOptions.SetUserPfxCertificate(...)` methods to simplify using client certificates ([#2873 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2873)) +- Add logging for when a Multiplexer reconfigures ([#2864 by st-dev-gh](https://github.com/StackExchange/StackExchange.Redis/pull/2864)) +- Fix: Move `AuthenticateAsClient` to fully async after dropping older framework support, to help client thread starvation in cases TLS negotiation stalls server-side ([#2878 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2878)) + +## 2.8.31 + +- Fix: Respect `IReconnectRetryPolicy` timing in the case that a node that was present disconnects indefinitely ([#2853](https://github.com/StackExchange/StackExchange.Redis/pull/2853) & [#2856](https://github.com/StackExchange/StackExchange.Redis/pull/2856) by NickCraver) + - Special thanks to [sampdei](https://github.com/sampdei) tracking this down and working a fix +- Changes max default retry policy backoff to 60 seconds ([#2853 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2853)) +- Fix [#2652](https://github.com/StackExchange/StackExchange.Redis/issues/2652): Track client-initiated shutdown for any pipe type ([#2814 by bgrainger](https://github.com/StackExchange/StackExchange.Redis/pull/2814)) + +## 2.8.24 + +- Update Envoy command definitions to [allow `UNWATCH`](https://github.com/envoyproxy/envoy/pull/37620) ([#2824 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2824)) + +## 2.8.22 + +- Format IPv6 endpoints correctly when rewriting configuration strings ([#2813 by 
mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2813)) +- Update default Redis version from `4.0.0` to `6.0.0` for Azure Redis resources ([#2810 by philon-msft](https://github.com/StackExchange/StackExchange.Redis/pull/2810)) +- Detect Azure Managed Redis caches and tune default connection settings for them ([#2818 by philon-msft](https://github.com/StackExchange/StackExchange.Redis/pull/2818)) +- Bump `Microsoft.Bcl.AsyncInterfaces` dependency from `5.0.0` to `6.0.0` ([#2820 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2820)) + +## 2.8.16 + +- Fix: PhysicalBridge: Always perform "last read" check in heartbeat when `HeartbeatConsistencyChecks` is enabled ([#2795 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2795)) + +## 2.8.14 + +- Fix [#2793](https://github.com/StackExchange/StackExchange.Redis/issues/2793): Update Envoyproxy's command map according to latest Envoy documentation ([#2794 by dbarbosapn](https://github.com/StackExchange/StackExchange.Redis/pull/2794)) + +## 2.8.12 + +- Add support for hash field expiration (see [#2715](https://github.com/StackExchange/StackExchange.Redis/issues/2715)) ([#2716 by atakavci](https://github.com/StackExchange/StackExchange.Redis/pull/2716)) +- Add support for `HSCAN NOVALUES` (see [#2721](https://github.com/StackExchange/StackExchange.Redis/issues/2721)) ([#2722 by atakavci](https://github.com/StackExchange/StackExchange.Redis/pull/2722)) +- Fix [#2763](https://github.com/StackExchange/StackExchange.Redis/issues/2763): Make ConnectionMultiplexer.Subscription thread-safe ([#2769 by Chuck-EP](https://github.com/StackExchange/StackExchange.Redis/pull/2769)) +- Fix [#2778](https://github.com/StackExchange/StackExchange.Redis/issues/2778): Run `CheckInfoReplication` even with `HeartbeatConsistencyChecks` ([#2784 by NickCraver and leachdaniel-clark](https://github.com/StackExchange/StackExchange.Redis/pull/2784)) + +## 2.8.0 + +- Add high-integrity mode 
([docs](https://stackexchange.github.io/StackExchange.Redis/Configuration), [#2741 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2741)) +- TLS certificate/`TrustIssuer`: Check EKU in X509 chain checks when validating certificates ([#2670 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2670)) + +## 2.7.33 + +- **Potentially Breaking**: Fix `CheckTrustedIssuer` certificate validation for broken chain scenarios ([#2665 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2665)) + - Users inadvertently trusting a remote cert with a broken chain could not be failing custom validation before this change. This is only in play if you are using `ConfigurationOptions.TrustIssuer` at all. +- Add new `LoggingTunnel` API; see [https://stackexchange.github.io/StackExchange.Redis/RespLogging](https://stackexchange.github.io/StackExchange.Redis/RespLogging) ([#2660 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2660)) +- Fix [#2664](https://github.com/StackExchange/StackExchange.Redis/issues/2664): Move ProcessBacklog to fully sync to prevent thread pool hopping and blocking on awaits ([#2667 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2667)) + +## 2.7.27 + +- Support `HeartbeatConsistencyChecks` and `HeartbeatInterval` in `Clone()` ([#2658 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2658)) +- Add `AddLibraryNameSuffix` to multiplexer; allows usage-specific tokens to be appended *after connect* ([#2659 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2659)) + +## 2.7.23 + +- Fix [#2653](https://github.com/StackExchange/StackExchange.Redis/issues/2653): Client library metadata should validate contents ([#2654 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2654)) +- Add `HeartbeatConsistencyChecks` option (opt-in) to enable per-heartbeat (defaults to once per second) checks to be sent to 
ensure no network stream corruption has occurred ([#2656 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2656)) + +## 2.7.20 + +- Fix [#2642](https://github.com/StackExchange/StackExchange.Redis/issues/2642): Detect and support multi-DB pseudo-cluster/proxy scenarios ([#2646](https://github.com/StackExchange/StackExchange.Redis/pull/2646) by mgravell) + +## 2.7.17 + +- Fix [#2321](https://github.com/StackExchange/StackExchange.Redis/issues/2321): Honor disposition of select command in Command Map for transactions [(#2322 by slorello89)](https://github.com/StackExchange/StackExchange.Redis/pull/2322) +- Fix [#2619](https://github.com/StackExchange/StackExchange.Redis/issues/2619): Type-forward `IsExternalInit` to support down-level TFMs ([#2621 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2621)) +- `InternalsVisibleTo` `PublicKey` enhancements([#2623 by WeihanLi](https://github.com/StackExchange/StackExchange.Redis/pull/2623)) +- Fix [#2576](https://github.com/StackExchange/StackExchange.Redis/issues/2576): Prevent `NullReferenceException` during shutdown of connections ([#2629 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2629)) + +## 2.7.10 + +- Fix [#2593](https://github.com/StackExchange/StackExchange.Redis/issues/2593): `EXPIRETIME` and `PEXPIRETIME` miscategorized as `PrimaryOnly` commands causing them to fail when issued against a read-only replica ([#2593 by slorello89](https://github.com/StackExchange/StackExchange.Redis/pull/2593)) +- Fix [#2591](https://github.com/StackExchange/StackExchange.Redis/issues/2591): Add `HELLO` to Sentinel connections so they can support RESP3 ([#2601 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2601)) +- Fix [#2595](https://github.com/StackExchange/StackExchange.Redis/issues/2595): Add detection handling for dead sockets that the OS says are okay, seen especially in Linux environments ([#2610 by 
NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2610)) + +## 2.7.4 + +- Adds: RESP3 support ([#2396 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2396)) - see https://stackexchange.github.io/StackExchange.Redis/Resp3 +- Fix [#2507](https://github.com/StackExchange/StackExchange.Redis/issues/2507): Pub/sub with multi-item payloads should be usable ([#2508 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2508)) +- Add: connection-id tracking (internal only, no public API) ([#2508 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2508)) +- Add: `ConfigurationOptions.LoggerFactory` for logging to an `ILoggerFactory` (e.g. `ILogger`) all connection and error events ([#2051 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2051)) +- Fix [#2467](https://github.com/StackExchange/StackExchange.Redis/issues/2467): Add StreamGroupInfo EntriesRead and Lag ([#2510 by tvdias](https://github.com/StackExchange/StackExchange.Redis/pull/2510)) + +## 2.6.122 + +- Change: Target net6.0 instead of net5.0, since net5.0 is end of life. 
([#2497 by eerhardt](https://github.com/StackExchange/StackExchange.Redis/pull/2497)) +- Fix: Fix nullability annotation of IConnectionMultiplexer.RegisterProfiler ([#2494 by eerhardt](https://github.com/StackExchange/StackExchange.Redis/pull/2494)) +- Fix [#2520](https://github.com/StackExchange/StackExchange.Redis/issues/2520): Improve cluster connections in down scenarios by not re-pinging successful nodes ([#2525 by Matiszak](https://github.com/StackExchange/StackExchange.Redis/pull/2525)) +- Add: `Timer.ActiveCount` under `POOL` in timeout messages on .NET 6+ to help diagnose timer overload affecting timeout evaluations ([#2500 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2500)) +- Add: `LibraryName` configuration option; allows the library name to be controlled at the individual options level (in addition to the existing controls in `DefaultOptionsProvider`) ([#2502 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2502)) +- Add: `DefaultOptionsProvider.GetProvider` allows lookup of provider by endpoint ([#2502 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2502)) + +## 2.6.116 + +- Fix [#2479](https://github.com/StackExchange/StackExchange.Redis/issues/2479): Add `RedisChannel.UseImplicitAutoPattern` (global) and `RedisChannel.IsPattern` ([#2480 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2480)) +- Fix [#2479](https://github.com/StackExchange/StackExchange.Redis/issues/2479): Mark `RedisChannel` conversion operators as obsolete; add `RedisChannel.Literal` and `RedisChannel.Pattern` helpers ([#2481 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2481)) +- Fix [#2449](https://github.com/StackExchange/StackExchange.Redis/issues/2449): Update `Pipelines.Sockets.Unofficial` to `v2.2.8` to support native AOT ([#2456 by eerhardt](https://github.com/StackExchange/StackExchange.Redis/pull/2456)) + +## 2.6.111 + +- Fix 
[#2426](https://github.com/StackExchange/StackExchange.Redis/issues/2426): Don't restrict multi-slot operations on Envoy proxy; let the proxy decide ([#2428 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2428)) +- Add: Support for `User`/`Password` in `DefaultOptionsProvider` to support token rotation scenarios ([#2445 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2445)) +- Fix [#2449](https://github.com/StackExchange/StackExchange.Redis/issues/2449): Resolve AOT trim warnings in `TryGetAzureRoleInstanceIdNoThrow` ([#2451 by eerhardt](https://github.com/StackExchange/StackExchange.Redis/pull/2451)) +- Adds: Support for `HTTP/1.1 200 Connection established` in HTTP Tunnel ([#2448 by flobernd](https://github.com/StackExchange/StackExchange.Redis/pull/2448)) +- Adds: Timeout duration to backlog timeout error messages ([#2452 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2452)) +- Adds: `DefaultOptionsProvider.LibraryName` for specifying lib-name passed to `CLIENT SETINFO` in Redis 7.2+ ([#2453 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2453)) + +## 2.6.104 + +- Fix [#2412](https://github.com/StackExchange/StackExchange.Redis/issues/2412): Critical (but rare) GC bug that can lead to async tasks never completing if the multiplexer is not held by the consumer ([#2408 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2408)) +- Add: Better error messages (over generic timeout) when commands are backlogged and unable to write to any connection ([#2408 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2408)) +- Fix [#2392](https://github.com/StackExchange/StackExchange.Redis/issues/2392): Dequeue *all* timed out messages from the backlog when not connected (including Fire+Forget) ([#2397 by kornelpal](https://github.com/StackExchange/StackExchange.Redis/pull/2397)) +- Fix 
[#2400](https://github.com/StackExchange/StackExchange.Redis/issues/2400): Expose `ChannelMessageQueue` as `IAsyncEnumerable` ([#2402 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2402)) +- Add: Support for `CLIENT SETINFO` (lib name/version) during handshake; opt-out is via `ConfigurationOptions`; also support read of `resp`, `lib-ver` and `lib-name` via `CLIENT LIST` ([#2414 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2414)) +- Documentation: clarify the meaning of `RedisValue.IsInteger` re [#2418](https://github.com/StackExchange/StackExchange.Redis/issues/2418) ([#2420 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2420)) + +## 2.6.96 + +- Fix [#2350](https://github.com/StackExchange/StackExchange.Redis/issues/2350): Properly parse lua script parameters in all cultures ([#2351 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2351)) +- Fix [#2362](https://github.com/StackExchange/StackExchange.Redis/issues/2362): Set `RedisConnectionException.FailureType` to `AuthenticationFailure` on all authentication scenarios for better handling ([#2367 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2367)) +- Fix [#2368](https://github.com/StackExchange/StackExchange.Redis/issues/2368): Support `RedisValue.Length()` for all storage types ([#2370 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2370)) +- Fix [#2376](https://github.com/StackExchange/StackExchange.Redis/issues/2376): Avoid a (rare) deadlock scenario ([#2378 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2378)) + +## 2.6.90 + +- Adds: Support for `EVAL_RO` and `EVALSHA_RO` via `IDatabase.ScriptEvaluateReadOnly`/`IDatabase.ScriptEvaluateReadOnlyAsync` ([#2168 by shacharPash](https://github.com/StackExchange/StackExchange.Redis/pull/2168)) +- Fix [#1458](https://github.com/StackExchange/StackExchange.Redis/issues/1458): Fixes a leak condition 
when a connection completes on the TCP phase but not the Redis handshake ([#2238 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2238)) +- Internal: ServerSnapshot: Improve API and allow filtering with custom struct enumerator ([#2337 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2337)) + + +## 2.6.86 + +- Fix [#1520](https://github.com/StackExchange/StackExchange.Redis/issues/1520) & [#1660](https://github.com/StackExchange/StackExchange.Redis/issues/1660): When `MOVED` is encountered from a cluster, a reconfigure will happen proactively to react to cluster changes ASAP ([#2286 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2286)) +- Fix [#2249](https://github.com/StackExchange/StackExchange.Redis/issues/2249): Properly handle a `fail` state (new `ClusterNode.IsFail` property) for `CLUSTER NODES` and expose `fail?` as a property (`IsPossiblyFail`) as well ([#2288 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2288)) +- Adds: `IConnectionMultiplexer.ServerMaintenanceEvent` (was on `ConnectionMultiplexer` but not the interface) ([#2306 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2306)) +- Adds: To timeout messages, additional debug information: `Sync-Ops` (synchronous operations), `Async-Ops` (asynchronous operations), and `Server-Connected-Seconds` (how long the connection in question has been connected, or `"n/a"`) ([#2300 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2300)) + +## 2.6.80 + +- Adds: `last-in` and `cur-in` (bytes) to timeout exceptions to help identify timeouts that were just-behind another large payload off the wire ([#2276 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2276)) +- Adds: general-purpose tunnel support, with HTTP proxy "connect" support included ([#2274 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2274)) +- Removes: Package 
dependency (`System.Diagnostics.PerformanceCounter`) ([#2285 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2285)) + + +## 2.6.70 + +- Fix: `MOVED` with `NoRedirect` (and other non-reachable errors) should respect the `IncludeDetailInExceptions` setting ([#2267 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2267)) +- Fix [#2251](https://github.com/StackExchange/StackExchange.Redis/issues/2251) & [#2265](https://github.com/StackExchange/StackExchange.Redis/issues/2265): Cluster endpoint connections weren't proactively connecting subscriptions in all cases and taking the full connection timeout to complete as a result ([#2268 by iteplov](https://github.com/StackExchange/StackExchange.Redis/pull/2268)) + + +## 2.6.66 + +- Fix [#2182](https://github.com/StackExchange/StackExchange.Redis/issues/2182): Be more flexible in which commands are "primary only" in order to support users with replicas that are explicitly configured to allow writes ([#2183 by slorello89](https://github.com/StackExchange/StackExchange.Redis/pull/2183)) +- Adds: `IConnectionMultiplexer` now implements `IAsyncDisposable` ([#2161 by kimsey0](https://github.com/StackExchange/StackExchange.Redis/pull/2161)) +- Adds: `IConnectionMultiplexer.GetServers()` to get all `IServer` instances for a multiplexer ([#2203 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2203)) +- Fix [#2016](https://github.com/StackExchange/StackExchange.Redis/issues/2016): Align server selection with supported commands (e.g. 
with writable servers) to reduce `Command cannot be issued to a replica` errors ([#2191 by slorello89](https://github.com/StackExchange/StackExchange.Redis/pull/2191)) +- Performance: Optimization around timeout processing to reduce lock contention in the case of many items that haven't yet timed out during a heartbeat ([#2217 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2217)) +- Fix [#2223](https://github.com/StackExchange/StackExchange.Redis/issues/2223): Resolve sync-context issues (missing `ConfigureAwait(false)`) ([#2229 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2229)) +- Fix [#1968](https://github.com/StackExchange/StackExchange.Redis/issues/1968): Improved handling of EVAL scripts during server restarts and failovers, detecting and re-sending the script for a retry when needed ([#2170 by martintmk](https://github.com/StackExchange/StackExchange.Redis/pull/2170)) +- Adds: `ConfigurationOptions.SslClientAuthenticationOptions` (`netcoreapp3.1`/`net5.0`+ only) to give more control over SSL/TLS authentication ([#2224 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2224)) +- Fix [#2240](https://github.com/StackExchange/StackExchange.Redis/issues/2240): Improve support for DNS-based IPv6 endpoints ([#2241 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2241)) +- Adds: `ConfigurationOptions.HeartbeatInterval` (**Advanced Setting** - [see docs](https://stackexchange.github.io/StackExchange.Redis/Configuration#configuration-options)) To allow more finite control of the client heartbeat, which encompasses how often command timeouts are actually evaluated - still defaults to 1,000 ms ([#2243 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2243)) +- Fix [#1879](https://github.com/StackExchange/StackExchange.Redis/issues/1879): Improve exception message when the wrong password is used ([#2246 by 
NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2246)) +- Fix [#2233](https://github.com/StackExchange/StackExchange.Redis/issues/2233): Repeated connection to Sentinel servers using the same ConfigurationOptions would fail ([#2242 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2242)) + + +## 2.6.48 + +- URGENT Fix: [#2167](https://github.com/StackExchange/StackExchange.Redis/issues/2167), [#2176](https://github.com/StackExchange/StackExchange.Redis/issues/2176): fix error in batch/transaction handling that can result in out-of-order instructions ([#2177 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2177)) +- Fix: [#2164](https://github.com/StackExchange/StackExchange.Redis/issues/2164): fix `LuaScript.Prepare` for scripts that don't have parameters ([#2166 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2166)) + +## 2.6.45 + +- Adds: [Nullable reference type](https://docs.microsoft.com/en-us/dotnet/csharp/nullable-references) annotations ([#2041 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2041)) + - Adds annotations themselves for nullability to everything in the library + - Fixes a few internal edge cases that will now throw proper errors (rather than a downstream null reference) + - Fixes inconsistencies with `null` vs. empty array returns (preferring an not-null empty array in those edge cases) + - Note: does *not* increment a major version (as these are warnings to consumers), because: they're warnings (errors are opt-in), removing obsolete types with a 3.0 rev _would_ be binary breaking (this isn't), and reving to 3.0 would cause binding redirect pain for consumers. Bumping from 2.5 to 2.6 only for this change. 
+- Adds: Support for `COPY` with `.KeyCopy()`/`.KeyCopyAsync()` ([#2064 by Avital-Fine](https://github.com/StackExchange/StackExchange.Redis/pull/2064)) +- Adds: Support for `LMOVE` with `.ListMove()`/`.ListMoveAsync()` ([#2065 by Avital-Fine](https://github.com/StackExchange/StackExchange.Redis/pull/2065)) +- Adds: Support for `ZRANDMEMBER` with `.SortedSetRandomMember()`/`.SortedSetRandomMemberAsync()`, `.SortedSetRandomMembers()`/`.SortedSetRandomMembersAsync()`, and `.SortedSetRandomMembersWithScores()`/`.SortedSetRandomMembersWithScoresAsync()` ([#2076 by Avital-Fine](https://github.com/StackExchange/StackExchange.Redis/pull/2076)) +- Adds: Support for `SMISMEMBER` with `.SetContains()`/`.SetContainsAsync()` ([#2077 by Avital-Fine](https://github.com/StackExchange/StackExchange.Redis/pull/2077)) +- Adds: Support for `ZDIFF`, `ZDIFFSTORE`, `ZINTER`, `ZINTERCARD`, and `ZUNION` with `.SortedSetCombine()`/`.SortedSetCombineAsync()`, `.SortedSetCombineWithScores()`/`.SortedSetCombineWithScoresAsync()`, and `.SortedSetIntersectionLength()`/`.SortedSetIntersectionLengthAsync()` ([#2075 by Avital-Fine](https://github.com/StackExchange/StackExchange.Redis/pull/2075)) +- Adds: Support for `SINTERCARD` with `.SetIntersectionLength()`/`.SetIntersectionLengthAsync()` ([#2078 by Avital-Fine](https://github.com/StackExchange/StackExchange.Redis/pull/2078)) +- Adds: Support for `LPOS` with `.ListPosition()`/`.ListPositionAsync()` and `.ListPositions()`/`.ListPositionsAsync()` ([#2080 by slorello89](https://github.com/StackExchange/StackExchange.Redis/pull/2080)) +- Adds: Support for `ZMSCORE` with `.SortedSetScores()`/.`SortedSetScoresAsync()` ([#2082 by ttingen](https://github.com/StackExchange/StackExchange.Redis/pull/2082)) +- Adds: Support for `NX | XX | GT | LT` to `EXPIRE`, `EXPIREAT`, `PEXPIRE`, and `PEXPIREAT` with `.KeyExpire()`/`.KeyExpireAsync()` ([#2083 by Avital-Fine](https://github.com/StackExchange/StackExchange.Redis/pull/2083)) +- Adds: Support for 
`EXPIRETIME`, and `PEXPIRETIME` with `.KeyExpireTime()`/`.KeyExpireTimeAsync()` ([#2083 by Avital-Fine](https://github.com/StackExchange/StackExchange.Redis/pull/2083)) +- Fix: For streams, properly hash `XACK`, `XCLAIM`, and `XPENDING` in cluster scenarios to eliminate `MOVED` retries ([#2085 by nielsderdaele](https://github.com/StackExchange/StackExchange.Redis/pull/2085)) +- Adds: Support for `OBJECT REFCOUNT` with `.KeyRefCount()`/`.KeyRefCountAsync()` ([#2087 by Avital-Fine](https://github.com/StackExchange/StackExchange.Redis/pull/2087)) +- Adds: Support for `OBJECT ENCODING` with `.KeyEncoding()`/`.KeyEncodingAsync()` ([#2088 by Avital-Fine](https://github.com/StackExchange/StackExchange.Redis/pull/2088)) +- Adds: Support for `GEOSEARCH` with `.GeoSearch()`/`.GeoSearchAsync()` ([#2089 by slorello89](https://github.com/StackExchange/StackExchange.Redis/pull/2089)) +- Adds: Support for `GEOSEARCHSTORE` with `.GeoSearchAndStore()`/`.GeoSearchAndStoreAsync()` ([#2089 by slorello89](https://github.com/StackExchange/StackExchange.Redis/pull/2089)) +- Adds: Support for `HRANDFIELD` with `.HashRandomField()`/`.HashRandomFieldAsync()`, `.HashRandomFields()`/`.HashRandomFieldsAsync()`, and `.HashRandomFieldsWithValues()`/`.HashRandomFieldsWithValuesAsync()` ([#2090 by slorello89](https://github.com/StackExchange/StackExchange.Redis/pull/2090)) +- Adds: Support for `LMPOP` with `.ListLeftPop()`/`.ListLeftPopAsync()` and `.ListRightPop()`/`.ListRightPopAsync()` ([#2094 by slorello89](https://github.com/StackExchange/StackExchange.Redis/pull/2094)) +- Adds: Support for `ZMPOP` with `.SortedSetPop()`/`.SortedSetPopAsync()` ([#2094 by slorello89](https://github.com/StackExchange/StackExchange.Redis/pull/2094)) +- Adds: Support for `XAUTOCLAIM` with `.StreamAutoClaim()`/.`StreamAutoClaimAsync()` and `.StreamAutoClaimIdsOnly()`/.`StreamAutoClaimIdsOnlyAsync()` ([#2095 by ttingen](https://github.com/StackExchange/StackExchange.Redis/pull/2095)) +- Fix 
[#2071](https://github.com/StackExchange/StackExchange.Redis/issues/2071): Add `.StringSet()`/`.StringSetAsync()` overloads for source compat broken for 1 case in 2.5.61 ([#2098 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2098)) +- Fix [#2086](https://github.com/StackExchange/StackExchange.Redis/issues/2086): Correct HashSlot calculations for `XREAD` and `XREADGROUP` commands ([#2093 by nielsderdaele](https://github.com/StackExchange/StackExchange.Redis/pull/2093)) +- Adds: Support for `LCS` with `.StringLongestCommonSubsequence()`/`.StringLongestCommonSubsequenceAsync()`, `.StringLongestCommonSubsequenceLength()`/`.StringLongestCommonSubsequenceLengthAsync()`, and `.StringLongestCommonSubsequenceWithMatches()`/`.StringLongestCommonSubsequenceWithMatchesAsync()` ([#2104 by Avital-Fine](https://github.com/StackExchange/StackExchange.Redis/pull/2104)) +- Adds: Support for `OBJECT FREQ` with `.KeyFrequency()`/`.KeyFrequencyAsync()` ([#2105 by Avital-Fine](https://github.com/StackExchange/StackExchange.Redis/pull/2105)) +- Performance: Avoids allocations when computing cluster hash slots or testing key equality ([#2110 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2110)) +- Adds: Support for `SORT_RO` with `.Sort()`/`.SortAsync()` ([#2111 by slorello89](https://github.com/StackExchange/StackExchange.Redis/pull/2111)) +- Adds: Support for `BIT | BYTE` to `BITCOUNT` and `BITPOS` with `.StringBitCount()`/`.StringBitCountAsync()` and `.StringBitPosition()`/`.StringBitPositionAsync()` ([#2116 by Avital-Fine](https://github.com/StackExchange/StackExchange.Redis/pull/2116)) +- Adds: Support for pub/sub payloads that are unary arrays ([#2118 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/2118)) +- Fix: Sentinel timer race during dispose ([#2133 by ewisuri](https://github.com/StackExchange/StackExchange.Redis/pull/2133)) +- Adds: Support for `GT`, `LT`, and `CH` on `ZADD` with 
`.SortedSetAdd()`/`.SortedSetAddAsync()` and `.SortedSetUpdate()`/`.SortedSetUpdateAsync()` ([#2136 by Avital-Fine](https://github.com/StackExchange/StackExchange.Redis/pull/2136)) +- Adds: Support for `COMMAND COUNT`, `COMMAND GETKEYS`, and `COMMAND LIST`, with `.CommandCount()`/`.CommandCountAsync()`, `.CommandGetKeys()`/`.CommandGetKeysAsync()`, and `.CommandList()`/`.CommandListAsync()` ([#2143 by shacharPash](https://github.com/StackExchange/StackExchange.Redis/pull/2143)) + +## 2.5.61 + +- Adds: `GETEX` support with `.StringGetSetExpiry()`/`.StringGetSetExpiryAsync()` ([#1743 by benbryant0](https://github.com/StackExchange/StackExchange.Redis/pull/1743)) +- Fix [#1988](https://github.com/StackExchange/StackExchange.Redis/issues/1988): Don't issue `SELECT` commands if explicitly disabled ([#2023 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2023)) +- Adds: `KEEPTTL` support on `SET` operations ([#2029 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2029)) +- Fix: Allow `XTRIM` `MAXLEN` argument to be `0` ([#2030 by NicoAvanzDev](https://github.com/StackExchange/StackExchange.Redis/pull/2030)) +- Adds: `ConfigurationOptions.BeforeSocketConnect` for configuring sockets between creation and connection ([#2031 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2031)) +- Fix [#1813](https://github.com/StackExchange/StackExchange.Redis/issues/1813): Don't connect to endpoints we failed to parse ([#2042 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2042)) +- Fix: `ClientKill`/`ClientKillAsync` when using `ClientType` ([#2048 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2048)) +- Adds: Most `ConfigurationOptions` changes after `ConnectionMultiplexer` connections will now be respected, e.g. 
changing a timeout will work and changing a password for auth rotation would be used at the next reconnect ([#2050 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2050)) + - **Obsolete**: This change also moves `ConnectionMultiplexer.IncludeDetailInExceptions` and `ConnectionMultiplexer.IncludePerformanceCountersInExceptions` to `ConfigurationOptions`. The old properties are `[Obsolete]` proxies that work until 3.0 for compatibility. +- Adds: Support for `ZRANGESTORE` with `.SortedSetRangeAndStore()`/`.SortedSetRangeAndStoreAsync()` ([#2052 by slorello89](https://github.com/StackExchange/StackExchange.Redis/pull/2052)) + +## 2.5.43 + +- Adds: Bounds checking for `ExponentialRetry` backoff policy ([#1921 by gliljas](https://github.com/StackExchange/StackExchange.Redis/pull/1921)) +- Adds: `DefaultOptionsProvider` support for endpoint-based defaults configuration ([#1987 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/1987)) +- Adds: Envoy proxy support ([#1989 by rkarthick](https://github.com/StackExchange/StackExchange.Redis/pull/1989)) +- Performance: When `SUBSCRIBE` is disabled, give proper errors and connect faster ([#2001 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2001)) +- Adds: `GET` on `SET` command support (present in Redis 6.2+ - [#2003 by martinekvili](https://github.com/StackExchange/StackExchange.Redis/pull/2003)) +- Performance: Improves concurrent load performance when backlogs are utilized ([#2008 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2008)) +- Stability: Improves cluster connections when `CLUSTER` command is disabled ([#2014 by tylerohlsen](https://github.com/StackExchange/StackExchange.Redis/pull/2014)) +- Logging: Improves connection logging and adds overall timing to it ([#2019 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/2019)) + +## 2.5.27 (prerelease) + +- Adds: a backlog/retry mechanism for commands 
issued while a connection isn't available ([#1912 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/1912)) + - Commands will be queued if a multiplexer isn't yet connected to a Redis server. + - Commands will be queued if a connection is lost and then sent to the server when the connection is restored. + - All commands queued will only remain in the backlog for the duration of the configured timeout. + - To revert to previous behavior, a new `ConfigurationOptions.BacklogPolicy` is available - old behavior is configured via `options.BacklogPolicy = BacklogPolicy.FailFast`. This backlogs nothing and fails commands immediately if no connection is available. +- Adds: Makes `StreamEntry` constructor public for better unit test experience ([#1923 by WeihanLi](https://github.com/StackExchange/StackExchange.Redis/pull/1923)) +- Fix: Integer overflow error (issue #1926) with 2GiB+ result payloads ([#1928 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/1928)) +- Change: Update assumed redis versions to v2.8 or v4.0 in the Azure case ([#1929 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/1929)) +- Fix: Profiler showing `EVAL` instead `EVALSHA` ([#1930 by martinpotter](https://github.com/StackExchange/StackExchange.Redis/pull/1930)) +- Performance: Moved tiebreaker fetching in connections into the handshake phase (streamline + simplification) ([#1931 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/1931)) +- Stability: Fixed potential disposed object usage around Arenas (pulling in [Piplines.Sockets.Unofficial#63](https://github.com/mgravell/Pipelines.Sockets.Unofficial/pull/63) by MarcGravell) +- Adds: Thread pool work item stats to exception messages to help diagnose contention ([#1964 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/1964)) +- Fix/Performance: Overhauls pub/sub implementation for correctness ([#1947 by 
NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/1947)) + - Fixes a race in subscribing right after connected + - Fixes a race in subscribing immediately before a publish + - Fixes subscription routing on clusters (spreading instead of choosing 1 node) + - More correctly reconnects subscriptions on connection failures, including to other endpoints +- Adds "(vX.X.X)" version suffix to the default client ID so server-side `CLIENT LIST` can more easily see what's connected ([#1985 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/1985)) +- Fix: Properly including or excluding key names on some message failures ([#1990 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/1990)) +- Fix: Correct return of nil results in `LPOP`, `RPOP`, `SRANDMEMBER`, and `SPOP` ([#1993 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/1993)) + +## 2.2.88 + +- Change: Connection backoff default is now exponential instead of linear ([#1896 by lolodi](https://github.com/StackExchange/StackExchange.Redis/pull/1896)) +- Adds: Support for `NodeMaintenanceScaleComplete` event (handles Redis cluster scaling) ([#1902 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/1902)) + +## 2.2.79 + +- NRediSearch: Support on json index ([#1808 by AvitalFineRedis](https://github.com/StackExchange/StackExchange.Redis/pull/1808)) +- NRediSearch: Support sortable TagFields and unNormalizedForm for Tag & Text Fields ([#1862 by slorello89 & AvitalFineRedis](https://github.com/StackExchange/StackExchange.Redis/pull/1862)) +- Fix: Potential errors getting socket bytes ([#1836 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/1836)) +- Logging: Adds (.NET Version and timestamps) for better debugging ([#1796 by philon-msft](https://github.com/StackExchange/StackExchange.Redis/pull/1796)) +- Adds: `Condition` APIs (transactions), now supports `StreamLengthEqual` and variants ([#1807 by 
AlphaGremlin](https://github.com/StackExchange/StackExchange.Redis/pull/1807)) +- Adds: Support for count argument to `ListLeftPop`, `ListLeftPopAsync`, `ListRightPop`, and `ListRightPopAsync` ([#1850 by jjfmarket](https://github.com/StackExchange/StackExchange.Redis/pull/1850)) +- Fix: Potential task/thread exhaustion from the backlog processor ([#1854 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/1854)) +- Adds: Support for listening to Azure Maintenance Events ([#1876 by amsoedal](https://github.com/StackExchange/StackExchange.Redis/pull/1876)) +- Adds: `StringGetDelete`/`StringGetDeleteAsync` APIs for Redis `GETDEL` command ([#1840 by WeihanLi](https://github.com/StackExchange/StackExchange.Redis/pull/1840)) + +## 2.2.62 + +- Stability: Sentinel potential memory leak fix in OnManagedConnectionFailed handler ([#1710 by alexSatov](https://github.com/StackExchange/StackExchange.Redis/pull/1710)) +- Fix: `GetOutstandingCount` could obscure underlying faults by faulting itself ([#1792 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/1792)) +- Fix [#1791](https://github.com/StackExchange/StackExchange.Redis/issues/1791): With backlog messages becoming reordered ([#1779 by TimLovellSmith](https://github.com/StackExchange/StackExchange.Redis/pull/1779)) + ## 2.2.50 -- performance optimization for PING accuracy (#1714 via eduardobr) -- improvement to reconnect logic (exponential backoff) (#1735 via deepakverma) -- refresh replica endpoint list on failover (#1684 by laurauzcategui) -- fix for ReconfigureAsync re-entrancy (caused connection issues) (#1772 by NickCraver) -- fix for ReconfigureAsync Sentinel race resulting in NoConnectionAvailable when using DemandMaster (#1773 by NickCraver) -- resolve race in AUTH and other connection reconfigurations (#1759 via TimLovellSmith and NickCraver) +- Performance: Optimization for PING accuracy ([#1714 by eduardobr](https://github.com/StackExchange/StackExchange.Redis/pull/1714)) +- 
Fix: Improvement to reconnect logic (exponential backoff) ([#1735 by deepakverma](https://github.com/StackExchange/StackExchange.Redis/pull/1735)) +- Adds: Refresh replica endpoint list on failover ([#1684 by laurauzcategui](https://github.com/StackExchange/StackExchange.Redis/pull/1684)) +- Fix: `ReconfigureAsync` re-entrancy (caused connection issues) ([#1772 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/1772)) +- Fix: `ReconfigureAsync` Sentinel race resulting in NoConnectionAvailable when using DemandMaster ([#1773 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/1773)) +- Stability: Resolve race in AUTH and other connection reconfigurations ([#1759 by TimLovellSmith and NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/1759)) ## 2.2.4 -- fix ambiguous signature of the new `RPUSHX`/`LPUSHX` methods (#1620) +- Fix: Ambiguous signature of the new `RPUSHX`/`LPUSHX` methods ([#1620 by stefanloerwald](https://github.com/StackExchange/StackExchange.Redis/pull/1620)) ## 2.2.3 -- add .NET 5 target -- fix mutex race condition (#1585 via arsnyder16) -- allow `CheckCertificateRevocation` to be controlled via the config string (#1591 via lwlwalker) -- fix range end-value inversion (#1573 via tombatron) -- add `ROLE` support (#1551 via zmj) -- add varadic `RPUSHX`/`LPUSHX` support (#1557 via dmytrohridin) -- fix server-selection strategy race condition (#1532 via deepakverma) -- fix sentinel default port (#1525 via ejsmith) -- fix `Int64` parse scenario (#1568 via arsnyder16) -- force replication check during failover (via joroda) -- documentation tweaks (multiple) -- fix backlog contention issue (#1612, see also #1574 via devbv) +- Adds: .NET 5 target +- Fix: Mutex race condition ([#1585 by arsnyder16](https://github.com/StackExchange/StackExchange.Redis/pull/1585)) +- Adds: `CheckCertificateRevocation` can be controlled via the config string ([#1591 by 
lwlwalker](https://github.com/StackExchange/StackExchange.Redis/pull/1591)) +- Fix: Range end-value inversion ([#1573 by tombatron](https://github.com/StackExchange/StackExchange.Redis/pull/1573)) +- Adds: `ROLE` support ([#1551 by zmj](https://github.com/StackExchange/StackExchange.Redis/pull/1551)) +- Adds: variadic `RPUSHX`/`LPUSHX` support ([#1557 by dmytrohridin](https://github.com/StackExchange/StackExchange.Redis/pull/1557)) +- Fix: Server-selection strategy race condition ([#1532 by deepakverma](https://github.com/StackExchange/StackExchange.Redis/pull/1532)) +- Fix: Sentinel default port ([#1525 by ejsmith](https://github.com/StackExchange/StackExchange.Redis/pull/1525)) +- Fix: `Int64` parse scenario ([#1568 by arsnyder16](https://github.com/StackExchange/StackExchange.Redis/pull/1568)) +- Add: Force replication check during failover ([#1563 by aravindyeduvaka & joroda](https://github.com/StackExchange/StackExchange.Redis/pull/1563)) +- Documentation tweaks (multiple) +- Fix: Backlog contention issue ([#1612 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/1612/), see also [#1574 by devbv](https://github.com/StackExchange/StackExchange.Redis/pull/1574/)) ## 2.1.58 -- fix: `[*]SCAN` - fix possible NRE scenario if the iterator is disposed with an incomplete operation in flight -- fix: `[*]SCAN` - treat the cursor as an opaque value whenever possible, for compatibility with `redis-cluster-proxy` -- add: `[*]SCAN` - include additional exception data in the case of faults +- Fix: `[*]SCAN` - fix possible NRE scenario if the iterator is disposed with an incomplete operation in flight +- Fix: `[*]SCAN` - treat the cursor as an opaque value whenever possible, for compatibility with `redis-cluster-proxy` +- Adds: `[*]SCAN` - include additional exception data in the case of faults ## 2.1.55 -- identify assembly binding problem on .NET Framework; drops `System.IO.Pipelines` to 4.7.1, and identifies new `System.Buffers` binding failure on 4.7.2 
+- Adds: Identification of assembly binding problem on .NET Framework. Drops `System.IO.Pipelines` to 4.7.1, and identifies new `System.Buffers` binding failure on 4.7.2 ## 2.1.50 -- add: bind direct to sentinel-managed instances from a configuration string/object (#1431 via ejsmith) -- add last-delivered-id to `StreamGroupInfo` (#1477 via AndyPook) -- update naming of replication-related commands to reflect Redis 5 naming (#1488/#945) -- fix: the `IServer` commands that are database-specific (`DBSIZE`, `FLUSHDB`, `KEYS`, `SCAN`) now respect the default database on the config (#1460) -- library updates +- Adds: Bind directly to sentinel-managed instances from a configuration string/object ([#1431 by ejsmith](https://github.com/StackExchange/StackExchange.Redis/pull/1431)) +- Adds: `last-delivered-id` to `StreamGroupInfo` ([#1477 by AndyPook](https://github.com/StackExchange/StackExchange.Redis/pull/1477)) +- Change: Update naming of replication-related commands to reflect Redis 5 naming ([#1488 by mgravell](https://github.com/StackExchange/StackExchange.Redis/issues/1488) & [#945 by mgravell](https://github.com/StackExchange/StackExchange.Redis/issues/945)) +- Fix [#1460](https://github.com/StackExchange/StackExchange.Redis/issues/1460): `IServer` commands that are database-specific (`DBSIZE`, `FLUSHDB`, `KEYS`, `SCAN`) now respect the default database on the config ([#1468 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/1468)) +- Library updates ## 2.1.39 -- fix: mutex around connection was not "fair"; in specific scenario could lead to out-of-order commands (#1440) -- fix: update libs (#1432) -- fix: timing error on linux (#1433 via pengweiqhca) -- fix: add `auth` to command-map for sentinal (#1428 via ejsmith) +- Fix: Mutex around connection was not "fair"; in specific scenario could lead to out-of-order commands ([#1440 by kennygea](https://github.com/StackExchange/StackExchange.Redis/pull/1440)) +- Fix 
[#1432](https://github.com/StackExchange/StackExchange.Redis/issues/1432): Update dependencies +- Fix: Timing error on linux ([#1433 by pengweiqhca](https://github.com/StackExchange/StackExchange.Redis/pull/1433)) +- Fix: Add `auth` to command-map for Sentinel ([#1428 by ejsmith](https://github.com/StackExchange/StackExchange.Redis/pull/1428)) ## 2.1.30 -- fix deterministic builds +- Build: Fix deterministic builds ([#1420 by @mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/1420)) ## 2.1.28 -- fix: stability in new sentinel APIs -- fix: include `SslProtocolos` in `ConfigurationOptions.ToString()` (#1408 via vksampath and Sampath Vuyyuru -- fix: clarify messaging around disconnected multiplexers (#1396) -- change: tweak methods of new sentinel API (this is technically a breaking change, but since this is a new API that was pulled quickly, we consider this to be acceptable) -- add: new thread`SocketManager` mode (opt-in) to always use the regular thread-pool instead of the dedicated pool -- add: improved counters in/around error messages -- add: new `User` property on `ConfigurationOptions` -- build: enable deterministic builds (note: this failed; fixed in 2.1.30) +- Fix: Stability in new sentinel APIs +- Fix [#1407](https://github.com/StackExchange/StackExchange.Redis/issues/1407): Include `SslProtocolos` in `ConfigurationOptions.ToString()` ([#1408 by vksampath and Sampath Vuyyuru](https://github.com/StackExchange/StackExchange.Redis/pull/1408)) +- Fix: Clarify messaging around disconnected multiplexers ([#1396 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/1396)) +- Change: Tweak methods of new sentinel API (this is technically a breaking change, but since this is a new API that was pulled quickly, we consider this to be acceptable) +- Adds: New thread `SocketManager` mode (opt-in) to always use the regular thread-pool instead of the dedicated pool +- Adds: Improved counters in/around error messages +- Adds: New `User` 
property on `ConfigurationOptions` +- Build: Enable deterministic builds (note: this failed; fixed in 2.1.30) ## 2.1.0 -- fix: ensure active-message is cleared (#1374 via hamish-omny) -- add: sentinel support (#1067 via shadim; #692 via lexxdark) -- add: `IAsyncEnumerable` scanning APIs now supported (#1087) -- add: new API for use with misbehaving sync-contexts ([more info](https://stackexchange.github.io/StackExchange.Redis/ThreadTheft)) -- add: `TOUCH` support (#1291 via gkorland) -- add: `Condition` API (transactions) now supports `SortedSetLengthEqual` (#1332 via phosphene47) -- add: `SocketManager` is now more configurable (#1115, via naile) -- add: NRediSearch updated in line with JRediSearch (#1267, via tombatron; #1199 via oruchreis) -- add: support for `CheckCertificatRevocation` configuration (#1234, via BLun78 and V912736) -- add: more details about exceptions (#1190, via marafiq) -- add: new stream APIs (#1141 and #1154 via ttingen) -- add: event-args now mockable (#1326 via n1l) -- fix: no-op when adding 0 values to a set (#1283 via omeaart) -- add: support for `LATENCY` and `MEMORY` (#1204) -- add: support for `HSTRLEN` (#1241 via eitanhs) -- add: `GeoRadiusResult` is now mockable (#1175 via firenero) -- fix: various documentation fixes (#1162, #1135, #1203, #1240, #1245, #1159, #1311, #1339, #1336) -- fix: rare race-condition around exception data (#1342) -- fix: `ScriptEvaluateAsync` keyspace isolation (#1377 via gliljas) -- fix: F# compatibility enhancements (#1386) -- fix: improved `ScriptResult` null support (#1392) -- fix: error with DNS resolution breaking endpoint iterator (#1393) -- tests: better docker support for tests (#1389 via ejsmith; #1391) -- tests: general test improvements (#1183, #1385, #1384) +- Fix: Ensure active-message is cleared ([#1374 by hamish-omny](https://github.com/StackExchange/StackExchange.Redis/pull/1374)) +- Adds: Sentinel support ([#1067 by shadim](https://github.com/StackExchange/StackExchange.Redis/pull/1067), 
[#692 by lexxdark](https://github.com/StackExchange/StackExchange.Redis/pull/692)) +- Adds: `IAsyncEnumerable` scanning APIs now supported ([#1087 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/1087)) +- Adds: New API for use with misbehaving sync-contexts ([more info](https://stackexchange.github.io/StackExchange.Redis/ThreadTheft)) +- Adds: `TOUCH` support ([#1291 by gkorland](https://github.com/StackExchange/StackExchange.Redis/pull/1291)) +- Adds: `Condition` API (transactions) now supports `SortedSetLengthEqual` ([#1332 by phosphene47](https://github.com/StackExchange/StackExchange.Redis/pull/1332)) +- Adds: `SocketManager` is now more configurable ([#1115 by naile](https://github.com/StackExchange/StackExchange.Redis/pull/1115)) +- Adds: NRediSearch updated in line with JRediSearch ([#1267 by tombatron](https://github.com/StackExchange/StackExchange.Redis/pull/1267), [#1199 by oruchreis](https://github.com/StackExchange/StackExchange.Redis/pull/1199)) +- Adds: Support for `CheckCertificateRevocation` configuration ([#1234 by BLun78 and V912736](https://github.com/StackExchange/StackExchange.Redis/pull/1234)) +- Adds: More details about exceptions ([#1190 by marafiq](https://github.com/StackExchange/StackExchange.Redis/pull/1190)) +- Adds: Updated `StreamCreateConsumerGroup` methods to use the `MKSTREAM` option ([#1141 via ttingen](https://github.com/StackExchange/StackExchange.Redis/pull/1141)) +- Adds: Support for NOACK in the StreamReadGroup methods ([#1154 by ttingen](https://github.com/StackExchange/StackExchange.Redis/pull/1154)) +- Adds: Event-args now mockable ([#1326 by n1l](https://github.com/StackExchange/StackExchange.Redis/pull/1326)) +- Fix: No-op when adding 0 values to a set ([#1283 by omeaart](https://github.com/StackExchange/StackExchange.Redis/pull/1283)) +- Adds: Support for `LATENCY` and `MEMORY` ([#1204 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/1204)) +- Adds: Support for `HSTRLEN` 
([#1241 by eitanhs](https://github.com/StackExchange/StackExchange.Redis/pull/1241)) +- Adds: `GeoRadiusResult` is now mockable ([#1175 by firenero](https://github.com/StackExchange/StackExchange.Redis/pull/1175)) +- Fix: Various documentation fixes ([#1162 by SnakyBeaky](https://github.com/StackExchange/StackExchange.Redis/pull/1162), [#1135 by ttingen](https://github.com/StackExchange/StackExchange.Redis/pull/1135), [#1203 by caveman-dick](https://github.com/StackExchange/StackExchange.Redis/pull/1203), [#1240 by Excelan](https://github.com/StackExchange/StackExchange.Redis/pull/1240), [#1245 by francoance](https://github.com/StackExchange/StackExchange.Redis/pull/1245), [#1159 by odyth](https://github.com/StackExchange/StackExchange.Redis/pull/1159), [#1311 by DillonAd](https://github.com/StackExchange/StackExchange.Redis/pull/1311), [#1339 by vp89](https://github.com/StackExchange/StackExchange.Redis/pull/1339), [#1336 by ERGeorgiev](https://github.com/StackExchange/StackExchange.Redis/issues/1336)) +- Fix: Rare race-condition around exception data ([#1342 by AdamOutcalt](https://github.com/StackExchange/StackExchange.Redis/pull/1342)) +- Fix: `ScriptEvaluateAsync` keyspace isolation ([#1377 by gliljas](https://github.com/StackExchange/StackExchange.Redis/pull/1377)) +- Fix: F# compatibility enhancements ([#1386 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/1386)) +- Fix: Improved `ScriptResult` null support ([#1392 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/1392)) +- Fix: Error with DNS resolution breaking endpoint iterator ([#1393 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/1393)) +- Tests: Better docker support for tests ([#1389 by ejsmith](https://github.com/StackExchange/StackExchange.Redis/pull/1389), [#1391 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/1391)) +- Tests: General test improvements ([#1183 by 
mtreske](https://github.com/StackExchange/StackExchange.Redis/issues/1183), [#1385 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/1385), [#1384 by NickCraver](https://github.com/StackExchange/StackExchange.Redis/pull/1384)) ## 2.0.601 -- add: tracking for current and next messages to help with debugging timeout issues - helpful in cases of large pipeline blockers +- Adds: Tracking for current and next messages to help with debugging timeout issues - helpful in cases of large pipeline blockers ## 2.0.600 -- add: `ulong` support to `RedisValue` and `RedisResult` (#1103) -- fix: remove odd equality: `"-" != 0` (we do, however, still allow `"-0"`, as that is at least semantically valid, and is logically `== 0`) (related to #1103) -- performance: rework how pub/sub queues are stored - reduces delegate overheads (related to #1101) -- fix #1108 - ensure that we don't try appending log data to the `TextWriter` once we've returned from a method that accepted one +- Adds: `ulong` support to `RedisValue` and `RedisResult` ([#1104 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/1104)) +- Fix: Remove odd equality: `"-" != 0` (we do, however, still allow `"-0"`, as that is at least semantically valid, and is logically `== 0`) (related to [#1103](https://github.com/StackExchange/StackExchange.Redis/issues/1103)) +- Performance: Rework how pub/sub queues are stored - reduces delegate overheads (related to [#1101](https://github.com/StackExchange/StackExchange.Redis/issues/1101)) +- Fix [#1108](https://github.com/StackExchange/StackExchange.Redis/issues/1108): Ensure that we don't try appending log data to the `TextWriter` once we've returned from a method that accepted one ## 2.0.593 -- performance: unify spin-wait usage on sync/async paths to one competitor -- fix #1101 - when a `ChannelMessageQueue` is involved, unsubscribing *via any route* should still unsubscribe and mark the queue-writer as complete +- Performance: Unify 
spin-wait usage on sync/async paths to one competitor +- Fix [#1101](https://github.com/StackExchange/StackExchange.Redis/issues/1101): When a `ChannelMessageQueue` is involved, unsubscribing *via any route* should still unsubscribe and mark the queue-writer as complete ## 2.0.588 -- stability and performance: resolve intermittent stall in the write-lock that could lead to unexpected timeouts even when at low/reasonable (but concurrent) load +- Stability/Performance: Resolve intermittent stall in the write-lock that could lead to unexpected timeouts even when at low/reasonable (but concurrent) load ## 2.0.571 -- performance: use new [arena allocation API](https://mgravell.github.io/Pipelines.Sockets.Unofficial/docs/arenas) to avoid `RawResult[]` overhead -- performance: massively simplified how `ResultBox` is implemented, in particular to reduce `TaskCompletionSource` allocations -- performance: fix sync-over-async issue with async call paths, and fix the [SemaphoreSlim](https://blog.marcgravell.com/2019/02/fun-with-spiral-of-death.html) problems that this uncovered -- performance: re-introduce the unsent backlog queue, in particular to improve async performance -- performance: simplify how completions are reactivated, so that external callers use their originating pool, not the dedicated IO pools (prevent thread stealing) -- fix: update Pipelines.Sockets.Unofficial to prevent issue with incorrect buffer re-use in corner-case -- fix: `KeyDeleteAsync` could, in some cases, always use `DEL` (instead of `UNLINK`) -- fix: last unanswered write time was incorrect -- change: use higher `Pipe` thresholds when sending +- Performance: Use new [arena allocation API](https://mgravell.github.io/Pipelines.Sockets.Unofficial/docs/arenas) to avoid `RawResult[]` overhead +- Performance: Massively simplified how `ResultBox` is implemented, in particular to reduce `TaskCompletionSource` allocations +- Performance: Fix sync-over-async issue with async call paths, and fix the 
[SemaphoreSlim](https://blog.marcgravell.com/2019/02/fun-with-spiral-of-death.html) problems that this uncovered +- Performance: Reintroduce the unsent backlog queue, in particular to improve async performance +- Performance: Simplify how completions are reactivated, so that external callers use their originating pool, not the dedicated IO pools (prevent thread stealing) +- Fix: Update `Pipelines.Sockets.Unofficial` to prevent issue with incorrect buffer re-use in corner-case +- Fix: `KeyDeleteAsync` could, in some cases, always use `DEL` (instead of `UNLINK`) +- Fix: Last unanswered write time was incorrect +- Change: Use higher `Pipe` thresholds when sending ## 2.0.519 -- adapt to late changes in the RC streams API (#983, #1007) -- documentation fixes (#997, #1005) -- build: switch to SDK 2.1.500 +- Fix [#1007](https://github.com/StackExchange/StackExchange.Redis/issues/1007): Adapt to late changes in the RC streams API ([#983 by mgravell](https://github.com/StackExchange/StackExchange.Redis/pull/983)) +- Documentation fixes ([#997 by MerelyRBLX](https://github.com/StackExchange/StackExchange.Redis/pull/997), [#1005 by zBrianW](https://github.com/StackExchange/StackExchange.Redis/pull/1005)) +- Build: Switch to SDK 2.1.500 ## 2.0.513 -- fix #961 - fix assembly binding redirect problems; IMPORTANT: this drops to an older `System.Buffers` version - if you have manually added redirects for `4.0.3.0`, you may need to manually update to `4.0.2.0` (or remove completely) -- fix #962 - avoid NRE in edge-case when fetching bridge +- Fix [#961](https://github.com/StackExchange/StackExchange.Redis/issues/961): fix assembly binding redirect problems; IMPORTANT: this drops to an older `System.Buffers` version - if you have manually added redirects for `4.0.3.0`, you may need to manually update to `4.0.2.0` (or remove completely) +- Fix [#962](https://github.com/StackExchange/StackExchange.Redis/issues/962): Avoid NRE in edge-case when fetching bridge ## 2.0.505 -- fix #943 - 
ensure transaction inner tasks are completed prior to completing the outer transaction task -- fix #946 - reinstate missing `TryParse` methods on `RedisValue` -- fix #940 - off-by-one on pre-boxed integer cache (NRediSearch) +- Fix [#943](https://github.com/StackExchange/StackExchange.Redis/issues/943): Ensure transaction inner tasks are completed prior to completing the outer transaction task +- Fix [#946](https://github.com/StackExchange/StackExchange.Redis/issues/946): Reinstate missing `TryParse` methods on `RedisValue` +- Fix [#940](https://github.com/StackExchange/StackExchange.Redis/issues/940): Off-by-one on pre-boxed integer cache (NRediSearch) ## 2.0.495 -- 2.0 is a large - and breaking - change +2.0 is a large - and breaking - change. The key focus of this release is stability and reliability. -The key focus of this release is stability and reliability. - -- HARD BREAK: the package identity has changed; instead of `StackExchange.Redis` (not strong-named) and `StackExchange.Redis.StrongName` (strong-named), we are now +- **Hard Break**: The package identity has changed; instead of `StackExchange.Redis` (not strong-named) and `StackExchange.Redis.StrongName` (strong-named), we are now only releasing `StackExchange.Redis` (strong-named). 
This is a binary breaking change that requires consumers to be re-compiled; it cannot be applied via binding-redirects -- HARD BREAK: the platform targets have been rationalized - supported targets are .NETStandard 2.0 (and above), .NETFramework 4.6.1 (and above), and .NETFramework 4.7.2 (and above) +- **Hard Break**: The platform targets have been rationalized - supported targets are .NETStandard 2.0 (and above), .NETFramework 4.6.1 (and above), and .NETFramework 4.7.2 (and above) (note - the last two are mainly due to assembly binding problems) -- HARD BREAK: the profiling API has been overhauled and simplified; full documentation is [provided here](https://stackexchange.github.io/StackExchange.Redis/Profiling_v2.html) -- SOFT BREAK: the `PreserveAsyncOrder` behaviour of the pub/sub API has been deprecated; a *new* API has been provided for scenarios that require in-order pub/sub handling - - the `Subscribe` method has a new overload *without* a handler parameter which returns a `ChannelMessageQueue`, which provides `async` ordered access to messsages) -- internal: the network architecture has moved to use `System.IO.Pipelines`; this has allowed us to simplify and unify a lot of the network code, and in particular - fix a lot of problems relating to how the library worked with TLS and/or .NETStandard -- change: as a result of the `System.IO.Pipelines` change, the error-reporting on timeouts is now much simpler and clearer; the [timeouts documentation](Timeouts.md) has been updated -- removed: the `HighPriority` (queue-jumping) flag is now deprecated -- internal: most buffers internally now make use of pooled memory; `RedisValue` no longer pre-emptively allocates buffers -- internal: added new custom thread-pool for handling async continuations to avoid thread-pool starvation issues -- internal: all IL generation has been removed; the library should now work on platforms that do not allow runtime-emit -- added: asynchronous operations now have full support for 
reporting timeouts -- added: new APIs now exist to work with pooled memory without allocations - `RedisValue.CreateFrom(MemoryStream)` and `operator` support for `Memory` and `ReadOnlyMemory`; and `IDatabase.StringGetLease[Async](...)`, `IDatabase.HashGetLease[Async](...)`, `Lease.AsStream()`) -- added: ["streams"](https://redis.io/topics/streams-intro) support (thanks to [ttingen](https://github.com/ttingen) for their contribution) -- various missing commands / overloads have been added; `Execute[Async]` for additional commands is now available on `IServer` -- fix: a *lot* of general bugs and issues have been resolved -- ACCIDENTAL BREAK: `RedisValue.TryParse` was accidentally ommitted in the overhaul; this has been rectified and will be available in the next build - -a more complete list of issues addressed can be seen in [this tracking issue](https://github.com/StackExchange/StackExchange.Redis/issues/871) - -Note: we currently have no plans to do an additional 1.* release. In particular, even though there was a `1.2.7-alpha` build on nuget, we *do not* currently have -plans to release `1.2.7`. 
+- **Hard Break**: The profiling API has been overhauled and simplified; full documentation is [provided here](https://stackexchange.github.io/StackExchange.Redis/Profiling_v2.html) +- **Soft Break**: The `PreserveAsyncOrder` behaviour of the pub/sub API has been deprecated; a *new* API has been provided for scenarios that require in-order pub/sub handling - + the `Subscribe` method has a new overload *without* a handler parameter which returns a `ChannelMessageQueue`, which provides `async` ordered access to messages) +- Internal: The network architecture has moved to use `System.IO.Pipelines`; this has allowed us to simplify and unify a lot of the network code, and in particular fix a lot of problems relating to how the library worked with TLS and/or .NETStandard +- Change: As a result of the `System.IO.Pipelines` change, the error-reporting on timeouts is now much simpler and clearer; the [timeouts documentation](Timeouts.md) has been updated +- Removed: The `HighPriority` (queue-jumping) flag is now deprecated +- Internal: Most buffers internally now make use of pooled memory; `RedisValue` no longer preemptively allocates buffers +- Internal: Added new custom thread-pool for handling async continuations to avoid thread-pool starvation issues +- Internal: All IL generation has been removed; the library should now work on platforms that do not allow runtime-emit +- Adds: asynchronous operations now have full support for reporting timeouts +- Adds: new APIs now exist to work with pooled memory without allocations - `RedisValue.CreateFrom(MemoryStream)` and `operator` support for `Memory` and `ReadOnlyMemory`; and `IDatabase.StringGetLease[Async](...)`, `IDatabase.HashGetLease[Async](...)`, `Lease.AsStream()`) +- Adds: ["streams"](https://redis.io/topics/streams-intro) support (thanks to [ttingen](https://github.com/ttingen) for their contribution) +- Adds: Various missing commands / overloads have been added; `Execute[Async]` for additional commands is now 
available on `IServer` +- Fix: A *lot* of general bugs and issues have been resolved +- **Break**: `RedisValue.TryParse` was accidentally omitted in the overhaul; this has been rectified and will be available in the next build + +A more complete list of issues addressed can be seen in [this tracking issue](https://github.com/StackExchange/StackExchange.Redis/issues/871) + +Note: we currently have no plans to do an additional `1.*` release. In particular, even though there was a `1.2.7-alpha` build on nuget, we *do not* currently have plans to release `1.2.7`. --- ## 1.2.6 -- fix change to `cluster nodes` output when using cluster-enabled target and 4.0+ (see [redis #4186](https://github.com/antirez/redis/issues/4186) +- Change: `cluster nodes` output when using cluster-enabled target and 4.0+ (see [redis #4186](https://github.com/antirez/redis/issues/4186) ## 1.2.5 -- critical fix: "poll mode" was disabled in the build for net45/net60 - impact: IO jams and lack of reader during high load +- (Critical) Fix: "poll mode" was disabled in the build for `net45`/`net46` - Impact: IO jams and lack of reader during high load ## 1.2.4 -- fix: incorrect build configuration (#649) +- Fix: Incorrect build configuration ([#649 by jrlost](https://github.com/StackExchange/StackExchange.Redis/issues/649)) ## 1.2.3 -- fix: when using `redis-cluster` with multiple replicas, use round-robin when selecting replica (#610) -- add: can specify `NoScriptCache` flag when using `ScriptEvaluate` to bypass all cache features (always uses `EVAL` instead of `SCRIPT LOAD` and `EVALSHA`) (#617) +- Fix: When using `redis-cluster` with multiple replicas, use round-robin when selecting replica ([#610 by mgravell](https://github.com/StackExchange/StackExchange.Redis/issues/610)) +- Adds: Can specify `NoScriptCache` flag when using `ScriptEvaluate` to bypass all cache features (always uses `EVAL` instead of `SCRIPT LOAD` and `EVALSHA`) ([#617 by 
Funbit](https://github.com/StackExchange/StackExchange.Redis/issues/617)) -## 1.2.2 (preview): +## 1.2.2 (preview) -- **UNAVAILABLE**: .NET 4.0 support is not in this build, due to [a build issue](https://github.com/dotnet/cli/issues/5993) - looking into solutions -- add: make performance-counter tracking opt-in (`IncludePerformanceCountersInExceptions`) as it was causing problems (#587) -- add: can now specifiy allowed SSL/TLS protocols (#603) -- add: track message status in exceptions (#576) -- add: `GetDatabase()` optimization for DB 0 and low numbered databases: `IDatabase` instance is retained and recycled (as long as no `asyncState` is provided) -- improved connection retry policy (#510, #572) -- add `Execute`/`ExecuteAsync` API to support "modules"; [more info](http://blog.marcgravell.com/2017/04/stackexchangeredis-and-redis-40-modules.html) -- fix: timeout link fixed re /docs change (below) +- **Break**: .NET 4.0 support is not in this build, due to [a build issue](https://github.com/dotnet/cli/issues/5993) - looking into solutions +- Adds: Make performance-counter tracking opt-in (`IncludePerformanceCountersInExceptions`) as it was causing problems ([#587 by AlexanderKot](https://github.com/StackExchange/StackExchange.Redis/issues/587)) +- Adds: Can now specify allowed SSL/TLS protocols ([#603 by JonCole](https://github.com/StackExchange/StackExchange.Redis/pull/603)) +- Adds: Track message status in exceptions ([#576 by deepakverma](https://github.com/StackExchange/StackExchange.Redis/pull/576)) +- Adds: `GetDatabase()` optimization for DB 0 and low numbered databases: `IDatabase` instance is retained and recycled (as long as no `asyncState` is provided) +- Performance: Improved connection retry policy ([#510 by deepakverma](https://github.com/StackExchange/StackExchange.Redis/pull/510), [#572 by deepakverma](https://github.com/StackExchange/StackExchange.Redis/pull/572)) +- Adds: `Execute`/`ExecuteAsync` API to support "modules"; [more 
info](https://blog.marcgravell.com/2017/04/stackexchangeredis-and-redis-40-modules.html) +- Fix: Timeout link fixed re /docs change (below) - [`NRediSearch`](https://www.nuget.org/packages/NRediSearch/) added as exploration into "modules" - -Other changes (not library related) - -- (project) refactor /docs for github pages -- improve release note tracking -- rework build process to use csproj +- Other changes (not library related) + - Change: Refactor /docs for github pages + - Change: Improve release note tracking + - Build: Rework build process to use csproj ## 1.2.1 -- fix: avoid overlapping per-endpoint heartbeats - -## 1.2.0 - -- (same as 1.2.0-alpha1) +- Fix: Avoid overlapping per-endpoint heartbeats -## 1.2.0-alpha1 +## 1.2.0 (same as 1.2.0-alpha1) -- add: GEO commands (#489) -- add: ZADD support for new NX/XX switches (#520) -- add: core-clr preview support improvements +- Adds: GEO commands ([#489 by wjdavis5](https://github.com/StackExchange/StackExchange.Redis/pull/489)) +- Adds: ZADD support for new NX/XX switches ([#520 by seniorquico](https://github.com/StackExchange/StackExchange.Redis/pull/520)) +- Adds: core-clr preview support improvements ## 1.1.608 -- fix: bug with race condition in servers indexer (related: 1.1.606) +- Fix: Bug with race condition in servers indexer (related: 1.1.606) ## 1.1.607 -- fix: ensure socket-mode polling is enabled (.net) +- Fix: Ensure socket-mode polling is enabled (.net) ## 1.1.606 -- fix: bug with race condition in servers indexer +- Fix: Bug with race condition in servers indexer -## and the rest +## ...and the rest -(I'm happy to take PRs for change history going back in time) +(We're happy to take PRs for change history going back in time or any fixes here!) 
diff --git a/docs/Resp3.md b/docs/Resp3.md new file mode 100644 index 000000000..126b460f4 --- /dev/null +++ b/docs/Resp3.md @@ -0,0 +1,45 @@ +# RESP3 and StackExchange.Redis + +RESP2 and RESP3 are evolutions of the Redis protocol, with RESP3 existing from Redis server version 6 onwards (v7.2+ for Redis Enterprise). The main differences are: + +1. RESP3 can carry out-of-band / "push" messages on a single connection, where-as RESP2 requires a separate connection for these messages +2. RESP3 can (when appropriate) convey additional semantic meaning about returned payloads inside the same result structure +3. Some commands (see [this topic](https://github.com/redis/redis-doc/issues/2511)) return different result structures in RESP3 mode; for example a flat interleaved array might become a jagged array + +For most people, #1 is the main reason to consider RESP3, as in high-usage servers - this can halve the number of connections required. +This is particularly useful in hosted environments where the number of inbound connections to the server is capped as part of a service plan. +Alternatively, where users are currently choosing to disable the out-of-band connection to achieve this, they may now be able to re-enable this +(for example, to receive server maintenance notifications) *without* incurring any additional connection overhead. + +Because of the significance of #3 (and to avoid breaking your code), the library does not currently default to RESP3 mode. This must be enabled explicitly +via `ConfigurationOptions.Protocol` or by adding `,protocol=resp3` (or `,protocol=3`) to the configuration string. + +--- + +#3 is a critical one - the library *should* already handle all documented commands that have revised results in RESP3, but if you're using +`Execute[Async]` to issue ad-hoc commands, you may need to update your processing code to compensate for this, ideally using detection to handle +*either* format so that the same code works in both RESP2 and RESP3. 
Since the impacted commands are handled internally by the library, in reality +this should not usually present a difficulty. + +The minor (#2) and major (#3) differences to results are only visible to your code when using: + +- Lua scripts invoked via the `ScriptEvaluate[Async](...)` or related APIs, that either: + - Uses the `redis.setresp(3)` API and returns a value from `redis.[p]call(...)` + - Returns a value that satisfies the [LUA to RESP3 type conversion rules](https://redis.io/docs/manual/programmability/lua-api/#lua-to-resp3-type-conversion) +- Ad-hoc commands (in particular: *modules*) that are invoked via the `Execute[Async](string command, ...)` API + +...both which return `RedisResult`. **If you are not using these APIs, you should not need to do anything additional.** + +Historically, you could use the `RedisResult.Type` property to query the type of data returned (integer, string, etc). In particular: + +- Two new properties are added: `RedisResult.Resp2Type` and `RedisResult.Resp3Type` + - The `Resp3Type` property exposes the new semantic data (when using RESP3) - for example, it can indicate that a value is a double-precision number, a boolean, a map, etc (types that did not historically exist) + - The `Resp2Type` property exposes the same value that *would* have been returned if this data had been returned over RESP2 + - The `Type` property is now marked obsolete, but functions identically to `Resp2Type`, so that pre-existing code (for example, that has a `switch` on the type) is not impacted by RESP3 +- The `ResultType.MultiBulk` is superseded by `ResultType.Array` (this is a nomenclature change only; they are the same value and function identically) + +Possible changes required due to RESP3: + +1. To prevent build warnings, replace usage of `ResultType.MultiBulk` with `ResultType.Array`, and usage of `RedisResult.Type` with `RedisResult.Resp2Type` +2. 
If you wish to exploit the additional semantic data when enabling RESP3, use `RedisResult.Resp3Type` where appropriate +3. If you are enabling RESP3, you must verify whether the commands you are using can give different result shapes on RESP3 connections \ No newline at end of file diff --git a/docs/RespLogging.md b/docs/RespLogging.md new file mode 100644 index 000000000..3ff3b0164 --- /dev/null +++ b/docs/RespLogging.md @@ -0,0 +1,150 @@ +Logging and validating the underlying RESP stream +=== + +Sometimes (rarely) there is a question over the validity of the RESP stream from a server (especially when using proxies +or a "redis-like-but-not-actually-redis" server), and it is hard to know whether the *data sent* was bad, vs +the client library tripped over the data. + +To help with this, an experimental API exists to help log and validate RESP streams. This API is not intended +for routine use (and may change at any time), but can be useful for diagnosing problems. + +For example, consider we have the following load test which (on some setup) causes a failure with some +degree of reliability (even if you need to run it 6 times to see a failure): + +``` c# +// connect +Console.WriteLine("Connecting..."); +var options = ConfigurationOptions.Parse(ConnectionString); +await using var muxer = await ConnectionMultiplexer.ConnectAsync(options); +var db = muxer.GetDatabase(); + +// load +RedisKey testKey = "marc_abc"; +await db.KeyDeleteAsync(testKey); +Console.WriteLine("Writing..."); +for (int i = 0; i < 100; i++) +{ + // sync every 50 iterations (pipeline the rest) + var flags = (i % 50) == 0 ? 
CommandFlags.None : CommandFlags.FireAndForget; + await db.SetAddAsync(testKey, Guid.NewGuid().ToString(), flags); +} + +// fetch +Console.WriteLine("Reading..."); +int count = 0; +for (int i = 0; i < 10; i++) +{ + // this is deliberately not using SCARD + // (to put load on the inbound) + count += (await db.SetMembersAsync(testKey)).Length; +} +Console.WriteLine("all done"); +``` + +## Logging RESP streams + +When this fails, it will not be obvious exactly who is to blame. However, we can ask for the data streams +to be logged to the local file-system. + +**Obviously, this may leave data on disk, so this may present security concerns if used with production data; use +this feature sparingly, and clean up after yourself!** + +``` c# +// connect +Console.WriteLine("Connecting..."); +var options = ConfigurationOptions.Parse(ConnectionString); +LoggingTunnel.LogToDirectory(options, @"C:\Code\RedisLog"); // <=== added! +await using var muxer = await ConnectionMultiplexer.ConnectAsync(options); +... +``` + +This API is marked `[Obsolete]` simply to discourage usage, but you can ignore this warning once you +understand what it is saying (using `#pragma warning disable CS0618` if necessary). + +This will update the `ConfigurationOptions` with a custom `Tunnel` that performs file-based mirroring +of the RESP streams. If `Ssl` is enabled on the `ConfigurationOptions`, the `Tunnel` will *take over that responsibility* +(so that the unencrypted data can be logged), and will *disable* `Ssl` on the `ConfigurationOptions` - but TLS +will still be used correctly. + +If we run our code, we will see that 2 files are written per connection ("in" and "out"); if you are using RESP2 (the default), +then 2 connections are usually established (one for regular "interactive" commands, and one for pub/sub messages), so this will +typically create 4 files. 
+ +## Validating RESP streams + +RESP is *mostly* text, so a quick eyeball can be achieved using any text tool; an "out" file will typically start: + +``` txt +$6 +CLIENT +$7 +SETNAME +... +``` + +and an "in" file will typically start: + +``` txt ++OK ++OK ++OK +... +``` + +This is the start of the handshakes for identifying the client to the redis server, and the server acknowledging this (if +you have authentication enabled, there will be a `AUTH` command first, or `HELLO` on RESP3). + +If there is a failure, you obviously don't want to manually check these files. Instead, an API exists to validate RESP streams: + +``` c# +var messages = await LoggingTunnel.ValidateAsync(@"C:\Code\RedisLog"); +Console.WriteLine($"{messages} RESP fragments validated"); +``` + +If the RESP streams are *not* valid, an exception will provide further details. + +**An exception here is strong evidence that there is a fault either in the redis server, or an intermediate proxy**. + +Conversely, if the library reported a protocol failure but the validation step here *does not* report an error, then +that is strong evidence of a library error; [**please report this**](https://github.com/StackExchange/StackExchange.Redis/issues/new) (with details). + +You can also *replay* the conversation locally, seeing the individual requests and responses: + +``` c# +var messages = await LoggingTunnel.ReplayAsync(@"C:\Code\RedisLog", (cmd, resp) => +{ + if (cmd.IsNull) + { + // out-of-band/"push" response + Console.WriteLine("<< " + LoggingTunnel.DefaultFormatResponse(resp)); + } + else + { + Console.WriteLine(" > " + LoggingTunnel.DefaultFormatCommand(cmd)); + Console.WriteLine(" < " + LoggingTunnel.DefaultFormatResponse(resp)); + } +}); +Console.WriteLine($"{messages} RESP commands validated"); +``` + +The `DefaultFormatCommand` and `DefaultFormatResponse` methods are provided for convenience, but you +can perform your own formatting logic if required. 
If a RESP error is encountered in the response to +a particular message, the callback will still be invoked to indicate that error. For example, after deliberately +introducing an error into the captured file, we might see: + +``` txt + > CLUSTER NODES + < -ERR This instance has cluster support disabled + > GET __Booksleeve_TieBreak + < (null) + > ECHO ... + < -Invalid bulk string terminator +Unhandled exception. StackExchange.Redis.RedisConnectionException: Invalid bulk string terminator +``` + +The `-ERR` message is not a problem - that's normal and simply indicates that this is not a redis cluster; however, the +final pair is an `ECHO` request, for which the corresponding response was invalid. This information is useful for finding +out what happened. + +Emphasis: this API is not intended for common/frequent usage; it is intended only to assist validating the underlying +RESP stream. \ No newline at end of file diff --git a/docs/Scripting.md b/docs/Scripting.md index e04b2d19a..56af7afc1 100644 --- a/docs/Scripting.md +++ b/docs/Scripting.md @@ -1,24 +1,23 @@ Scripting === -Basic [Lua scripting](http://redis.io/commands/EVAL) is supported by the `IServer.ScriptLoad(Async)`, `IServer.ScriptExists(Async)`, `IServer.ScriptFlush(Async)`, `IDatabase.ScriptEvaluate`, and `IDatabaseAsync.ScriptEvaluateAsync` methods. +Basic [Lua scripting](https://redis.io/commands/EVAL) is supported by the `IServer.ScriptLoad(Async)`, `IServer.ScriptExists(Async)`, `IServer.ScriptFlush(Async)`, `IDatabase.ScriptEvaluate`, and `IDatabaseAsync.ScriptEvaluateAsync` methods. These methods expose the basic commands necessary to submit and execute Lua scripts to redis. -More sophisticated scripting is available through the `LuaScript` class. The `LuaScript` class makes it simpler to prepare and submit parameters along with a script, as well as allowing you to use -cleaner variables names. +More sophisticated scripting is available through the `LuaScript` class. 
The `LuaScript` class makes it simpler to prepare and submit parameters along with a script, as well as allowing you to use cleaner variables names. An example use of the `LuaScript`: -``` - const string Script = "redis.call('set', @key, @value)"; +```csharp +const string Script = "redis.call('set', @key, @value)"; - using (ConnectionMultiplexer conn = /* init code */) - { - var db = conn.GetDatabase(0); +using (ConnectionMultiplexer conn = /* init code */) +{ + var db = conn.GetDatabase(0); - var prepared = LuaScript.Prepare(Script); - db.ScriptEvaluate(prepared, new { key = (RedisKey)"mykey", value = 123 }); - } + var prepared = LuaScript.Prepare(Script); + db.ScriptEvaluate(prepared, new { key = (RedisKey)"mykey", value = 123 }); +} ``` The `LuaScript` class rewrites variables in scripts of the form `@myVar` into the appropriate `ARGV[someIndex]` required by redis. If the @@ -36,24 +35,25 @@ Any object that exposes field or property members with the same name as @-prefix - RedisKey - RedisValue +StackExchange.Redis handles Lua script caching internally. It automatically transmits the Lua script to redis on the first call to 'ScriptEvaluate'. For further calls of the same script only the hash with [`EVALSHA`](https://redis.io/commands/evalsha) is used. -To avoid retransmitting the Lua script to redis each time it is evaluated, `LuaScript` objects can be converted into `LoadedLuaScript`s via `LuaScript.Load(IServer)`. -`LoadedLuaScripts` are evaluated with the [`EVALSHA`](http://redis.io/commands/evalsha), and referred to by hash. +For more control of the Lua script transmission to redis, `LuaScript` objects can be converted into `LoadedLuaScript`s via `LuaScript.Load(IServer)`. +`LoadedLuaScripts` are evaluated with the [`EVALSHA`](https://redis.io/commands/evalsha), and referred to by hash. 
An example use of `LoadedLuaScript`: -``` - const string Script = "redis.call('set', @key, @value)"; +```csharp +const string Script = "redis.call('set', @key, @value)"; - using (ConnectionMultiplexer conn = /* init code */) - { - var db = conn.GetDatabase(0); - var server = conn.GetServer(/* appropriate parameters*/); +using (ConnectionMultiplexer conn = /* init code */) +{ + var db = conn.GetDatabase(0); + var server = conn.GetServer(/* appropriate parameters*/); - var prepared = LuaScript.Prepare(Script); - var loaded = prepared.Load(server); - loaded.Evaluate(db, new { key = (RedisKey)"mykey", value = 123 }); - } + var prepared = LuaScript.Prepare(Script); + var loaded = prepared.Load(server); + loaded.Evaluate(db, new { key = (RedisKey)"mykey", value = 123 }); +} ``` All methods on both `LuaScript` and `LoadedLuaScript` have Async alternatives, and expose the actual script submitted to redis as the `ExecutableScript` property. diff --git a/docs/Server.md b/docs/Server.md index e236b1f5f..a0777c478 100644 --- a/docs/Server.md +++ b/docs/Server.md @@ -13,7 +13,7 @@ There are multiple ways of running redis on windows: - [Memurai](https://www.memurai.com/) : a fully supported, well-maintained port of redis for Windows (this is a commercial product, with a free developer version available, and free trials) - previous to Memurai, MSOpenTech had a Windows port of linux, but this is no longer maintained and is now very out of date; it is not recommended, but: [here](https://www.nuget.org/packages/redis-64/) -- WSL/WSL2 : on Windows 10, you can run redis for linux in the Windows Subsystem for Linux; note, however, that WSL may have some significant performance implications, and WSL2 appears as a *different* machine (not the local machine), due to running as a VM +- WSL/WSL2 : on Windows 10+, you can run redis for linux in the Windows Subsystem for Linux; note, however, that WSL may have some significant performance implications, and WSL2 appears as a *different* 
machine (not the local machine), due to running as a VM ## Docker @@ -25,4 +25,5 @@ If you don't want to run your own redis servers, multiple commercial cloud offer - RedisLabs - Azure Redis Cache -- AWS ElastiCache for Redis \ No newline at end of file +- AWS ElastiCache for Redis +- GCP Memorystore for Redis diff --git a/docs/ServerMaintenanceEvent.md b/docs/ServerMaintenanceEvent.md new file mode 100644 index 000000000..2f4ba1c29 --- /dev/null +++ b/docs/ServerMaintenanceEvent.md @@ -0,0 +1,67 @@ +# Introducing ServerMaintenanceEvents + +StackExchange.Redis now automatically subscribes to notifications about upcoming maintenance from supported Redis providers. The ServerMaintenanceEvent on the ConnectionMultiplexer raises events in response to notifications about server maintenance, and application code can subscribe to the event to handle connection drops more gracefully during these maintenance operations. + +If you are a Redis vendor and want to integrate support for ServerMaintenanceEvents into StackExchange.Redis, we recommend opening an issue so we can discuss the details. + +## Types of events + +Azure Cache for Redis currently sends the following notifications: +* `NodeMaintenanceScheduled`: Indicates that a maintenance event is scheduled. Can be 10-15 minutes in advance. +* `NodeMaintenanceStarting`: This event gets fired ~20s before maintenance begins +* `NodeMaintenanceStart`: This event gets fired when maintenance is imminent (<5s) +* `NodeMaintenanceFailoverComplete`: Indicates that a replica has been promoted to primary +* `NodeMaintenanceEnded`: Indicates that the node maintenance operation is over + +## Sample code + +The library will automatically subscribe to the pub/sub channel to receive notifications from the server, if one exists. For Azure Redis caches, this is the 'AzureRedisEvents' channel. To plug in your maintenance handling logic, you can pass in an event handler via the `ServerMaintenanceEvent` event on your `ConnectionMultiplexer`. 
For example: + +```csharp +multiplexer.ServerMaintenanceEvent += (object sender, ServerMaintenanceEvent e) => +{ + if (e is AzureMaintenanceEvent azureEvent && azureEvent.NotificationType == AzureNotificationType.NodeMaintenanceStart) + { + // Take whatever action is appropriate for your application to handle the maintenance operation gracefully. + // This might mean writing a log entry, redirecting traffic away from the impacted Redis server, or + // something entirely different. + } +}; +``` +You can see the schema for the `AzureMaintenanceEvent` class [here](https://github.com/StackExchange/StackExchange.Redis/blob/main/src/StackExchange.Redis/Maintenance/AzureMaintenanceEvent.cs). Note that the library automatically sets the `ReceivedTimeUtc` timestamp when the event is received, so if you see in your logs that `ReceivedTimeUtc` is after `StartTimeUtc`, this may indicate that your connections are under high load. + +## Walking through a sample maintenance event + +1. App is connected to Redis and everything is working fine. +2. Current Time: [16:21:39] -> `NodeMaintenanceScheduled` event is raised, with a `StartTimeUtc` of 16:35:57 (about 14 minutes from current time). + * Note: the start time for this event is an approximation, because we will start getting ready for the update proactively and the node may become unavailable up to 3 minutes sooner. We recommend listening for `NodeMaintenanceStarting` and `NodeMaintenanceStart` for the highest level of accuracy (these are only likely to differ by a few seconds at most). +3. Current Time: [16:34:26] -> `NodeMaintenanceStarting` message is received, and `StartTimeUtc` is 16:34:46, about 20 seconds from the current time. +4. Current Time: [16:34:46] -> `NodeMaintenanceStart` message is received, so we know the node maintenance is about to happen. We break the circuit and stop sending new operations to the Redis connection. (Note: the appropriate action for your application may be different.) 
StackExchange.Redis will automatically refresh its view of the overall server topology. +5. Current Time: [16:34:47] -> The connection is closed by the Redis server. +6. Current Time: [16:34:56] -> `NodeMaintenanceFailoverComplete` message is received. This tells us that the replica node has promoted itself to primary, so the other node can go offline for maintenance. +7. Current Time [16:34:56] -> The connection to the Redis server is restored. It is safe to send commands again to the connection and all commands will succeed. +8. Current Time [16:37:48] -> `NodeMaintenanceEnded` message is received, with a `StartTimeUtc` of 16:37:48. Nothing to do here if you are talking to the load balancer endpoint (port 6380 or 6379). For clustered servers, you can resume sending readonly workloads to the replica(s). + +## Azure Cache for Redis Maintenance Event details + +#### NodeMaintenanceScheduled event + +`NodeMaintenanceScheduled` events are raised for maintenance scheduled by Azure, up to 15 minutes in advance. This event will not get fired for user-initiated reboots. + +#### NodeMaintenanceStarting event + +`NodeMaintenanceStarting` events are raised ~20 seconds ahead of upcoming maintenance. This means that one of the primary or replica nodes will be going down for maintenance. + +It's important to understand that this does *not* mean downtime if you are using a Standard/Premium SKU cache. If the replica is targeted for maintenance, disruptions should be minimal. If the primary node is the one going down for maintenance, a failover will occur, which will close existing connections going through the load balancer port (6380/6379) or directly to the node (15000/15001). You may want to pause sending write commands until the replica node has assumed the primary role and the failover is complete. + +#### NodeMaintenanceStart event + +`NodeMaintenanceStart` events are raised when maintenance is imminent (within seconds). 
These messages do not include a `StartTimeUtc` because they are fired immediately before maintenance occurs. + +#### NodeMaintenanceFailoverComplete event + +`NodeMaintenanceFailoverComplete` events are raised when a replica has promoted itself to primary. These events do not include a `StartTimeUtc` because the action has already occurred. + +#### NodeMaintenanceEnded event + +`NodeMaintenanceEnded` events are raised to indicate that the maintenance operation has completed and that the replica is once again available. You do *NOT* need to wait for this event to use the load balancer endpoint, as it is available throughout. However, we included this for logging purposes and for customers who use the replica endpoint in clusters for read workloads. \ No newline at end of file diff --git a/docs/Streams.md b/docs/Streams.md index c7f278d17..47e82c2b9 100644 --- a/docs/Streams.md +++ b/docs/Streams.md @@ -12,7 +12,7 @@ Use the following to add a simple message with a single name/value pair to a str ```csharp var db = redis.GetDatabase(); -var messageId = db.StreamAdd("event_stream", "foo_name", "bar_value"); +var messageId = db.StreamAdd("events_stream", "foo_name", "bar_value"); // messageId = 1518951480106-0 ``` @@ -34,16 +34,34 @@ var messageId = db.StreamAdd("sensor_stream", values); You also have the option to override the auto-generated message ID by passing your own ID to the `StreamAdd` method. Other optional parameters allow you to trim the stream's length. ```csharp -db.StreamAdd("event_stream", "foo_name", "bar_value", messageId: "0-1", maxLength: 100); +db.StreamAdd("events_stream", "foo_name", "bar_value", messageId: "0-1", maxLength: 100); ``` +Idempotent write-at-most-once production +=== + +From Redis 8.6, streams support idempotent write-at-most-once production. This is achieved by passing a `StreamIdempotentId` to the `StreamAdd` method. Using idempotent ids avoids +duplicate entries in the stream, even in the event of a failure and retry. 
+ +The `StreamIdempotentId` contains a producer id and an optional idempotent id. The producer id should be unique for a given data generator and should be stable and consistent between runs. +The optional idempotent id should be unique for a given data item. If the idempotent id is not provided, the server will generate it from the content of the data item. + +```csharp +// int someUniqueExternalSourceId = ... // optional +var idempotentId = new StreamIdempotentId("ticket_generator"); +// optionally, new StreamIdempotentId("ticket_generator", someUniqueExternalSourceId) +var messageId = db.StreamAdd("events_stream", "foo_name", "bar_value", idempotentId); +``` + +The `StreamConfigure` method can be used to configure the stream, in particular the IDMP map. The `StreamConfiguration` class has properties for the idempotent producer (IDMP) duration and max-size. + Reading from Streams === Reading from a stream is done by using either the `StreamRead` or `StreamRange` methods. ```csharp -var messages = db.StreamRead("event_stream", "0-0"); +var messages = db.StreamRead("events_stream", "0-0"); ``` The code above will read all messages from the ID `"0-0"` to the end of the stream. You have the option to limit the number of messages returned by using the optional `count` parameter. @@ -53,7 +71,7 @@ The `StreamRead` method also allows you to read from multiple streams at once: ```csharp var streams = db.StreamRead(new StreamPosition[] { - new StreamPosition("event_stream", "0-0"), + new StreamPosition("events_stream", "0-0"), new StreamPosition("score_stream", "0-0") }); @@ -66,13 +84,13 @@ You can limit the number of messages returned per stream by using the `countPerStream` The `StreamRange` method allows you to return a range of entries within a stream. 
```csharp -var messages = db.StreamRange("event_stream", minId: "-", maxId: "+"); +var messages = db.StreamRange("events_stream", minId: "-", maxId: "+"); ``` The `"-"` and `"+"` special characters indicate the smallest and greatest IDs possible. These values are the default values that will be used if no value is passed for the respective parameter. You also have the option to read the stream in reverse by using the `messageOrder` parameter. The `StreamRange` method also provides the ability to limit the number of entries returned by using the `count` parameter. ```csharp -var messages = db.StreamRange("event_stream", +var messages = db.StreamRange("events_stream", minId: "0-0", maxId: "+", count: 100, @@ -85,7 +103,7 @@ Stream Information The `StreamInfo` method provides the ability to read basic information about a stream: its first and last entry, the stream's length, the number of consumer groups, etc. This information can be used to process a stream in a more efficient manner. ```csharp -var info = db.StreamInfo("event_stream"); +var info = db.StreamInfo("events_stream"); Console.WriteLine(info.Length); Console.WriteLine(info.FirstEntry.Id); diff --git a/docs/Testing.md b/docs/Testing.md index f9c812811..52776f3b6 100644 --- a/docs/Testing.md +++ b/docs/Testing.md @@ -4,24 +4,24 @@ Testing Welcome to documentation for the `StackExchange.Redis` test suite! Supported platforms: -- Windows - -...that's it. For now. I'll add Docker files for the instances soon, unless someone's willing to get to it first. The tests (for `netcoreapp`) can run multi-platform. - -**Note: some tests are not yet green, about 20 are failing (~31 in CI)**. A large set of .NET Core, testing, and CI changes just slammed us, we're getting back in action. +- Windows (all tests) +- Other .NET-supported platforms (.NET Core tests) The unit and integration tests here are fairly straightforward. There are 2 primary steps: 1. 
Start the servers + +This can be done either by installing Docker and running `docker compose up` in the `tests\RedisConfigs` folder or by running the `start-all` script in the same folder. Docker is the preferred method. + 2. Run the tests -Tests default to `127.0.0.1` as their server, however you can override any of the test IPs/Hostnames and ports by placing a `TestConfig.json` in the `StackExchange.Redis.Tests\` folder. This file is intentionally in `.gitignore` already, as it's for *your* personal overrides. This is useful for testing local or remote servers, different versions, various ports, etc. +Tests default to `127.0.0.1` as their server, however you can override any of the test IPs/hostnames and ports by placing a `TestConfig.json` in the `StackExchange.Redis.Tests\` folder. This file is intentionally in `.gitignore` already, as it's for *your* personal overrides. This is useful for testing local or remote servers, different versions, various ports, etc. -You can find all the JSON properties at [TestConfig.cs](https://github.com/StackExchange/StackExchange.Redis/blob/master/tests/StackExchange.Redis.Tests/Helpers/TestConfig.cs). An example override (everything not specified being a default) would look like this: +You can find all the JSON properties at [TestConfig.cs](https://github.com/StackExchange/StackExchange.Redis/blob/main/tests/StackExchange.Redis.Tests/Helpers/TestConfig.cs). An example override (everything not specified being a default) would look like this: ```json { "RunLongRunning": true, - "MasterServer": "192.168.0.42", - "MasterPort": 12345 + "PrimaryServer": "192.168.0.42", + "PrimaryPort": 12345 } ``` Note: if a server isn't specified, the related tests should be skipped as inconclusive. @@ -30,12 +30,4 @@ You can find all the JSON properties at [TestConfig.cs](https://github.com/Stack The tests are run (by default) as part of the build. 
You can simply run this in the repository root: ```cmd .\build.cmd -BuildNumber local -``` - -To specifically run the tests with far more options, from the repository root: -```cmd -dotnet build -.\RedisConfigs\start-all.cmd -cd StackExchange.Redis.Tests -dotnet xunit -``` +``` \ No newline at end of file diff --git a/docs/ThreadTheft.md b/docs/ThreadTheft.md index 339fac554..d5b8e717e 100644 --- a/docs/ThreadTheft.md +++ b/docs/ThreadTheft.md @@ -3,7 +3,7 @@ If you're here because you followed a link in an exception and you just want your code to work, the short version is: try adding the following *early on* in your application startup: -``` c# +```csharp ConnectionMultiplexer.SetFeatureFlag("preventthreadtheft", true); ``` @@ -40,14 +40,13 @@ be an asynchronous dispatch API. But... not all implementations are equal. Some in particular of `LegacyAspNetSynchronizationContext`, which is what you get if you configure ASP.NET with: - ``` xml ``` -or +or if you do _not_ have a `` of at least 4.5 (which causes the above to default `true`) like this: -``` +```xml ``` diff --git a/docs/Timeouts.md b/docs/Timeouts.md index d608b06e1..ea9830041 100644 --- a/docs/Timeouts.md +++ b/docs/Timeouts.md @@ -10,7 +10,7 @@ it is possible that the reader loop has been hijacked; see [Thread Theft](Thread Are there commands taking a long time to process on the redis-server? --------------- -There can be commands that are taking a long time to process on the redis-server causing the request to timeout. Few examples of long running commands are mget with large number of keys, keys * or poorly written lua script. You can run the SlowLog command to see if there are requests taking longer than expected. More details regarding the command can be found [here](https://redis.io/commands/slowlog). +There can be commands that are taking a long time to process on the redis-server causing the request to timeout. 
Few examples of long running commands are mget with large number of keys, keys * or poorly written lua script. You can run [the `SLOWLOG` command](https://redis.io/commands/slowlog) to see if there are requests taking longer than expected. More details regarding the command can be found [here](https://redis.io/commands/slowlog). Was there a big request preceding several small requests to the Redis that timed out? --------------- @@ -71,13 +71,15 @@ How to configure this setting: > **Important Note:** the value specified in this configuration element is a *per-core* setting. For example, if you have a 4 core machine and want your minIOThreads setting to be 200 at runtime, you would use ``. - - Outside of ASP.NET, use the [ThreadPool.SetMinThreads(…)](https://docs.microsoft.com/en-us/dotnet/api/system.threading.threadpool.setminthreads?view=netcore-2.0#System_Threading_ThreadPool_SetMinThreads_System_Int32_System_Int32_) API. - -- In .Net Core, add Environment Variable COMPlus_ThreadPool_ForceMinWorkerThreads to overwrite default MinThreads setting, according to [Environment/Registry Configuration Knobs](https://github.com/dotnet/coreclr/blob/master/Documentation/project-docs/clr-configuration-knobs.md) - You can also use the same ThreadPool.SetMinThreads() Method as described above. + - Outside of ASP.NET, use one of the methods described in [Run-time configuration options for threading +](https://docs.microsoft.com/dotnet/core/run-time-config/threading#minimum-threads): + - [ThreadPool.SetMinThreads(…)](https://docs.microsoft.com/dotnet/api/system.threading.threadpool.setminthreads) + - The `ThreadPoolMinThreads` MSBuild property + - The `System.Threading.ThreadPool.MinThreads` setting in your `runtimeconfig.json` Explanation for abbreviations appearing in exception messages --- -By default Redis Timeout exception(s) includes useful information, which can help in uderstanding & diagnosing the timeouts. 
Some of the abbrivations are as follows: +By default Redis Timeout exception(s) includes useful information, which can help in understanding & diagnosing the timeouts. Some of the abbreviations are as follows: | Abbreviation | Long Name | Meaning | | ------------- | ---------------------- | ---------------------------- | @@ -85,8 +87,8 @@ By default Redis Timeout exception(s) includes useful information, which can hel |qu | Queue-Awaiting-Write : {int}|There are x operations currently waiting in queue to write to the redis server.| |qs | Queue-Awaiting-Response : {int}|There are x operations currently awaiting replies from redis server.| |aw | Active-Writer: {bool}|| -|bw | Backlog-Writer: {enum} | Possible values are Inactive, Started, CheckingForWork, CheckingForTimeout, RecordingTimeout, WritingMessage, Flushing, MarkingInactive, RecordingWriteFailure, RecordingFault,SettingIdle,Faulted| -|rs | Read-State: {enum}|Possible values are NotStarted, Init, RanToCompletion, Faulted, ReadSync, ReadAsync, UpdateWriteTime, ProcessBuffer, MarkProcessed, TryParseResult, MatchResult, PubSubMessage, PubSubPMessage, Reconfigure, InvokePubSub, DequeueResult, ComputeResult, CompletePendingMessage, NA| +|bw | Backlog-Writer: {enum} | Possible values are Inactive, Started, CheckingForWork, CheckingForTimeout, RecordingTimeout, WritingMessage, Flushing, MarkingInactive, RecordingWriteFailure, RecordingFault, SettingIdle, SpinningDown, Faulted| +|rs | Read-State: {enum}|Possible values are NotStarted, Init, RanToCompletion, Faulted, ReadSync, ReadAsync, UpdateWriteTime, ProcessBuffer, MarkProcessed, TryParseResult, MatchResult, PubSubMessage, PubSubSMessage, PubSubPMessage, Reconfigure, InvokePubSub, DequeueResult, ComputeResult, CompletePendingMessage, NA| |ws | Write-State: {enum}| Possible values are Initializing, Idle, Writing, Flushing, Flushed, NA| |in | Inbound-Bytes : {long}|there are x bytes waiting to be read from the input stream from redis| |in-pipe | Inbound-Pipe-Bytes: 
{long}|Bytes waiting to be read| @@ -94,7 +96,8 @@ By default Redis Timeout exception(s) includes useful information, which can hel |mgr | 8 of 10 available|Redis Internal Dedicated Thread Pool State| |IOCP | IOCP: (Busy=0,Free=500,Min=248,Max=500)| Runtime Global Thread Pool IO Threads. | |WORKER | WORKER: (Busy=170,Free=330,Min=248,Max=500)| Runtime Global Thread Pool Worker Threads.| -|v | Redis Version: version |Current redis version you are currently using in your application.| +|POOL | POOL: (Threads=8,QueuedItems=0,CompletedItems=42,Timers=10)| Thread Pool Work Item Stats.| +|v | Redis Version: version |The `StackExchange.Redis` version you are currently using in your application.| |active | Message-Current: {string} |Included in exception message when `IncludeDetailInExceptions=True` on multiplexer| |next | Message-Next: {string} |When `IncludeDetailInExceptions=True` on multiplexer, it might include command and key, otherwise only command.| |Local-CPU | %CPU or Not Available |When `IncludePerformanceCountersInExceptions=True` on multiplexer, Local CPU %age will be included in exception message. It might not work in all environments where application is hosted. | diff --git a/docs/Transactions.md b/docs/Transactions.md index 8752b26ea..4d8deca27 100644 --- a/docs/Transactions.md +++ b/docs/Transactions.md @@ -1,7 +1,7 @@ Transactions in Redis ===================== -Transactions in Redis are not like transactions in, say a SQL database. The [full documentation is here](http://redis.io/topics/transactions), +Transactions in Redis are not like transactions in, say a SQL database. The [full documentation is here](https://redis.io/topics/transactions), but to paraphrase: A transaction in redis consists of a block of commands placed between `MULTI` and `EXEC` (or `DISCARD` for rollback). 
Once a `MULTI` @@ -41,14 +41,14 @@ you *can* do is: `WATCH` a key, check data from that key in the normal way, then If, when you check the data, you discover that you don't actually need the transaction, you can use `UNWATCH` to forget all the watched keys. Note that watched keys are also reset during `EXEC` and `DISCARD`. So *at the Redis layer*, this is conceptually: -``` +```lua WATCH {custKey} HEXISTS {custKey} "UniqueId" -(check the reply, then either:) +-- (check the reply, then either:) MULTI HSET {custKey} "UniqueId" {newId} EXEC -(or, if we find there was already an unique-id:) +-- (or, if we find there was already an unique-id:) UNWATCH ``` @@ -98,13 +98,13 @@ bool wasSet = db.HashSet(custKey, "UniqueID", newId, When.NotExists); Lua --- -You should also keep in mind that Redis 2.6 and above [support Lua scripting](http://redis.io/commands/EVAL), a versatile tool for performing multiple operations as a single atomic unit at the server. +You should also keep in mind that Redis 2.6 and above [support Lua scripting](https://redis.io/commands/EVAL), a versatile tool for performing multiple operations as a single atomic unit at the server. Since no other connections are serviced during a Lua script it behaves much like a transaction, but without the complexity of `MULTI` / `EXEC` etc. This also avoids issues such as bandwidth and latency between the caller and the server, but the trade-off is that it monopolises the server for the duration of the script. 
At the Redis layer (and assuming `HSETNX` did not exist) this could be implemented as: -``` +```lua EVAL "if redis.call('hexists', KEYS[1], 'UniqueId') then return redis.call('hset', KEYS[1], 'UniqueId', ARGV[1]) else return 0 end" 1 {custKey} {newId} ``` diff --git a/docs/VectorSets.md b/docs/VectorSets.md new file mode 100644 index 000000000..9a362ec15 --- /dev/null +++ b/docs/VectorSets.md @@ -0,0 +1,394 @@ +# Redis Vector Sets + +Redis Vector Sets provide efficient storage and similarity search for vector data. SE.Redis provides a strongly-typed API for working with vector sets. + +## Prerequisites + +### Redis Version + +Vector Sets require Redis 8.0 or later. + +## Quick Start + +Note that the vectors used in these examples are small for illustrative purposes. In practice, you would commonly use much +larger vectors. The API is designed to efficiently handle large vectors - in particular, the use of `ReadOnlyMemory` +rather than arrays allows you to work with vectors in "pooled" memory buffers (such as `ArrayPool`), which can be more +efficient than creating arrays - or even working with raw memory for example memory-mapped-files. 
+ +### Adding Vectors + +Add vectors to a vector set using `VectorSetAddAsync`: + +```csharp +var db = conn.GetDatabase(); +var key = "product-embeddings"; + +// Create a vector (e.g., from an ML model) +var vector = new[] { 0.1f, 0.2f, 0.3f, 0.4f }; + +// Add a member with its vector +var request = VectorSetAddRequest.Member("product-123", vector.AsMemory()); +bool added = await db.VectorSetAddAsync(key, request); +``` + +### Adding Vectors with Attributes + +You can attach JSON metadata to vectors for filtering: + +```csharp +var vector = new[] { 0.1f, 0.2f, 0.3f, 0.4f }; +var request = VectorSetAddRequest.Member( + "product-123", + vector.AsMemory(), + attributesJson: """{"category":"electronics","price":299.99}""" +); +await db.VectorSetAddAsync(key, request); +``` + +### Similarity Search + +Find similar vectors using `VectorSetSimilaritySearchAsync`: + +```csharp +// Search by an existing member +var query = VectorSetSimilaritySearchRequest.ByMember("product-123"); +query.Count = 10; +query.WithScores = true; + +using var results = await db.VectorSetSimilaritySearchAsync(key, query); +if (results is not null) +{ + foreach (var result in results.Value.Results) + { + Console.WriteLine($"Member: {result.Member}, Score: {result.Score}"); + } +} +``` + +Or search by a vector directly: + +```csharp +var queryVector = new[] { 0.15f, 0.25f, 0.35f, 0.45f }; +var query = VectorSetSimilaritySearchRequest.ByVector(queryVector.AsMemory()); +query.Count = 10; +query.WithScores = true; + +using var results = await db.VectorSetSimilaritySearchAsync(key, query); +``` + +### Filtered Search + +Use JSON path expressions to filter results: + +```csharp +var query = VectorSetSimilaritySearchRequest.ByVector(queryVector.AsMemory()); +query.Count = 10; +query.FilterExpression = "$.category == 'electronics' && $.price < 500"; +query.WithAttributes = true; // Include attributes in results + +using var results = await db.VectorSetSimilaritySearchAsync(key, query); +``` + +See [Redis 
filtered search documentation](https://redis.io/docs/latest/develop/data-types/vector-sets/filtered-search/) for filter syntax. + +## Vector Set Operations + +### Getting Vector Set Information + +```csharp +var info = await db.VectorSetInfoAsync(key); +if (info is not null) +{ + Console.WriteLine($"Dimension: {info.Value.Dimension}"); + Console.WriteLine($"Length: {info.Value.Length}"); + Console.WriteLine($"Quantization: {info.Value.Quantization}"); +} +``` + +### Checking Membership + +```csharp +bool exists = await db.VectorSetContainsAsync(key, "product-123"); +``` + +### Removing Members + +```csharp +bool removed = await db.VectorSetRemoveAsync(key, "product-123"); +``` + +### Getting Random Members + +```csharp +// Get a single random member +var member = await db.VectorSetRandomMemberAsync(key); + +// Get multiple random members +var members = await db.VectorSetRandomMembersAsync(key, count: 5); +``` + +## Range Queries + +### Getting Members by Lexicographical Range + +Retrieve members in lexicographical order: + +```csharp +// Get all members +using var allMembers = await db.VectorSetRangeAsync(key); +// ... access allMembers.Span, etc + +// Get members in a specific range +using var rangeMembers = await db.VectorSetRangeAsync( + key, + start: "product-100", + end: "product-200", + count: 50 +); +// ... access rangeMembers.Span, etc + +// Exclude boundaries +using var members = await db.VectorSetRangeAsync( + key, + start: "product-100", + end: "product-200", + exclude: Exclude.Both +); +// ... 
access members.Span, etc +``` + +### Enumerating Large Result Sets + +For large vector sets, use enumeration to process results in batches: + +```csharp +await foreach (var member in db.VectorSetRangeEnumerateAsync(key, count: 100)) +{ + Console.WriteLine($"Processing: {member}"); +} +``` + +The enumeration of results is done in batches, so that the client does not need to buffer the entire result set in memory; +if you exit the loop early, the client and server will stop processing and sending results. This also supports async cancellation: + +```csharp +using var cts = new CancellationTokenSource(); // cancellation not shown + +await foreach (var member in db.VectorSetRangeEnumerateAsync(key, count: 100) + .WithCancellation(cts.Token)) +{ + // ... +} +``` + +## Advanced Configuration + +### Quantization + +Control vector compression: + +```csharp +var request = VectorSetAddRequest.Member("product-123", vector.AsMemory()); +request.Quantization = VectorSetQuantization.Int8; // Default +// or VectorSetQuantization.None +// or VectorSetQuantization.Binary +await db.VectorSetAddAsync(key, request); +``` + +### Dimension Reduction + +Use projection to reduce vector dimensions: + +```csharp +var request = VectorSetAddRequest.Member("product-123", vector.AsMemory()); +request.ReducedDimensions = 128; // Reduce from original dimension +await db.VectorSetAddAsync(key, request); +``` + +### HNSW Parameters + +Fine-tune the HNSW index: + +```csharp +var request = VectorSetAddRequest.Member("product-123", vector.AsMemory()); +request.MaxConnections = 32; // M parameter (default: 16) +request.BuildExplorationFactor = 400; // EF parameter (default: 200) +await db.VectorSetAddAsync(key, request); +``` + +### Search Parameters + +Control search behavior: + +```csharp +var query = VectorSetSimilaritySearchRequest.ByVector(queryVector.AsMemory()); +query.SearchExplorationFactor = 500; // Higher = more accurate, slower +query.Epsilon = 0.1; // Only return similarity >= 0.9 
+query.UseExactSearch = true; // Use linear scan instead of HNSW +await db.VectorSetSimilaritySearchAsync(key, query); +``` + +## Working with Vector Data + +### Retrieving Vectors + +Get the approximate vector for a member: + +```csharp +using var vectorLease = await db.VectorSetGetApproximateVectorAsync(key, "product-123"); +if (vectorLease != null) +{ + ReadOnlySpan vector = vectorLease.Value.Span; + // Use the vector data +} +``` + +### Managing Attributes + +Get and set JSON attributes: + +```csharp +// Get attributes +var json = await db.VectorSetGetAttributesJsonAsync(key, "product-123"); + +// Set attributes +await db.VectorSetSetAttributesJsonAsync( + key, + "product-123", + """{"category":"electronics","updated":"2024-01-15"}""" +); +``` + +### Graph Links + +Inspect HNSW graph connections: + +```csharp +// Get linked members +using var links = await db.VectorSetGetLinksAsync(key, "product-123"); +if (links != null) +{ + foreach (var link in links.Value.Span) + { + Console.WriteLine($"Linked to: {link}"); + } +} + +// Get links with similarity scores +using var linksWithScores = await db.VectorSetGetLinksWithScoresAsync(key, "product-123"); +if (linksWithScores != null) +{ + foreach (var link in linksWithScores.Value.Span) + { + Console.WriteLine($"Linked to: {link.Member}, Score: {link.Score}"); + } +} +``` + +## Memory Management + +Vector operations return `Lease` for efficient memory pooling. 
Always dispose leases: + +```csharp +// Using statement (recommended) +using var results = await db.VectorSetSimilaritySearchAsync(key, query); + +// Or explicit disposal +var results = await db.VectorSetSimilaritySearchAsync(key, query); +try +{ + // Use results +} +finally +{ + results?.Dispose(); +} +``` + +## Performance Considerations + +### Batch Operations + +For bulk inserts, consider using pipelining: + +```csharp +var batch = db.CreateBatch(); +var tasks = new List<Task<bool>>(); + +foreach (var (member, vector) in vectorData) +{ + var request = VectorSetAddRequest.Member(member, vector.AsMemory()); + tasks.Add(batch.VectorSetAddAsync(key, request)); +} + +batch.Execute(); +await Task.WhenAll(tasks); +``` + +### Search Optimization + +- Use **quantization** to reduce memory usage and improve search speed +- Tune **SearchExplorationFactor** based on accuracy vs. speed requirements +- Use **filters** to reduce the search space +- Consider **dimension reduction** for very high-dimensional vectors + +### Range Query Pagination + +Prefer enumeration for large result sets to avoid loading everything into memory: + +```csharp +// Good: loads results in batches, processes items individually +await foreach (var member in db.VectorSetRangeEnumerateAsync(key)) +{ + await ProcessMemberAsync(member); +} + +// Avoid: loads all results at once +using var allMembers1 = await db.VectorSetRangeAsync(key); + +// Avoid: loads results in batches, but still loads everything into memory at once +var allMembers2 = await db.VectorSetRangeEnumerateAsync(key).ToArrayAsync(); +``` + +## Common Patterns + +### Semantic Search + +```csharp +// 1. Store document embeddings +var embedding = await GetEmbeddingFromMLModel(document); +var request = VectorSetAddRequest.Member( + documentId, + embedding.AsMemory(), + attributesJson: $$"""{"title":"{{document.Title}}","date":"{{document.Date}}"}""" +); +await db.VectorSetAddAsync("documents", request); + +// 2. 
Search for similar documents +var queryEmbedding = await GetEmbeddingFromMLModel(searchQuery); +var query = VectorSetSimilaritySearchRequest.ByVector(queryEmbedding.AsMemory()); +query.Count = 10; +query.WithScores = true; +query.WithAttributes = true; + +using var results = await db.VectorSetSimilaritySearchAsync("documents", query); +``` + +### Recommendation System + +```csharp +// Find similar items based on an item the user liked +var query = VectorSetSimilaritySearchRequest.ByMember(userLikedItemId); +query.Count = 20; +query.FilterExpression = "$.inStock == true && $.price < 100"; +query.WithScores = true; + +using var recommendations = await db.VectorSetSimilaritySearchAsync("products", query); +``` + +## See Also + +- [Redis Vector Sets Documentation](https://redis.io/docs/latest/develop/data-types/vector-sets/) +- [HNSW Algorithm](https://arxiv.org/abs/1603.09320) +- [Filtered Search Syntax](https://redis.io/docs/latest/develop/data-types/vector-sets/filtered-search/) + diff --git a/docs/docs.csproj b/docs/docs.csproj new file mode 100644 index 000000000..977e065bc --- /dev/null +++ b/docs/docs.csproj @@ -0,0 +1,6 @@ + + + + netstandard2.0 + + diff --git a/docs/exp/SER001.md b/docs/exp/SER001.md new file mode 100644 index 000000000..2def8be6e --- /dev/null +++ b/docs/exp/SER001.md @@ -0,0 +1,22 @@ +At the current time, [Redis documents that](https://redis.io/docs/latest/commands/vadd/): + +> Vector set is a new data type that is currently in preview and may be subject to change. + +As such, the corresponding library feature must also be considered subject to change: + +1. Existing bindings may cease working correctly if the underlying server API changes. +2. Changes to the server API may require changes to the library API, manifesting in either/both of build-time + or run-time breaks. + +While this seems *unlikely*, it must be considered a possibility. 
If you acknowledge this, you can suppress +this warning by adding the following to your `csproj` file: + +```xml +<NoWarn>$(NoWarn);SER001</NoWarn> +``` + +or more granularly / locally in C#: + +``` c# +#pragma warning disable SER001 +``` \ No newline at end of file diff --git a/docs/exp/SER002.md b/docs/exp/SER002.md new file mode 100644 index 000000000..d122038e2 --- /dev/null +++ b/docs/exp/SER002.md @@ -0,0 +1,26 @@ +Redis 8.4 is currently in preview and may be subject to change. + +New features in Redis 8.4 include: + +- [`MSETEX`](https://github.com/redis/redis/pull/14434) for setting multiple strings with expiry +- [`XREADGROUP ... CLAIM`](https://github.com/redis/redis/pull/14402) for simplified stream consumption +- [`SET ... {IFEQ|IFNE|IFDEQ|IFDNE}`, `DELEX` and `DIGEST`](https://github.com/redis/redis/pull/14435) for checked (CAS/CAD) string operations + +The corresponding library feature must also be considered subject to change: + +1. Existing bindings may cease working correctly if the underlying server API changes. +2. Changes to the server API may require changes to the library API, manifesting in either/both of build-time + or run-time breaks. + +While this seems *unlikely*, it must be considered a possibility. If you acknowledge this, you can suppress +this warning by adding the following to your `csproj` file: + +```xml +<NoWarn>$(NoWarn);SER002</NoWarn> +``` + +or more granularly / locally in C#: + +``` c# +#pragma warning disable SER002 +``` diff --git a/docs/exp/SER003.md b/docs/exp/SER003.md new file mode 100644 index 000000000..651434063 --- /dev/null +++ b/docs/exp/SER003.md @@ -0,0 +1,25 @@ +Redis 8.6 is currently in preview and may be subject to change. + +New features in Redis 8.6 include: + +- `HOTKEYS` for profiling CPU and network hot-spots by key +- `XADD IDMP[AUTP]` for idempotent (write-at-most-once) stream addition + +The corresponding library feature must also be considered subject to change: + +1. 
Existing bindings may cease working correctly if the underlying server API changes. +2. Changes to the server API may require changes to the library API, manifesting in either/both of build-time + or run-time breaks. + +While this seems *unlikely*, it must be considered a possibility. If you acknowledge this, you can suppress +this warning by adding the following to your `csproj` file: + +```xml +$(NoWarn);SER003 +``` + +or more granularly / locally in C#: + +``` c# +#pragma warning disable SER003 +``` diff --git a/docs/index.md b/docs/index.md index 2fb22443c..0a2e6c721 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,13 +1,13 @@ StackExchange.Redis =================== -[Release Notes](ReleaseNotes) +- [Release Notes](ReleaseNotes) ## Overview StackExchange.Redis is a high performance general purpose redis client for .NET languages (C#, etc.). It is the logical successor to [BookSleeve](https://code.google.com/archive/p/booksleeve/), -and is the client developed-by (and used-by) [Stack Exchange](http://stackexchange.com/) for busy sites like [Stack Overflow](http://stackoverflow.com/). For the full reasons -why this library was created (i.e. "What about BookSleeve?") [please see here](http://marcgravell.blogspot.com/2014/03/so-i-went-and-wrote-another-redis-client.html). +and is the client developed-by (and used-by) [Stack Exchange](https://stackexchange.com/) for busy sites like [Stack Overflow](https://stackoverflow.com/). For the full reasons +why this library was created (i.e. "What about BookSleeve?") [please see here](https://marcgravell.blogspot.com/2014/03/so-i-went-and-wrote-another-redis-client.html). 
Features -- @@ -31,24 +31,34 @@ Documentation --- - [Server](Server) - running a redis server +- [Authentication](Authentication) - connecting to a Redis server with user authentication - [Basic Usage](Basics) - getting started and basic usage +- [Async Timeouts](AsyncTimeouts) - async timeouts and cancellation - [Configuration](Configuration) - options available when connecting to redis - [Pipelines and Multiplexers](PipelinesMultiplexers) - what is a multiplexer? - [Keys, Values and Channels](KeysValues) - discusses the data-types used on the API - [Transactions](Transactions) - how atomic transactions work in redis +- [Compare-And-Swap / Compare-And-Delete (CAS/CAD)](CompareAndSwap) - atomic conditional operations using value comparison - [Events](Events) - the events available for logging / information purposes - [Pub/Sub Message Order](PubSubOrder) - advice on sequential and concurrent processing +- [Pub/Sub Key Notifications](KeyspaceNotifications) - how to use keyspace and keyevent notifications +- [Hot Keys](HotKeys) - how to use `HOTKEYS` profiling +- [Using RESP3](Resp3) - information on using RESP3 +- [ServerMaintenanceEvent](ServerMaintenanceEvent) - how to listen and prepare for hosted server maintenance (e.g. 
Azure Cache for Redis) - [Streams](Streams) - how to use the Stream data type +- [Vector Sets](VectorSets) - how to use Vector Sets for similarity search with embeddings - [Where are `KEYS` / `SCAN` / `FLUSH*`?](KeysScan) - how to use server-based commands - [Profiling](Profiling) - profiling interfaces, as well as how to profile in an `async` world - [Scripting](Scripting) - running Lua scripts with convenient named parameter replacement - [Testing](Testing) - running the `StackExchange.Redis.Tests` suite to validate changes +- [Timeouts](Timeouts) - guidance on dealing with timeout problems - [Thread Theft](ThreadTheft) - guidance on avoiding TPL threading problems +- [RESP Logging](RespLogging) - capturing and validating RESP streams Questions and Contributions --- If you think you have found a bug or have a feature request, please [report an issue][2], or if appropriate: submit a pull request. If you have a question, feel free to [contact me](https://github.com/mgravell). - [1]: http://msdn.microsoft.com/en-us/library/dd460717%28v=vs.110%29.aspx + [1]: https://docs.microsoft.com/en-us/dotnet/standard/parallel-programming/task-parallel-library-tpl [2]: https://github.com/StackExchange/StackExchange.Redis/issues?state=open diff --git a/eng/StackExchange.Redis.Build/AsciiHash.md b/eng/StackExchange.Redis.Build/AsciiHash.md new file mode 100644 index 000000000..4a76ded62 --- /dev/null +++ b/eng/StackExchange.Redis.Build/AsciiHash.md @@ -0,0 +1,173 @@ +# AsciiHash + +Efficient matching of well-known short string tokens is a high-volume scenario, for example when matching RESP literals. + +The purpose of this generator is to efficiently interpret input tokens like `bin`, `f32`, etc - whether as byte or character data. 
+ +There are multiple ways of using this tool, with the main distinction being whether you are confirming a single +token, or choosing between multiple tokens (in which case an `enum` is more appropriate): + +## Isolated literals (part 1) + +When using individual tokens, a `static partial class` can be used to generate helpers: + +``` c# +[AsciiHash] public static partial class bin { } +[AsciiHash] public static partial class f32 { } +``` + +Usually the token is inferred from the name; `[AsciiHash("real value")]` can be used if the token is not a valid identifier. +Underscores are replaced with hyphens, so a field called `my_token` has the default value `"my-token"`. +The generator demands *all* of `[AsciiHash] public static partial class`, and note that any *containing* types must +*also* be declared `partial`. + +The output is of the form: + +``` c# +static partial class bin +{ + public const int Length = 3; + public const long HashCS = ... + public const long HashUC = ... + public static ReadOnlySpan U8 => @"bin"u8; + public static string Text => @"bin"; + public static bool IsCS(in ReadOnlySpan value, long cs) => ... + public static bool IsCI(in RawResult value, long uc) => ... + +} +``` +The `CS` and `UC` are case-sensitive and case-insensitive (using upper-case) tools, respectively. + +(this API is strictly an internal implementation detail, and can change at any time) + +This generated code allows for fast, efficient, and safe matching of well-known tokens, for example: + +``` c# +var key = ... +var hash = key.HashCS(); +switch (key.Length) +{ + case bin.Length when bin.Is(key, hash): + // handle bin + break; + case f32.Length when f32.Is(key, hash): + // handle f32 + break; +} +``` + +The switch on the `Length` is optional, but recommended - these low values can often be implemented (by the compiler) +as a simple jump-table, which is very fast. However, switching on the hash itself is also valid. 
All hash matches +must also perform a sequence equality check - the `Is(value, hash)` convenience method validates both hash and equality. + +Note that `switch` requires `const` values, hence why we use generated *types* rather than partial-properties +that emit an instance with the known values. Also, the `"..."u8` syntax emits a span which is awkward to store, but +easy to return via a property. + +## Isolated literals (part 2) + +In some cases, you want to be able to say "match this value, only known at runtime". For this, note that `AsciiHash` +is also a `struct` that you can create an instance of and supply to code; the best way to do this is *inside* your +`partial class`: + +``` c# +[AsciiHash] +static partial class bin +{ + public static readonly AsciiHash Hash = new(U8); +} +``` + +Now, `bin.Hash` can be supplied to a caller that takes an `AsciiHash` instance (commonly with `in` semantics), +which then has *instance* methods for case-sensitive and case-insensitive matching; the instance already knows +the target hash and payload values. + +The `AsciiHash` returned implements `IEquatable` implementing case-sensitive equality; there are +also independent case-sensitive and case-insensitive comparers available via the static +`CaseSensitiveEqualityComparer` and `CaseInsensitiveEqualityComparer` properties respectively. + +Comparison values can be constructed on the fly on top of transient buffers using the constructors **that take +arrays**. Note that the other constructors may allocate on a per-usage basis. + +## Enum parsing (part 1) + +When identifying multiple values, an `enum` may be more convenient. Consider: + +``` c# +[AsciiHash] +public static partial bool TryParse(ReadOnlySpan value, out SomeEnum value); +``` + +This generates an efficient parser; inputs can be common `byte` or `char` types. Case sensitivity +is controlled by the optional `CaseSensitive` property on the attribute, or via a 3rd (`bool`) parameter +bbon the method, i.e. 
+ +``` c# +[AsciiHash(CaseSensitive = false)] +public static partial bool TryParse(ReadOnlySpan value, out SomeEnum value); +``` + +or + +``` c# +[AsciiHash] +public static partial bool TryParse(ReadOnlySpan value, out SomeEnum value, bool caseSensitive = true); +``` + +Individual enum members can also be marked with `[AsciiHash("token value")]` to override the token payload. If +an enum member declares an empty explicit value (i.e. `[AsciiHash("")]`), then that member is ignored by the +tool; this is useful for marking "unknown" or "invalid" enum values (commonly the first enum, which by +convention has the value `0`): + +``` c# +public enum SomeEnum +{ + [AsciiHash("")] + Unknown, + SomeRealValue, + [AsciiHash("another-real-value")] + AnotherRealValue, + // ... +} +``` + +## Enum parsing (part 2) + +The tool has an *additional* facility when it comes to enums; you generally don't want to have to hard-code +things like buffer-lengths into your code, but when parsing an enum, you need to know how many bytes to read. + +The tool can generate a `static partial class` that contains the maximum length of any token in the enum, as well +as the maximum length of any token in bytes (when encoded as UTF-8). For example: + +``` c# +[AsciiHash("SomeTypeName")] +public enum SomeEnum +{ + // ... +} +``` + +This generates a class like the following: + +``` c# +static partial class SomeTypeName +{ + public const int EnumCount = 48; + public const int MaxChars = 11; + public const int MaxBytes = 11; // as UTF8 + public const int BufferBytes = 16; +} +``` + +The last of these is probably the most useful - it allows an additional byte (to rule out false-positives), +and rounds up to word-sizes, allowing for convenient stack-allocation - for example: + +``` c# +var span = reader.TryGetSpan(out var tmp) ? 
tmp : reader.Buffer(stackalloc byte[SomeTypeName.BufferBytes]); +if (TryParse(span, out var value)) +{ + // got a value +} +``` + +which allows for very efficient parsing of well-known tokens. \ No newline at end of file diff --git a/eng/StackExchange.Redis.Build/AsciiHashGenerator.cs b/eng/StackExchange.Redis.Build/AsciiHashGenerator.cs new file mode 100644 index 000000000..4fb411454 --- /dev/null +++ b/eng/StackExchange.Redis.Build/AsciiHashGenerator.cs @@ -0,0 +1,774 @@ +using System.Buffers; +using System.Collections.Immutable; +using System.Reflection; +using System.Text; +using Microsoft.CodeAnalysis; +using Microsoft.CodeAnalysis.CSharp; +using Microsoft.CodeAnalysis.CSharp.Syntax; +using RESPite; + +namespace StackExchange.Redis.Build; + +[Generator(LanguageNames.CSharp)] +public class AsciiHashGenerator : IIncrementalGenerator +{ + public void Initialize(IncrementalGeneratorInitializationContext context) + { + // looking for [AsciiHash] partial static class Foo { } + var types = context.SyntaxProvider + .CreateSyntaxProvider( + static (node, _) => node is ClassDeclarationSyntax decl && IsStaticPartial(decl.Modifiers) && + HasAsciiHash(decl.AttributeLists), + TransformTypes) + .Where(pair => pair.Name is { Length: > 0 }) + .Collect(); + + // looking for [AsciiHash] partial static bool TryParse(input, out output) { } + var methods = context.SyntaxProvider + .CreateSyntaxProvider( + static (node, _) => node is MethodDeclarationSyntax decl && IsStaticPartial(decl.Modifiers) && + HasAsciiHash(decl.AttributeLists), + TransformMethods) + .Where(pair => pair.Name is { Length: > 0 }) + .Collect(); + + // looking for [AsciiHash("some type")] enum Foo { } + var enums = context.SyntaxProvider + .CreateSyntaxProvider( + static (node, _) => node is EnumDeclarationSyntax decl && HasAsciiHash(decl.AttributeLists), + TransformEnums) + .Where(pair => pair.Name is { Length: > 0 }) + .Collect(); + + context.RegisterSourceOutput( + types.Combine(methods).Combine(enums), + 
(ctx, content) => + Generate(ctx, content.Left.Left, content.Left.Right, content.Right)); + + static bool IsStaticPartial(SyntaxTokenList tokens) + => tokens.Any(SyntaxKind.StaticKeyword) && tokens.Any(SyntaxKind.PartialKeyword); + + static bool HasAsciiHash(SyntaxList attributeLists) + { + foreach (var attribList in attributeLists) + { + foreach (var attrib in attribList.Attributes) + { + if (attrib.Name.ToString() is nameof(AsciiHashAttribute) or nameof(AsciiHash)) return true; + } + } + + return false; + } + } + + private static string GetName(INamedTypeSymbol type) + { + if (type.ContainingType is null) return type.Name; + var stack = new Stack(); + while (true) + { + stack.Push(type.Name); + if (type.ContainingType is null) break; + type = type.ContainingType; + } + + var sb = new StringBuilder(stack.Pop()); + while (stack.Count != 0) + { + sb.Append('.').Append(stack.Pop()); + } + + return sb.ToString(); + } + + private static AttributeData? TryGetAsciiHashAttribute(ImmutableArray attributes) + { + foreach (var attrib in attributes) + { + if (attrib.AttributeClass is + { + Name: nameof(AsciiHashAttribute), + ContainingType: null, + ContainingNamespace: + { + Name: "RESPite", + ContainingNamespace.IsGlobalNamespace: true, + } + }) + { + return attrib; + } + } + + return null; + } + + private (string Namespace, string ParentType, string Name, int Count, int MaxChars, int MaxBytes) TransformEnums( + GeneratorSyntaxContext ctx, CancellationToken cancellationToken) + { + // extract the name and value (defaults to name, but can be overridden via attribute) and the location + if (ctx.SemanticModel.GetDeclaredSymbol(ctx.Node) is not INamedTypeSymbol { TypeKind: TypeKind.Enum } named) return default; + if (TryGetAsciiHashAttribute(named.GetAttributes()) is not { } attrib) return default; + var innerName = GetRawValue("", attrib); + if (string.IsNullOrWhiteSpace(innerName)) return default; + + string ns = "", parentType = ""; + if (named.ContainingType is { } 
containingType) + { + parentType = GetName(containingType); + ns = containingType.ContainingNamespace.ToDisplayString(SymbolDisplayFormat.CSharpErrorMessageFormat); + } + else if (named.ContainingNamespace is { } containingNamespace) + { + ns = containingNamespace.ToDisplayString(SymbolDisplayFormat.CSharpErrorMessageFormat); + } + + int maxChars = 0, maxBytes = 0, count = 0; + foreach (var member in named.GetMembers()) + { + if (member.Kind is SymbolKind.Field) + { + var rawValue = GetRawValue(member.Name, TryGetAsciiHashAttribute(member.GetAttributes())); + if (string.IsNullOrWhiteSpace(rawValue)) continue; + + count++; + maxChars = Math.Max(maxChars, rawValue.Length); + maxBytes = Math.Max(maxBytes, Encoding.UTF8.GetByteCount(rawValue)); + } + } + return (ns, parentType, innerName, count, maxChars, maxBytes); + } + + private (string Namespace, string ParentType, string Name, string Value) TransformTypes( + GeneratorSyntaxContext ctx, + CancellationToken cancellationToken) + { + // extract the name and value (defaults to name, but can be overridden via attribute) and the location + if (ctx.SemanticModel.GetDeclaredSymbol(ctx.Node) is not INamedTypeSymbol { TypeKind: TypeKind.Class } named) return default; + if (TryGetAsciiHashAttribute(named.GetAttributes()) is not { } attrib) return default; + + string ns = "", parentType = ""; + if (named.ContainingType is { } containingType) + { + parentType = GetName(containingType); + ns = containingType.ContainingNamespace.ToDisplayString(SymbolDisplayFormat.CSharpErrorMessageFormat); + } + else if (named.ContainingNamespace is { } containingNamespace) + { + ns = containingNamespace.ToDisplayString(SymbolDisplayFormat.CSharpErrorMessageFormat); + } + + string name = named.Name, value = GetRawValue(name, attrib); + if (string.IsNullOrWhiteSpace(value)) return default; + return (ns, parentType, name, value); + } + + private static string GetRawValue(string name, AttributeData? 
asciiHashAttribute) + { + var value = ""; + if (asciiHashAttribute is { ConstructorArguments.Length: 1 } + && asciiHashAttribute.ConstructorArguments[0].Value?.ToString() is { Length: > 0 } val) + { + value = val; + } + if (string.IsNullOrWhiteSpace(value)) + { + value = InferPayload(name); // if nothing explicit: infer from name + } + + return value; + } + + private static string InferPayload(string name) => name.Replace("_", "-"); + + private (string Namespace, string ParentType, Accessibility Accessibility, string Name, + (string Type, string Name, bool IsBytes, RefKind RefKind) From, (string Type, string Name, RefKind RefKind) To, + (string Name, bool Value, RefKind RefKind) CaseSensitive, + BasicArray<(string EnumMember, string ParseText)> Members, int DefaultValue) TransformMethods( + GeneratorSyntaxContext ctx, + CancellationToken cancellationToken) + { + if (ctx.SemanticModel.GetDeclaredSymbol(ctx.Node) is not IMethodSymbol + { + IsStatic: true, + IsPartialDefinition: true, + PartialImplementationPart: null, + Arity: 0, + ReturnType.SpecialType: SpecialType.System_Boolean, + Parameters: + { + IsDefaultOrEmpty: false, + Length: 2 or 3, + }, + } method) return default; + + if (TryGetAsciiHashAttribute(method.GetAttributes()) is not { } attrib) return default; + + if (method.ContainingType is not { } containingType) return default; + var parentType = GetName(containingType); + var ns = containingType.ContainingNamespace.ToDisplayString(SymbolDisplayFormat.CSharpErrorMessageFormat); + + var arg = method.Parameters[0]; + if (arg is not { IsOptional: false, RefKind: RefKind.None or RefKind.In or RefKind.Ref or RefKind.RefReadOnlyParameter }) return default; + + static bool IsBytes(ITypeSymbol type) + { + // byte[] + if (type is IArrayTypeSymbol { ElementType: { SpecialType: SpecialType.System_Byte } }) + return true; + + // Span or ReadOnlySpan + if (type is INamedTypeSymbol { TypeKind: TypeKind.Struct, Arity: 1, Name: "Span" or "ReadOnlySpan", + 
ContainingNamespace: { Name: "System", ContainingNamespace.IsGlobalNamespace: true }, + TypeArguments: { Length: 1 } ta } + && ta[0].SpecialType == SpecialType.System_Byte) + { + return true; + } + return false; + } + + var fromType = arg.Type.ToDisplayString(SymbolDisplayFormat.CSharpErrorMessageFormat); + bool fromBytes = IsBytes(arg.Type); + var from = (fromType, arg.Name, fromBytes, arg.RefKind); + + arg = method.Parameters[1]; + if (arg is not + { + IsOptional: false, RefKind: RefKind.Out or RefKind.Ref, Type: INamedTypeSymbol { TypeKind: TypeKind.Enum } + }) return default; + var to = (arg.Type.ToDisplayString(SymbolDisplayFormat.CSharpErrorMessageFormat), arg.Name, arg.RefKind); + + var members = arg.Type.GetMembers(); + var builder = new BasicArray<(string EnumMember, string ParseText)>.Builder(members.Length); + HashSet values = new(); + foreach (var member in members) + { + if (member is IFieldSymbol { IsStatic: true, IsConst: true } field) + { + var rawValue = GetRawValue(field.Name, TryGetAsciiHashAttribute(member.GetAttributes())); + if (string.IsNullOrWhiteSpace(rawValue)) continue; + builder.Add((field.Name, rawValue)); + int value = field.ConstantValue switch + { + sbyte i8 => i8, + short i16 => i16, + int i32 => i32, + long i64 => (int)i64, + byte u8 => u8, + ushort u16 => u16, + uint u32 => (int)u32, + ulong u64 => (int)u64, + char c16 => c16, + _ => 0, + }; + values.Add(value); + } + } + + (string, bool, RefKind) caseSensitive; + bool cs = IsCaseSensitive(attrib); + if (method.Parameters.Length > 2) + { + arg = method.Parameters[2]; + if (arg is not + { + RefKind: RefKind.None or RefKind.In or RefKind.Ref or RefKind.RefReadOnlyParameter, + Type.SpecialType: SpecialType.System_Boolean, + }) + { + return default; + } + + if (arg.IsOptional) + { + if (arg.ExplicitDefaultValue is not bool dv) return default; + cs = dv; + } + caseSensitive = (arg.Name, cs, arg.RefKind); + } + else + { + caseSensitive = ("", cs, RefKind.None); + } + + int defaultValue 
= 0; + if (values.Contains(0)) + { + int len = values.Count; + for (int i = 1; i <= len; i++) + { + if (!values.Contains(i)) + { + defaultValue = i; + break; + } + } + } + return (ns, parentType, method.DeclaredAccessibility, method.Name, from, to, caseSensitive, builder.Build(), defaultValue); + } + + private bool IsCaseSensitive(AttributeData attrib) + { + foreach (var member in attrib.NamedArguments) + { + if (member.Key == nameof(AsciiHashAttribute.CaseSensitive) + && member.Value.Kind is TypedConstantKind.Primitive + && member.Value.Value is bool caseSensitive) + { + return caseSensitive; + } + } + + return true; + } + + private string GetVersion() + { + var asm = GetType().Assembly; + if (asm.GetCustomAttributes(typeof(AssemblyFileVersionAttribute), false).FirstOrDefault() is + AssemblyFileVersionAttribute { Version: { Length: > 0 } } version) + { + return version.Version; + } + + return asm.GetName().Version?.ToString() ?? "??"; + } + + private void Generate( + SourceProductionContext ctx, + ImmutableArray<(string Namespace, string ParentType, string Name, string Value)> types, + ImmutableArray<(string Namespace, string ParentType, Accessibility Accessibility, string Name, + (string Type, string Name, bool IsBytes, RefKind RefKind) From, (string Type, string Name, RefKind RefKind) To, + (string Name, bool Value, RefKind RefKind) CaseSensitive, + BasicArray<(string EnumMember, string ParseText)> Members, int DefaultValue)> parseMethods, + ImmutableArray<(string Namespace, string ParentType, string Name, int Count, int MaxChars, int MaxBytes)> enums) + { + if (types.IsDefaultOrEmpty & parseMethods.IsDefaultOrEmpty & enums.IsDefaultOrEmpty) return; // nothing to do + + var sb = new StringBuilder("// ") + .AppendLine().Append("// ").Append(GetType().Name).Append(" v").Append(GetVersion()).AppendLine(); + + sb.AppendLine("using System;"); + sb.AppendLine("using StackExchange.Redis;"); + sb.AppendLine("#pragma warning disable CS8981, SER004"); + + 
BuildTypeImplementations(sb, types); + BuildEnumParsers(sb, parseMethods); + BuildEnumLengths(sb, enums); + ctx.AddSource(nameof(AsciiHash) + ".generated.cs", sb.ToString()); + } + + private void BuildEnumLengths(StringBuilder sb, ImmutableArray<(string Namespace, string ParentType, string Name, int Count, int MaxChars, int MaxBytes)> enums) + { + if (enums.IsDefaultOrEmpty) return; // nope + + int indent = 0; + StringBuilder NewLine() => sb.AppendLine().Append(' ', indent * 4); + + foreach (var grp in enums.GroupBy(l => (l.Namespace, l.ParentType))) + { + NewLine(); + int braces = 0; + if (!string.IsNullOrWhiteSpace(grp.Key.Namespace)) + { + NewLine().Append("namespace ").Append(grp.Key.Namespace); + NewLine().Append("{"); + indent++; + braces++; + } + + if (!string.IsNullOrWhiteSpace(grp.Key.ParentType)) + { + if (grp.Key.ParentType.Contains('.')) // nested types + { + foreach (var part in grp.Key.ParentType.Split('.')) + { + NewLine().Append("partial class ").Append(part); + NewLine().Append("{"); + indent++; + braces++; + } + } + else + { + NewLine().Append("partial class ").Append(grp.Key.ParentType); + NewLine().Append("{"); + indent++; + braces++; + } + } + + foreach (var @enum in grp) + { + NewLine().Append("internal static partial class ").Append(@enum.Name); + NewLine().Append("{"); + indent++; + NewLine().Append("public const int EnumCount = ").Append(@enum.Count).Append(";"); + NewLine().Append("public const int MaxChars = ").Append(@enum.MaxChars).Append(";"); + NewLine().Append("public const int MaxBytes = ").Append(@enum.MaxBytes).Append("; // as UTF8"); + // for buffer bytes: we want to allow 1 extra byte (to check for false-positive over-long values), + // and then round up to the nearest multiple of 8 (for stackalloc performance, etc) + int bufferBytes = (@enum.MaxBytes + 1 + 7) & ~7; + NewLine().Append("public const int BufferBytes = ").Append(bufferBytes).Append(";"); + indent--; + NewLine().Append("}"); + } + + // handle any closing braces + 
while (braces-- > 0) + { + indent--; + NewLine().Append("}"); + } + } + } + + private void BuildEnumParsers( + StringBuilder sb, + in ImmutableArray<(string Namespace, string ParentType, Accessibility Accessibility, string Name, + (string Type, string Name, bool IsBytes, RefKind RefKind) From, + (string Type, string Name, RefKind RefKind) To, + (string Name, bool Value, RefKind RefKind) CaseSensitive, + BasicArray<(string EnumMember, string ParseText)> Members, int DefaultValue)> enums) + { + if (enums.IsDefaultOrEmpty) return; // nope + + int indent = 0; + StringBuilder NewLine() => sb.AppendLine().Append(' ', indent * 4); + + foreach (var grp in enums.GroupBy(l => (l.Namespace, l.ParentType))) + { + NewLine(); + int braces = 0; + if (!string.IsNullOrWhiteSpace(grp.Key.Namespace)) + { + NewLine().Append("namespace ").Append(grp.Key.Namespace); + NewLine().Append("{"); + indent++; + braces++; + } + + if (!string.IsNullOrWhiteSpace(grp.Key.ParentType)) + { + if (grp.Key.ParentType.Contains('.')) // nested types + { + foreach (var part in grp.Key.ParentType.Split('.')) + { + NewLine().Append("partial class ").Append(part); + NewLine().Append("{"); + indent++; + braces++; + } + } + else + { + NewLine().Append("partial class ").Append(grp.Key.ParentType); + NewLine().Append("{"); + indent++; + braces++; + } + } + + foreach (var method in grp) + { + var line = NewLine().Append(Format(method.Accessibility)).Append(" static partial bool ") + .Append(method.Name).Append("(") + .Append(Format(method.From.RefKind)) + .Append(method.From.Type).Append(" ").Append(method.From.Name).Append(", ") + .Append(Format(method.To.RefKind)) + .Append(method.To.Type).Append(" ").Append(method.To.Name); + if (!string.IsNullOrEmpty(method.CaseSensitive.Name)) + { + line.Append(", ").Append(Format(method.CaseSensitive.RefKind)).Append("bool ") + .Append(method.CaseSensitive.Name); + } + line.Append(")"); + NewLine().Append("{"); + indent++; + NewLine().Append("// 
").Append(method.To.Type).Append(" has ").Append(method.Members.Length).Append(" members"); + string valueTarget = method.To.Name; + if (method.To.RefKind != RefKind.Out) + { + valueTarget = "__tmp"; + NewLine().Append(method.To.Type).Append(" ").Append(valueTarget).Append(";"); + } + + bool alwaysCaseSensitive = + string.IsNullOrEmpty(method.CaseSensitive.Name) && method.CaseSensitive.Value; + if (!alwaysCaseSensitive && !HasCaseSensitiveCharacters(method.Members)) + { + alwaysCaseSensitive = true; + } + + bool twoPart = method.Members.Max(x => x.ParseText.Length) > AsciiHash.MaxBytesHashed; + if (alwaysCaseSensitive) + { + if (twoPart) + { + NewLine().Append("global::RESPite.AsciiHash.HashCS(").Append(method.From.Name).Append(", out var cs0, out var cs1);"); + } + else + { + NewLine().Append("var cs0 = global::RESPite.AsciiHash.HashCS(").Append(method.From.Name).Append(");"); + } + } + else + { + if (twoPart) + { + NewLine().Append("global::RESPite.AsciiHash.Hash(").Append(method.From.Name) + .Append(", out var cs0, out var uc0, out var cs1, out var uc1);"); + } + else + { + NewLine().Append("global::RESPite.AsciiHash.Hash(").Append(method.From.Name) + .Append(", out var cs0, out var uc0);"); + } + } + + if (string.IsNullOrEmpty(method.CaseSensitive.Name)) + { + Write(method.CaseSensitive.Value); + } + else + { + NewLine().Append("if (").Append(method.CaseSensitive.Name).Append(")"); + NewLine().Append("{"); + indent++; + Write(true); + indent--; + NewLine().Append("}"); + NewLine().Append("else"); + NewLine().Append("{"); + indent++; + Write(false); + indent--; + NewLine().Append("}"); + } + + if (method.To.RefKind == RefKind.Out) + { + NewLine().Append("if (").Append(valueTarget).Append(" == (") + .Append(method.To.Type).Append(")").Append(method.DefaultValue).Append(")"); + NewLine().Append("{"); + indent++; + NewLine().Append("// by convention, init to zero on miss"); + NewLine().Append(valueTarget).Append(" = default;"); + NewLine().Append("return false;"); 
+ indent--; + NewLine().Append("}"); + NewLine().Append("return true;"); + } + else + { + NewLine().Append("// do not update parameter on miss"); + NewLine().Append("if (").Append(valueTarget).Append(" == (") + .Append(method.To.Type).Append(")").Append(method.DefaultValue).Append(") return false;"); + NewLine().Append(method.To.Name).Append(" = ").Append(valueTarget).Append(";"); + NewLine().Append("return true;"); + } + + void Write(bool caseSensitive) + { + NewLine().Append(valueTarget).Append(" = ").Append(method.From.Name).Append(".Length switch {"); + indent++; + foreach (var member in method.Members + .OrderBy(x => x.ParseText.Length) + .ThenBy(x => x.ParseText)) + { + var len = member.ParseText.Length; + AsciiHash.Hash(member.ParseText, out var cs0, out var uc0, out var cs1, out var uc1); + + bool valueCaseSensitive = caseSensitive || !HasCaseSensitiveCharacters(member.ParseText); + + line = NewLine().Append(len).Append(" when "); + if (twoPart) line.Append("("); + if (valueCaseSensitive) + { + line.Append("cs0 is ").Append(cs0); + } + else + { + line.Append("uc0 is ").Append(uc0); + } + + if (len > AsciiHash.MaxBytesHashed) + { + if (valueCaseSensitive) + { + line.Append(" & cs1 is ").Append(cs1); + } + else + { + line.Append(" & uc1 is ").Append(uc1); + } + } + if (twoPart) line.Append(")"); + if (len > 2 * AsciiHash.MaxBytesHashed) + { + line.Append(" && "); + var csValue = SyntaxFactory + .LiteralExpression( + SyntaxKind.StringLiteralExpression, + SyntaxFactory.Literal(member.ParseText.Substring(2 * AsciiHash.MaxBytesHashed))) + .ToFullString(); + + line.Append("global::RESPite.AsciiHash.") + .Append(valueCaseSensitive ? 
nameof(AsciiHash.SequenceEqualsCS) : nameof(AsciiHash.SequenceEqualsCI)) + .Append("(").Append(method.From.Name).Append(".Slice(").Append(2 * AsciiHash.MaxBytesHashed).Append("), ").Append(csValue); + if (method.From.IsBytes) line.Append("u8"); + line.Append(")"); + } + + line.Append(" => ").Append(method.To.Type).Append(".").Append(member.EnumMember).Append(","); + } + + NewLine().Append("_ => (").Append(method.To.Type).Append(")").Append(method.DefaultValue) + .Append(","); + indent--; + NewLine().Append("};"); + } + + indent--; + NewLine().Append("}"); + } + + // handle any closing braces + while (braces-- > 0) + { + indent--; + NewLine().Append("}"); + } + } + } + + private static bool HasCaseSensitiveCharacters(string value) + { + foreach (char c in value ?? "") + { + if (char.IsLetter(c)) return true; + } + + return false; + } + + private static bool HasCaseSensitiveCharacters(BasicArray<(string EnumMember, string ParseText)> members) + { + // do we have alphabet characters? case sensitivity doesn't apply if not + foreach (var member in members) + { + if (HasCaseSensitiveCharacters(member.ParseText)) return true; + } + + return false; + } + + private static string Format(RefKind refKind) => refKind switch + { + RefKind.None => "", + RefKind.In => "in ", + RefKind.Out => "out ", + RefKind.Ref => "ref ", + RefKind.RefReadOnlyParameter or RefKind.RefReadOnly => "ref readonly ", + _ => throw new NotSupportedException($"RefKind {refKind} is not yet supported."), + }; + private static string Format(Accessibility accessibility) => accessibility switch + { + Accessibility.Public => "public", + Accessibility.Private => "private", + Accessibility.Internal => "internal", + Accessibility.Protected => "protected", + Accessibility.ProtectedAndInternal => "private protected", + Accessibility.ProtectedOrInternal => "protected internal", + _ => throw new NotSupportedException($"Accessibility {accessibility} is not yet supported."), + }; + + private static void 
BuildTypeImplementations( + StringBuilder sb, + in ImmutableArray<(string Namespace, string ParentType, string Name, string Value)> types) + { + if (types.IsDefaultOrEmpty) return; // nope + + int indent = 0; + StringBuilder NewLine() => sb.AppendLine().Append(' ', indent * 4); + + foreach (var grp in types.GroupBy(l => (l.Namespace, l.ParentType))) + { + NewLine(); + int braces = 0; + if (!string.IsNullOrWhiteSpace(grp.Key.Namespace)) + { + NewLine().Append("namespace ").Append(grp.Key.Namespace); + NewLine().Append("{"); + indent++; + braces++; + } + + if (!string.IsNullOrWhiteSpace(grp.Key.ParentType)) + { + if (grp.Key.ParentType.Contains('.')) // nested types + { + foreach (var part in grp.Key.ParentType.Split('.')) + { + NewLine().Append("partial class ").Append(part); + NewLine().Append("{"); + indent++; + braces++; + } + } + else + { + NewLine().Append("partial class ").Append(grp.Key.ParentType); + NewLine().Append("{"); + indent++; + braces++; + } + } + + foreach (var literal in grp) + { + // perform string escaping on the generated value (this includes the quotes, note) + var csValue = SyntaxFactory + .LiteralExpression(SyntaxKind.StringLiteralExpression, SyntaxFactory.Literal(literal.Value)) + .ToFullString(); + + AsciiHash.Hash(literal.Value, out var hashCS, out var hashUC); + NewLine().Append("static partial class ").Append(literal.Name); + NewLine().Append("{"); + indent++; + NewLine().Append("public const int Length = ").Append(literal.Value.Length).Append(';'); + NewLine().Append("public const long HashCS = ").Append(hashCS).Append(';'); + NewLine().Append("public const long HashUC = ").Append(hashUC).Append(';'); + NewLine().Append("public static ReadOnlySpan U8 => ").Append(csValue).Append("u8;"); + NewLine().Append("public const string Text = ").Append(csValue).Append(';'); + if (literal.Value.Length <= AsciiHash.MaxBytesHashed) + { + // the case-sensitive hash enforces all the values + NewLine().Append( + "public static bool IsCS(ReadOnlySpan 
value, long cs) => cs == HashCS & value.Length == Length;"); + NewLine().Append( + "public static bool IsCI(ReadOnlySpan value, long uc) => uc == HashUC & value.Length == Length;"); + } + else + { + NewLine().Append( + "public static bool IsCS(ReadOnlySpan value, long cs) => cs == HashCS && value.SequenceEqual(U8);"); + NewLine().Append( + "public static bool IsCI(ReadOnlySpan value, long uc) => uc == HashUC && global::RESPite.AsciiHash.SequenceEqualsCI(value, U8);"); + } + + indent--; + NewLine().Append("}"); + } + + // handle any closing braces + while (braces-- > 0) + { + indent--; + NewLine().Append("}"); + } + } + } +} diff --git a/eng/StackExchange.Redis.Build/BasicArray.cs b/eng/StackExchange.Redis.Build/BasicArray.cs new file mode 100644 index 000000000..dc7984c75 --- /dev/null +++ b/eng/StackExchange.Redis.Build/BasicArray.cs @@ -0,0 +1,85 @@ +using System.Collections; + +namespace StackExchange.Redis.Build; + +// like ImmutableArray, but with decent equality semantics +public readonly struct BasicArray : IEquatable>, IReadOnlyList +{ + private readonly T[] _elements; + + private BasicArray(T[] elements, int length) + { + _elements = elements; + Length = length; + } + + private static readonly EqualityComparer _comparer = EqualityComparer.Default; + + public int Length { get; } + public bool IsEmpty => Length == 0; + + public ref readonly T this[int index] + { + get + { + if (index < 0 | index >= Length) Throw(); + return ref _elements[index]; + + static void Throw() => throw new IndexOutOfRangeException(); + } + } + + public ReadOnlySpan Span => _elements.AsSpan(0, Length); + + public bool Equals(BasicArray other) + { + if (Length != other.Length) return false; + var y = other.Span; + int i = 0; + foreach (ref readonly T el in this.Span) + { + if (!_comparer.Equals(el, y[i])) return false; + } + + return true; + } + + public ReadOnlySpan.Enumerator GetEnumerator() => Span.GetEnumerator(); + + private IEnumerator EnumeratorCore() + { + for (int i = 0; i < 
Length; i++) yield return this[i]; + } + + public override bool Equals(object? obj) => obj is BasicArray other && Equals(other); + + public override int GetHashCode() + { + var hash = Length; + foreach (ref readonly T el in this.Span) + { + _ = (hash * -37) + _comparer.GetHashCode(el); + } + + return hash; + } + IEnumerator IEnumerable.GetEnumerator() => EnumeratorCore(); + IEnumerator IEnumerable.GetEnumerator() => EnumeratorCore(); + + int IReadOnlyCollection.Count => Length; + T IReadOnlyList.this[int index] => this[index]; + + public struct Builder(int maxLength) + { + public int Count { get; private set; } + private readonly T[] elements = maxLength == 0 ? [] : new T[maxLength]; + + public void Add(in T value) + { + elements[Count] = value; + Count++; + } + + public BasicArray Build() => new(elements, Count); + } +} diff --git a/eng/StackExchange.Redis.Build/StackExchange.Redis.Build.csproj b/eng/StackExchange.Redis.Build/StackExchange.Redis.Build.csproj new file mode 100644 index 000000000..3cde6f5f6 --- /dev/null +++ b/eng/StackExchange.Redis.Build/StackExchange.Redis.Build.csproj @@ -0,0 +1,23 @@ + + + + netstandard2.0 + enable + enable + true + + + + + + + + + Shared/AsciiHash.cs + + + Shared/Experiments.cs + + + + diff --git a/src/Directory.Build.props b/src/Directory.Build.props index 93251be41..27366ae98 100644 --- a/src/Directory.Build.props +++ b/src/Directory.Build.props @@ -3,10 +3,12 @@ true true + false + true - - - - + + + + diff --git a/src/NRediSearch/AddOptions.cs b/src/NRediSearch/AddOptions.cs deleted file mode 100644 index bd3fb9d20..000000000 --- a/src/NRediSearch/AddOptions.cs +++ /dev/null @@ -1,66 +0,0 @@ -// .NET port of https://github.com/RedisLabs/JRediSearch/ - -namespace NRediSearch -{ - public sealed class AddOptions - { - public enum ReplacementPolicy - { - /// - /// The default mode. This will cause the add operation to fail if the document already exists - /// - None, - /// - /// Replace/reindex the entire document. 
This has the effect of atomically deleting the previous - /// document and replacing it with the context of the new document. Fields in the old document which - /// are not present in the new document are lost - /// - Full, - /// - /// Only reindex/replace fields that are updated in the command. Fields in the old document which are - /// not present in the new document are preserved.Fields that are present in both are overwritten by - /// the new document - /// - Partial, - } - - public string Language { get; set; } - public bool NoSave { get; set; } - public ReplacementPolicy ReplacePolicy { get; set; } - - /// - /// Create a new DocumentOptions object. Methods can later be chained via a builder-like pattern - /// - public AddOptions() { } - - /// - /// Set the indexing language - /// - /// Set the indexing language - public AddOptions SetLanguage(string language) - { - Language = language; - return this; - } - /// - /// Whether document's contents should not be stored in the database. - /// - /// if enabled, the document is not stored on the server. This saves disk/memory space on the - /// server but prevents retrieving the document itself. - public AddOptions SetNoSave(bool enabled) - { - NoSave = enabled; - return this; - } - - /// - /// Indicate the behavior for the existing document. - /// - /// One of the replacement modes. 
- public AddOptions SetReplacementPolicy(ReplacementPolicy mode) - { - ReplacePolicy = mode; - return this; - } - } -} diff --git a/src/NRediSearch/Aggregation/AggregationBuilder.cs b/src/NRediSearch/Aggregation/AggregationBuilder.cs deleted file mode 100644 index f3dc4b47f..000000000 --- a/src/NRediSearch/Aggregation/AggregationBuilder.cs +++ /dev/null @@ -1,150 +0,0 @@ -// .NET port of https://github.com/RedisLabs/JRediSearch/ -using System.Collections.Generic; -using System.Linq; -using NRediSearch.Aggregation.Reducers; - -namespace NRediSearch.Aggregation -{ - public sealed class AggregationBuilder - { - private readonly List _args = new List(); - - public bool IsWithCursor { get; private set; } - - internal string GetArgsString() => string.Join(" ", _args); - - public AggregationBuilder(string query = "*") => _args.Add(query); - - public AggregationBuilder Load(params string[] fields) - { - AddCommandArguments(_args, "LOAD", fields); - - return this; - } - - public AggregationBuilder Limit(int offset, int count) - { - var limit = new Limit(offset, count); - - limit.SerializeRedisArgs(_args); - - return this; - } - - public AggregationBuilder Limit(int count) => Limit(0, count); - - public AggregationBuilder SortBy(params SortedField[] fields) - { - _args.Add("SORTBY"); - _args.Add(fields.Length * 2); - - foreach (var field in fields) - { - _args.Add(field.Field); - _args.Add(field.OrderAsArg()); - } - - return this; - } - - public AggregationBuilder SortBy(int max, params SortedField[] fields) - { - SortBy(fields); - - if (max > 0) - { - _args.Add("MAX"); - _args.Add(max); - } - - return this; - } - - public AggregationBuilder SortByAscending(string field) => SortBy(SortedField.Ascending(field)); - - public AggregationBuilder SortByDescending(string field) => SortBy(SortedField.Descending(field)); - - public AggregationBuilder Apply(string projection, string alias) - { - _args.Add("APPLY"); - _args.Add(projection); - _args.Add("AS"); - _args.Add(alias); - - 
return this; - } - - public AggregationBuilder GroupBy(IReadOnlyCollection fields, IReadOnlyCollection reducers) - { - var group = new Group(fields.ToArray()); - - foreach (var r in reducers) - { - group.Reduce(r); - } - - GroupBy(group); - - return this; - } - - public AggregationBuilder GroupBy(string field, params Reducer[] reducers) => GroupBy(new[] { field }, reducers); - - public AggregationBuilder GroupBy(Group group) - { - _args.Add("GROUPBY"); - - group.SerializeRedisArgs(_args); - - return this; - } - - public AggregationBuilder Filter(string expression) - { - _args.Add("FILTER"); - _args.Add(expression); - - return this; - } - - public AggregationBuilder Cursor(int count, long maxIdle) - { - IsWithCursor = true; - - if (count > 0) - { - _args.Add("WITHCURSOR"); - _args.Add("COUNT"); - _args.Add(count); - - if (maxIdle < long.MaxValue && maxIdle >= 0) - { - _args.Add("MAXIDLE"); - _args.Add(maxIdle); - } - } - - return this; - } - - internal void SerializeRedisArgs(List args) - { - foreach (var arg in _args) - { - args.Add(arg); - } - } - - private static void AddCommandLength(List list, string command, int length) - { - list.Add(command); - list.Add(length); - } - - private static void AddCommandArguments(List destination, string command, IReadOnlyCollection source) - { - AddCommandLength(destination, command, source.Count); - destination.AddRange(source); - } - } -} diff --git a/src/NRediSearch/Aggregation/AggregationRequest.cs b/src/NRediSearch/Aggregation/AggregationRequest.cs deleted file mode 100644 index e5ad26572..000000000 --- a/src/NRediSearch/Aggregation/AggregationRequest.cs +++ /dev/null @@ -1,168 +0,0 @@ -// .NET port of https://github.com/RedisLabs/JRediSearch/ - -using System; -using System.Collections.Generic; -using NRediSearch.Aggregation.Reducers; -using StackExchange.Redis; - -namespace NRediSearch.Aggregation -{ - public class AggregationRequest - { - private readonly string _query; - private readonly List _load = new List(); - 
private readonly List _groups = new List(); - private readonly List _sortby = new List(); - private readonly Dictionary _projections = new Dictionary(); - - private Limit _limit = new Limit(0, 0); - private int _sortByMax = 0; - public AggregationRequest(string query) - { - _query = query; - } - public AggregationRequest() : this("*") { } - - public AggregationRequest Load(string field) - { - _load.Add(field); - return this; - } - public AggregationRequest Load(params string[] fields) - { - _load.AddRange(fields); - return this; - } - - public AggregationRequest Limit(int offset, int count) - { - var limit = new Limit(offset, count); - if (_groups.Count == 0) - { - _limit = limit; - } - else - { - _groups[_groups.Count - 1].Limit(limit); - } - return this; - } - - public AggregationRequest Limit(int count) => Limit(0, count); - - public AggregationRequest SortBy(SortedField field) - { - _sortby.Add(field); - return this; - } - public AggregationRequest SortBy(params SortedField[] fields) - { - _sortby.AddRange(fields); - return this; - } - public AggregationRequest SortBy(IList fields, int max) - { - _sortby.AddRange(fields); - _sortByMax = max; - return this; - } - public AggregationRequest SortBy(SortedField field, int max) - { - _sortby.Add(field); - _sortByMax = max; - return this; - } - - public AggregationRequest SortBy(string field, Order order) => SortBy(new SortedField(field, order)); - public AggregationRequest SortByAscending(string field) => SortBy(field, Order.Ascending); - public AggregationRequest SortByDescending(string field) => SortBy(field, Order.Descending); - - public AggregationRequest Apply(string projection, string alias) - { - _projections.Add(alias, projection); - return this; - } - - public AggregationRequest GroupBy(IList fields, IList reducers) - { - Group g = new Group(fields); - foreach (var r in reducers) - { - g.Reduce(r); - } - _groups.Add(g); - return this; - } - - public AggregationRequest GroupBy(String field, params Reducer[] 
reducers) - { - return GroupBy(new string[] { field }, reducers); - } - - public AggregationRequest GroupBy(Group group) - { - _groups.Add(group); - return this; - } - - private static void AddCmdLen(List list, string cmd, int len) - { - list.Add(cmd.Literal()); - list.Add(len); - } - private static void AddCmdArgs(List dst, string cmd, IList src) - { - AddCmdLen(dst, cmd, src.Count); - foreach (var obj in src) - dst.Add(obj); - } - - internal void SerializeRedisArgs(List args) - { - args.Add(_query); - - if (_load.Count != 0) - { - AddCmdArgs(args, "LOAD", _load); - } - - if (_groups.Count != 0) - { - foreach (var group in _groups) - { - args.Add("GROUPBY".Literal()); - group.SerializeRedisArgs(args); - } - } - - if (_projections.Count != 0) - { - args.Add("APPLY".Literal()); - foreach (var e in _projections) - { - args.Add(e.Value); - args.Add("AS".Literal()); - args.Add(e.Key); - } - } - - if (_sortby.Count != 0) - { - args.Add("SORTBY".Literal()); - args.Add((_sortby.Count * 2).Boxed()); - foreach (var field in _sortby) - { - args.Add(field.Field); - args.Add(field.OrderAsArg()); - } - if (_sortByMax > 0) - { - args.Add("MAX".Literal()); - args.Add(_sortByMax.Boxed()); - } - } - - _limit.SerializeRedisArgs(args); - } - } -} diff --git a/src/NRediSearch/Aggregation/Group.cs b/src/NRediSearch/Aggregation/Group.cs deleted file mode 100644 index 31e29230f..000000000 --- a/src/NRediSearch/Aggregation/Group.cs +++ /dev/null @@ -1,50 +0,0 @@ -// .NET port of https://github.com/RedisLabs/JRediSearch/ - -using System.Collections.Generic; -using NRediSearch.Aggregation.Reducers; - -namespace NRediSearch.Aggregation -{ - public sealed class Group - { - private readonly IList _reducers = new List(); - private readonly IList _fields; - private Limit _limit = new Limit(0, 0); - - public Group(params string[] fields) => _fields = fields; - - public Group(IList fields) => _fields = fields; - - internal Group Limit(Limit limit) - { - _limit = limit; - return this; - } - - 
internal Group Reduce(Reducer r) - { - _reducers.Add(r); - return this; - } - - internal void SerializeRedisArgs(List args) - { - args.Add(_fields.Count.Boxed()); - foreach (var field in _fields) - args.Add(field); - foreach (var r in _reducers) - { - args.Add("REDUCE".Literal()); - args.Add(r.Name.Literal()); - r.SerializeRedisArgs(args); - var alias = r.Alias; - if (!string.IsNullOrEmpty(alias)) - { - args.Add("AS".Literal()); - args.Add(alias); - } - } - _limit.SerializeRedisArgs(args); - } - } -} diff --git a/src/NRediSearch/Aggregation/Limit.cs b/src/NRediSearch/Aggregation/Limit.cs deleted file mode 100644 index 05649d3de..000000000 --- a/src/NRediSearch/Aggregation/Limit.cs +++ /dev/null @@ -1,25 +0,0 @@ -// .NET port of https://github.com/RedisLabs/JRediSearch/ - -using System.Collections.Generic; - -namespace NRediSearch.Aggregation -{ - internal readonly struct Limit - { - private readonly int _offset, _count; - - public Limit(int offset, int count) - { - _offset = offset; - _count = count; - } - - internal void SerializeRedisArgs(List args) - { - if (_count == 0) return; - args.Add("LIMIT".Literal()); - args.Add(_offset.Boxed()); - args.Add(_count.Boxed()); - } - } -} diff --git a/src/NRediSearch/Aggregation/Reducers/Reducer.cs b/src/NRediSearch/Aggregation/Reducers/Reducer.cs deleted file mode 100644 index 559604c8b..000000000 --- a/src/NRediSearch/Aggregation/Reducers/Reducer.cs +++ /dev/null @@ -1,51 +0,0 @@ -// .NET port of https://github.com/RedisLabs/JRediSearch/ - -using System; -using System.Collections.Generic; - -namespace NRediSearch.Aggregation.Reducers -{ - // This class is normally received via one of the subclasses or via Reducers - public abstract class Reducer - { - public override string ToString() => Name; - private readonly string _field; - - internal Reducer(string field) => _field = field; - - /// - /// The name of the reducer - /// - public abstract string Name { get; } - - public string Alias { get; set; } - - public Reducer 
As(string alias) - { - Alias = alias; - return this; - } - public Reducer SetAliasAsField() - { - if (string.IsNullOrEmpty(_field)) throw new InvalidOperationException("Cannot set to field name since no field exists"); - return As(_field); - } - - protected virtual int GetOwnArgsCount() => _field == null ? 0 : 1; - protected virtual void AddOwnArgs(List args) - { - if (_field != null) args.Add(_field); - } - - internal void SerializeRedisArgs(List args) - { - int count = GetOwnArgsCount(); - args.Add(count.Boxed()); - int before = args.Count; - AddOwnArgs(args); - int after = args.Count; - if (count != (after - before)) - throw new InvalidOperationException($"Reducer '{ToString()}' incorrectly reported the arg-count as {count}, but added {after - before}"); - } - } -} diff --git a/src/NRediSearch/Aggregation/Reducers/Reducers.cs b/src/NRediSearch/Aggregation/Reducers/Reducers.cs deleted file mode 100644 index 457f97dd3..000000000 --- a/src/NRediSearch/Aggregation/Reducers/Reducers.cs +++ /dev/null @@ -1,103 +0,0 @@ -// .NET port of https://github.com/RedisLabs/JRediSearch/ - -using System.Collections.Generic; - -namespace NRediSearch.Aggregation.Reducers -{ - public static class Reducers - { - public static Reducer Count() => CountReducer.Instance; - private sealed class CountReducer : Reducer - { - internal static readonly Reducer Instance = new CountReducer(); - private CountReducer() : base(null) { } - public override string Name => "COUNT"; - } - - private sealed class SingleFieldReducer : Reducer - { - public override string Name { get; } - - internal SingleFieldReducer(string name, string field) : base(field) - { - Name = name; - } - } - - public static Reducer CountDistinct(string field) => new SingleFieldReducer("COUNT_DISTINCT", field); - - public static Reducer CountDistinctish(string field) => new SingleFieldReducer("COUNT_DISTINCTISH", field); - - public static Reducer Sum(string field) => new SingleFieldReducer("SUM", field); - - public static Reducer 
Min(string field) => new SingleFieldReducer("MIN", field); - - public static Reducer Max(string field) => new SingleFieldReducer("MAX", field); - - public static Reducer Avg(string field) => new SingleFieldReducer("AVG", field); - - public static Reducer StdDev(string field) => new SingleFieldReducer("STDDEV", field); - - public static Reducer Quantile(string field, double percentile) => new QuantileReducer(field, percentile); - - private sealed class QuantileReducer : Reducer - { - private readonly double _percentile; - public QuantileReducer(string field, double percentile) : base(field) - { - _percentile = percentile; - } - protected override int GetOwnArgsCount() => base.GetOwnArgsCount() + 1; - protected override void AddOwnArgs(List args) - { - base.AddOwnArgs(args); - args.Add(_percentile); - } - public override string Name => "QUANTILE"; - } - public static Reducer FirstValue(string field, SortedField sortBy) => new FirstValueReducer(field, sortBy); - private sealed class FirstValueReducer : Reducer - { - private readonly SortedField? _sortBy; - public FirstValueReducer(string field, SortedField? sortBy) : base(field) - { - _sortBy = sortBy; - } - public override string Name => "FIRST_VALUE"; - - protected override int GetOwnArgsCount() => base.GetOwnArgsCount() + (_sortBy.HasValue ? 
3 : 0); - protected override void AddOwnArgs(List args) - { - base.AddOwnArgs(args); - if (_sortBy != null) - { - var sortBy = _sortBy.GetValueOrDefault(); - args.Add("BY".Literal()); - args.Add(sortBy.Field); - args.Add(sortBy.OrderAsArg()); - } - } - } - public static Reducer FirstValue(string field) => new FirstValueReducer(field, null); - - public static Reducer ToList(string field) => new SingleFieldReducer("TOLIST", field); - - public static Reducer RandomSample(string field, int size) => new RandomSampleReducer(field, size); - - private sealed class RandomSampleReducer : Reducer - { - private readonly int _size; - public RandomSampleReducer(string field, int size) : base(field) - { - _size = size; - } - public override string Name => "RANDOM_SAMPLE"; - protected override int GetOwnArgsCount() => base.GetOwnArgsCount() + 1; - protected override void AddOwnArgs(List args) - { - base.AddOwnArgs(args); - args.Add(_size.Boxed()); - } - } - } -} diff --git a/src/NRediSearch/Aggregation/Row.cs b/src/NRediSearch/Aggregation/Row.cs deleted file mode 100644 index a2adec522..000000000 --- a/src/NRediSearch/Aggregation/Row.cs +++ /dev/null @@ -1,24 +0,0 @@ -// .NET port of https://github.com/RedisLabs/JRediSearch/ - -using System.Collections.Generic; -using StackExchange.Redis; - -namespace NRediSearch.Aggregation -{ - public readonly struct Row - { - private readonly Dictionary _fields; - - internal Row(Dictionary fields) - { - _fields = fields; - } - - public bool ContainsKey(string key) => _fields.ContainsKey(key); - public RedisValue this[string key] => _fields.TryGetValue(key, out var result) ? result : RedisValue.Null; - - public string GetString(string key) => _fields.TryGetValue(key, out var result) ? (string)result : default; - public long GetInt64(string key) => _fields.TryGetValue(key, out var result) ? (long)result : default; - public double GetDouble(string key) => _fields.TryGetValue(key, out var result) ? 
(double)result : default; - } -} diff --git a/src/NRediSearch/Aggregation/SortedField.cs b/src/NRediSearch/Aggregation/SortedField.cs deleted file mode 100644 index 84705c0f8..000000000 --- a/src/NRediSearch/Aggregation/SortedField.cs +++ /dev/null @@ -1,23 +0,0 @@ -// .NET port of https://github.com/RedisLabs/JRediSearch/ - -using StackExchange.Redis; - -namespace NRediSearch.Aggregation -{ - public readonly struct SortedField - { - public SortedField(string field, Order order) - { - Field = field; - Order = order; - } - - public string Field { get; } - public Order Order { get; } - - internal object OrderAsArg() => (Order == Order.Ascending ? "ASC" : "DESC").Literal(); - - public static SortedField Ascending(string field) => new SortedField(field, Order.Ascending); - public static SortedField Descending(string field) => new SortedField(field, Order.Descending); - } -} diff --git a/src/NRediSearch/AggregationResult.cs b/src/NRediSearch/AggregationResult.cs deleted file mode 100644 index 2f757c3fc..000000000 --- a/src/NRediSearch/AggregationResult.cs +++ /dev/null @@ -1,47 +0,0 @@ -// .NET port of https://github.com/RedisLabs/JRediSearch/ - -using System.Collections.Generic; -using NRediSearch.Aggregation; -using StackExchange.Redis; - -namespace NRediSearch -{ - public sealed class AggregationResult - { - private readonly Dictionary[] _results; - - internal AggregationResult(RedisResult result, long cursorId = -1) - { - var arr = (RedisResult[])result; - - _results = new Dictionary[arr.Length - 1]; - for (int i = 1; i < arr.Length; i++) - { - var raw = (RedisResult[])arr[i]; - var cur = new Dictionary(); - for (int j = 0; j < raw.Length;) - { - var key = (string)raw[j++]; - var val = raw[j++]; - if (val.Type != ResultType.MultiBulk) - cur.Add(key, (RedisValue)val); - } - _results[i - 1] = cur; - } - - CursorId = cursorId; - } - public IReadOnlyList> GetResults() => _results; - - public Dictionary this[int index] - => index >= _results.Length ? 
null : _results[index]; - - public Row? GetRow(int index) - { - if (index >= _results.Length) return null; - return new Row(_results[index]); - } - - public long CursorId { get; } - } -} diff --git a/src/NRediSearch/AssemblyInfo.cs b/src/NRediSearch/AssemblyInfo.cs deleted file mode 100644 index c9fcce461..000000000 --- a/src/NRediSearch/AssemblyInfo.cs +++ /dev/null @@ -1,4 +0,0 @@ -// .NET port of https://github.com/RedisLabs/JRediSearch/ - -using System.Runtime.CompilerServices; -[assembly: InternalsVisibleTo("NRediSearch.Test, PublicKey=00240000048000009400000006020000002400005253413100040000010001007791a689e9d8950b44a9a8886baad2ea180e7a8a854f158c9b98345ca5009cdd2362c84f368f1c3658c132b3c0f74e44ff16aeb2e5b353b6e0fe02f923a050470caeac2bde47a2238a9c7125ed7dab14f486a5a64558df96640933b9f2b6db188fc4a820f96dce963b662fa8864adbff38e5b4542343f162ecdc6dad16912fff")] diff --git a/src/NRediSearch/Client.cs b/src/NRediSearch/Client.cs deleted file mode 100644 index 9b5861388..000000000 --- a/src/NRediSearch/Client.cs +++ /dev/null @@ -1,1405 +0,0 @@ -// .NET port of https://github.com/RedisLabs/JRediSearch/ - -using System; -using System.Collections.Generic; -using System.Threading.Tasks; -using NRediSearch.Aggregation; -using StackExchange.Redis; -using static NRediSearch.Schema; -using static NRediSearch.SuggestionOptions; - -namespace NRediSearch -{ - public sealed class Client - { - [Flags] - public enum IndexOptions - { - /// - /// All options disabled - /// - None = 0, - /// - /// Set this to tell the index not to save term offset vectors. 
This reduces memory consumption but does not - /// allow performing exact matches, and reduces overall relevance of multi-term queries - /// - UseTermOffsets = 1, - /// - /// If set (default), we keep flags per index record telling us what fields the term appeared on, - /// and allowing us to filter results by field - /// - KeepFieldFlags = 2, - /// - /// The default indexing options - use term offsets, keep fields flags, keep term frequencies - /// - Default = UseTermOffsets | KeepFieldFlags | KeepTermFrequencies, - /// - /// If set, we keep an index of the top entries per term, allowing extremely fast single word queries - /// regardless of index size, at the cost of more memory - /// - [Obsolete("'NOSCOREIDX' was removed from RediSearch.", true)] - UseScoreIndexes = 4, - /// - /// If set, we will disable the Stop-Words completely - /// - DisableStopWords = 8, - /// - /// If set, we keep an index of the top entries per term, allowing extremely fast single word queries - /// regardless of index size, at the cost of more memory - /// - KeepTermFrequencies = 16 - } - - public sealed class IndexDefinition - { - public enum IndexType - { - /// - /// Used to indicates that the index should follow the keys of type Hash changes - /// - Hash - } - - internal readonly IndexType _type = IndexType.Hash; - internal readonly bool _async; - internal readonly string[] _prefixes; - internal readonly string _filter; - internal readonly string _languageField; - internal readonly string _language; - internal readonly string _scoreFiled; - internal readonly double _score; - internal readonly string _payloadField; - - public IndexDefinition(bool async = false, string[] prefixes = null, - string filter = null, string languageField = null, string language = null, - string scoreFiled = null, double score = 1.0, string payloadField = null) - { - _async = async; - _prefixes = prefixes; - _filter = filter; - _languageField = languageField; - _language = language; - _scoreFiled = scoreFiled; 
- _score = score; - _payloadField = payloadField; - } - - internal void SerializeRedisArgs(List args) - { - args.Add("ON".Literal()); - args.Add(_type.ToString("g")); - if (_async) - { - args.Add("ASYNC".Literal()); - } - if (_prefixes?.Length > 0) - { - args.Add("PREFIX".Literal()); - args.Add(_prefixes.Length.ToString()); - args.AddRange(_prefixes); - } - if (_filter != null) - { - args.Add("FILTER".Literal()); - args.Add(_filter); - } - if (_languageField != null) { - args.Add("LANGUAGE_FIELD".Literal()); - args.Add(_languageField); - } - if (_language != null) { - args.Add("LANGUAGE".Literal()); - args.Add(_language); - } - if (_scoreFiled != null) { - args.Add("SCORE_FIELD".Literal()); - args.Add(_scoreFiled); - } - if (_score != 1.0) { - args.Add("SCORE".Literal()); - args.Add(_score.ToString()); - } - if (_payloadField != null) { - args.Add("PAYLOAD_FIELD".Literal()); - args.Add(_payloadField); - } - } - - } - - public sealed class ConfiguredIndexOptions - { - // This news up a enum which results in the 0 equivalent. - // It's not used in the library and I'm guessing this isn't intentional. - public static IndexOptions Default => new IndexOptions(); - - private IndexOptions _options; - private IndexDefinition _definition; - private string[] _stopwords; - - public ConfiguredIndexOptions(IndexOptions options = IndexOptions.Default) - { - _options = options; - } - - public ConfiguredIndexOptions(IndexDefinition definition, IndexOptions options = IndexOptions.Default) - : this(options) - { - _definition = definition; - } - - /// - /// Set a custom stopword list. - /// - /// The new stopwords to use. - public ConfiguredIndexOptions SetStopwords(params string[] stopwords) - { - _stopwords = stopwords ?? 
throw new ArgumentNullException(nameof(stopwords)); - if (stopwords.Length == 0) _options |= IndexOptions.DisableStopWords; - else _options &= ~IndexOptions.DisableStopWords; - return this; - } - - public ConfiguredIndexOptions SetNoStopwords() - { - _options |= IndexOptions.DisableStopWords; - - return this; - } - - internal void SerializeRedisArgs(List args) - { - SerializeRedisArgs(_options, args, _definition); - if (_stopwords?.Length > 0) - { - args.Add("STOPWORDS".Literal()); - args.Add(_stopwords.Length.Boxed()); - args.AddRange(_stopwords); - } - } - - internal static void SerializeRedisArgs(IndexOptions options, List args, IndexDefinition definition) - { - definition?.SerializeRedisArgs(args); - if ((options & IndexOptions.UseTermOffsets) == 0) - { - args.Add("NOOFFSETS".Literal()); - } - if ((options & IndexOptions.KeepFieldFlags) == 0) - { - args.Add("NOFIELDS".Literal()); - } - if ((options & IndexOptions.KeepTermFrequencies) == 0) - { - args.Add("NOFREQS".Literal()); - } - if ((options & IndexOptions.DisableStopWords) == IndexOptions.DisableStopWords) - { - args.Add("STOPWORDS".Literal()); - args.Add(0.Boxed()); - } - } - } - - private readonly IDatabaseAsync _db; - private IDatabase DbSync - => (_db as IDatabase) ?? throw new InvalidOperationException("Synchronous operations are not available on this database instance"); - - private readonly object _boxedIndexName; - public RedisKey IndexName => (RedisKey)_boxedIndexName; - public Client(RedisKey indexName, IDatabaseAsync db) - { - _db = db ?? 
throw new ArgumentNullException(nameof(db)); - _boxedIndexName = indexName; // only box once, not per-command - } - - public Client(RedisKey indexName, IDatabase db) : this(indexName, (IDatabaseAsync)db) { } - - /// - /// Create the index definition in redis - /// - /// a schema definition - /// index option flags - /// true if successful - public bool CreateIndex(Schema schema, ConfiguredIndexOptions options) - { - var args = new List - { - _boxedIndexName - }; - options.SerializeRedisArgs(args); - args.Add("SCHEMA".Literal()); - - foreach (var f in schema.Fields) - { - f.SerializeRedisArgs(args); - } - - return (string)DbSync.Execute("FT.CREATE", args) == "OK"; - } - - /// - /// Create the index definition in redis - /// - /// a schema definition - /// index option flags - /// true if successful - public async Task CreateIndexAsync(Schema schema, ConfiguredIndexOptions options) - { - var args = new List - { - _boxedIndexName - }; - options.SerializeRedisArgs(args); - args.Add("SCHEMA".Literal()); - - foreach (var f in schema.Fields) - { - f.SerializeRedisArgs(args); - } - - return (string)await _db.ExecuteAsync("FT.CREATE", args).ConfigureAwait(false) == "OK"; - } - - /// - /// Alter index add fields - /// - /// list of fields - /// `true` is successful - public bool AlterIndex(params Field[] fields) - { - var args = new List - { - _boxedIndexName, - "SCHEMA".Literal(), - "ADD".Literal() - }; - - foreach (var field in fields) - { - field.SerializeRedisArgs(args); - } - - return (string)DbSync.Execute("FT.ALTER", args) == "OK"; - } - - /// - /// Alter index add fields - /// - /// list of fields - /// `true` is successful - public async Task AlterIndexAsync(params Field[] fields) - { - var args = new List - { - _boxedIndexName, - "SCHEMA".Literal(), - "ADD".Literal() - }; - - foreach (var field in fields) - { - field.SerializeRedisArgs(args); - } - - return (string)(await _db.ExecuteAsync("FT.ALTER", args).ConfigureAwait(false)) == "OK"; - } - - /// - /// Search 
the index - /// - /// a object with the query string and optional parameters - /// a object with the results - public SearchResult Search(Query q) - { - var args = new List - { - _boxedIndexName - }; - q.SerializeRedisArgs(args); - - var resp = (RedisResult[])DbSync.Execute("FT.SEARCH", args); - return new SearchResult(resp, !q.NoContent, q.WithScores, q.WithPayloads, q.ExplainScore); - } - - /// - /// Search the index - /// - /// a object with the query string and optional parameters - /// a object with the results - public async Task SearchAsync(Query q) - { - var args = new List - { - _boxedIndexName - }; - q.SerializeRedisArgs(args); - - var resp = (RedisResult[])await _db.ExecuteAsync("FT.SEARCH", args).ConfigureAwait(false); - return new SearchResult(resp, !q.NoContent, q.WithScores, q.WithPayloads, q.ExplainScore); - } - - /// - /// Return Distinct Values in a TAG field - /// - /// TAG field name - /// List of TAG field values - public RedisValue[] TagVals(string fieldName) => - (RedisValue[])DbSync.Execute("FT.TAGVALS", _boxedIndexName, fieldName); - - /// - /// Return Distinct Values in a TAG field - /// - /// TAG field name - /// List of TAG field values - public async Task TagValsAsync(string fieldName) => - (RedisValue[])await _db.ExecuteAsync("FT.TAGVALS", _boxedIndexName, fieldName).ConfigureAwait(false); - - /// - /// Add a single document to the query - /// - /// the id of the document. It cannot belong to a document already in the index unless replace is set - /// the document's score, floating point number between 0 and 1 - /// a map of the document's fields - /// if set, we only index the document and do not save its contents. 
This allows fetching just doc ids - /// if set, and the document already exists, we reindex and update it - /// if set, we can save a payload in the index to be retrieved or evaluated by scoring functions on the server - public bool AddDocument(string docId, Dictionary fields, double score = 1.0, bool noSave = false, bool replace = false, byte[] payload = null) - { - var args = BuildAddDocumentArgs(docId, fields, score, noSave, replace, payload); - return (string)DbSync.Execute("FT.ADD", args) == "OK"; - } - - /// - /// Add a single document to the query - /// - /// the id of the document. It cannot belong to a document already in the index unless replace is set - /// the document's score, floating point number between 0 and 1 - /// a map of the document's fields - /// if set, we only index the document and do not save its contents. This allows fetching just doc ids - /// if set, and the document already exists, we reindex and update it - /// if set, we can save a payload in the index to be retrieved or evaluated by scoring functions on the server - /// true if the operation succeeded, false otherwise - public async Task AddDocumentAsync(string docId, Dictionary fields, double score = 1.0, bool noSave = false, bool replace = false, byte[] payload = null) - { - var args = BuildAddDocumentArgs(docId, fields, score, noSave, replace, payload); - - try - { - return (string)await _db.ExecuteAsync("FT.ADD", args).ConfigureAwait(false) == "OK"; - } - catch (RedisServerException ex) when (ex.Message == "Document already in index") - { - return false; - } - } - - /// - /// Add a document to the index - /// - /// The document to add - /// Options for the operation - /// true if the operation succeeded, false otherwise - public bool AddDocument(Document doc, AddOptions options = null) - { - var args = BuildAddDocumentArgs(doc.Id, doc._properties, doc.Score, options?.NoSave ?? false, options?.ReplacePolicy ?? 
AddOptions.ReplacementPolicy.None, doc.Payload, options?.Language); - - try - { - return (string)DbSync.Execute("FT.ADD", args) == "OK"; - } - catch (RedisServerException ex) when (ex.Message == "Document already in index" || ex.Message == "Document already exists") - { - return false; - } - - } - - /// - /// Add a document to the index - /// - /// The document to add - /// Options for the operation - /// true if the operation succeeded, false otherwise. Note that if the operation fails, an exception will be thrown - public async Task AddDocumentAsync(Document doc, AddOptions options = null) - { - var args = BuildAddDocumentArgs(doc.Id, doc._properties, doc.Score, options?.NoSave ?? false, options?.ReplacePolicy ?? AddOptions.ReplacementPolicy.None, doc.Payload, options?.Language); - return (string)await _db.ExecuteAsync("FT.ADD", args).ConfigureAwait(false) == "OK"; - } - - /// - /// Add a batch of documents to the index. - /// - /// The documents to add - /// `true` on success for each document - public bool[] AddDocuments(params Document[] documents) => - AddDocuments(new AddOptions(), documents); - - /// - /// Add a batch of documents to the index - /// - /// Options for the operation - /// The documents to add - /// `true` on success for each document - public bool[] AddDocuments(AddOptions options, params Document[] documents) - { - var result = new bool[documents.Length]; - - for (var i = 0; i < documents.Length; i++) - { - result[i] = AddDocument(documents[i], options); - } - - return result; - } - - /// - /// Add a batch of documents to the index. 
- /// - /// The documents to add - /// `true` on success for each document - public Task AddDocumentsAsync(params Document[] documents) => - AddDocumentsAsync(new AddOptions(), documents); - - /// - /// Add a batch of documents to the index - /// - /// Options for the operation - /// The documents to add - /// `true` on success for each document - public async Task AddDocumentsAsync(AddOptions options, params Document[] documents) - { - var result = new bool[documents.Length]; - - for (var i = 0; i < documents.Length; i++) - { - result[i] = await AddDocumentAsync(documents[i], options); - } - - return result; - } - - private List BuildAddDocumentArgs(string docId, Dictionary fields, double score, bool noSave, bool replace, byte[] payload) - => BuildAddDocumentArgs(docId, fields, score, noSave, replace ? AddOptions.ReplacementPolicy.Full : AddOptions.ReplacementPolicy.None, payload, null); - private List BuildAddDocumentArgs(string docId, Dictionary fields, double score, bool noSave, AddOptions.ReplacementPolicy replacementPolicy, byte[] payload, string language) - { - var args = new List { _boxedIndexName, docId, score }; - if (noSave) - { - args.Add("NOSAVE".Literal()); - } - if (replacementPolicy != AddOptions.ReplacementPolicy.None) - { - args.Add("REPLACE".Literal()); - if (replacementPolicy == AddOptions.ReplacementPolicy.Partial) - { - args.Add("PARTIAL".Literal()); - } - } - if (!string.IsNullOrWhiteSpace(language)) - { - args.Add("LANGUAGE".Literal()); - args.Add(language); - } - - if (payload != null) - { - args.Add("PAYLOAD".Literal()); - args.Add(payload); - } - - args.Add("FIELDS".Literal()); - foreach (var ent in fields) - { - args.Add(ent.Key); - args.Add(ent.Value); - } - return args; - } - - /// - /// Convenience method for calling AddDocument with replace=true. - /// - /// The ID of the document to replce. - /// The document fields. - /// The new score. - /// The new payload. 
- public bool ReplaceDocument(string docId, Dictionary fields, double score = 1.0, byte[] payload = null) - => AddDocument(docId, fields, score, false, true, payload); - - /// - /// Convenience method for calling AddDocumentAsync with replace=true. - /// - /// The ID of the document to replce. - /// The document fields. - /// The new score. - /// The new payload. - public Task ReplaceDocumentAsync(string docId, Dictionary fields, double score = 1.0, byte[] payload = null) - => AddDocumentAsync(docId, fields, score, false, true, payload); - - /// - /// Index a document already in redis as a HASH key. - /// [Deprecated] Use IDatabase.HashSet instead. - /// - /// the id of the document in redis. This must match an existing, unindexed HASH key - /// the document's index score, between 0 and 1 - /// if set, and the document already exists, we reindex and update it - /// true on success - [Obsolete("Use IDatabase.HashSet instead.")] - public bool AddHash(string docId, double score, bool replace) => AddHash((RedisKey)docId, score, replace); - - /// - /// Index a document already in redis as a HASH key. - /// [Deprecated] Use IDatabase.HashSet instead. - /// - /// the id of the document in redis. This must match an existing, unindexed HASH key - /// the document's index score, between 0 and 1 - /// if set, and the document already exists, we reindex and update it - /// true on success - [Obsolete("Use IDatabase.HashSet instead.")] - public bool AddHash(RedisKey docId, double score, bool replace) - { - var args = new List { _boxedIndexName, docId, score }; - if (replace) - { - args.Add("REPLACE".Literal()); - } - return (string)DbSync.Execute("FT.ADDHASH", args) == "OK"; - } - - /// - /// Index a document already in redis as a HASH key. - /// [Deprecated] Use IDatabase.HashSet instead. - /// - /// the id of the document in redis. 
This must match an existing, unindexed HASH key - /// the document's index score, between 0 and 1 - /// if set, and the document already exists, we reindex and update it - /// true on success - [Obsolete("Use IDatabase.HashSet instead.")] - public Task AddHashAsync(string docId, double score, bool replace) => AddHashAsync((RedisKey)docId, score, replace); - - /// - /// Index a document already in redis as a HASH key. - /// [Deprecated] Use IDatabase.HashSet instead. - /// - /// the id of the document in redis. This must match an existing, unindexed HASH key - /// the document's index score, between 0 and 1 - /// if set, and the document already exists, we reindex and update it - /// true on success - [Obsolete("Use IDatabase.HashSet instead.")] - public async Task AddHashAsync(RedisKey docId, double score, bool replace) - { - var args = new List { _boxedIndexName, docId, score }; - if (replace) - { - args.Add("REPLACE".Literal()); - } - return (string)await _db.ExecuteAsync("FT.ADDHASH", args).ConfigureAwait(false) == "OK"; - } - - /// - /// Get the index info, including memory consumption and other statistics - /// - /// a map of key/value pairs - public Dictionary GetInfo() => - ParseGetInfo(DbSync.Execute("FT.INFO", _boxedIndexName)); - - /// - /// Get the index info, including memory consumption and other statistics - /// - /// a map of key/value pairs - public async Task> GetInfoAsync() => - ParseGetInfo(await _db.ExecuteAsync("FT.INFO", _boxedIndexName).ConfigureAwait(false)); - - private static Dictionary ParseGetInfo(RedisResult value) - { - var res = (RedisResult[])value; - var info = new Dictionary(); - for (int i = 0; i < res.Length; i += 2) - { - var val = res[i + 1]; - if (val.Type != ResultType.MultiBulk) - { - info.Add((string)res[i], (RedisValue)val); - } - } - return info; - } - - /// - /// Get the index info, including memory consumption and other statistics. - /// - /// An `InfoResult` object with parsed values from the FT.INFO command. 
- public InfoResult GetInfoParsed() => - new InfoResult(DbSync.Execute("FT.INFO", _boxedIndexName)); - - - - /// - /// Get the index info, including memory consumption and other statistics. - /// - /// An `InfoResult` object with parsed values from the FT.INFO command. - public async Task GetInfoParsedAsync() => - new InfoResult(await _db.ExecuteAsync("FT.INFO", _boxedIndexName).ConfigureAwait(false)); - - /// - /// Delete a document from the index. - /// - /// the document's id - /// if true also deletes the actual document if it is in the index - /// true if it has been deleted, false if it did not exist - public bool DeleteDocument(string docId, bool deleteDocument = false) - { - var args = new List - { - _boxedIndexName, - docId - }; - - if (deleteDocument) - { - args.Add("DD".Literal()); - } - - return (long)DbSync.Execute("FT.DEL", args) == 1; - } - - /// - /// Delete a document from the index. - /// - /// the document's id - /// the document's id - /// true if it has been deleted, false if it did not exist - public async Task DeleteDocumentAsync(string docId, bool deleteDocument = false) - { - var args = new List - { - _boxedIndexName, - docId - }; - - if (deleteDocument) - { - args.Add("DD".Literal()); - } - - return (long)await _db.ExecuteAsync("FT.DEL", args).ConfigureAwait(false) == 1; - } - - /// - /// Delete multiple documents from an index. - /// - /// if true also deletes the actual document ifs it is in the index - /// the document ids to delete - /// true on success for each document if it has been deleted, false if it did not exist - public bool[] DeleteDocuments(bool deleteDocuments, params string[] docIds) - { - var result = new bool[docIds.Length]; - - for (var i = 0; i < docIds.Length; i++) - { - result[i] = DeleteDocument(docIds[i], deleteDocuments); - } - - return result; - } - - /// - /// Delete multiple documents from an index. 
- /// - /// if true also deletes the actual document ifs it is in the index - /// the document ids to delete - /// true on success for each document if it has been deleted, false if it did not exist - public async Task DeleteDocumentsAsync(bool deleteDocuments, params string[] docIds) - { - var result = new bool[docIds.Length]; - - for (var i = 0; i < docIds.Length; i++) - { - result[i] = await DeleteDocumentAsync(docIds[i], deleteDocuments); - } - - return result; - } - - /// - /// Drop the index and all associated keys, including documents - /// - /// true on success - public bool DropIndex() - { - return (string)DbSync.Execute("FT.DROP", _boxedIndexName) == "OK"; - } - /// - /// Drop the index and all associated keys, including documents - /// - /// true on success - public async Task DropIndexAsync() - { - return (string)await _db.ExecuteAsync("FT.DROP", _boxedIndexName).ConfigureAwait(false) == "OK"; - } - - /// - /// [Deprecated] Optimize memory consumption of the index by removing extra saved capacity. This does not affect speed - /// - [Obsolete("Index optimizations are done by the internal garbage collector in the background.")] - public long OptimizeIndex() - { - return default; - } - - /// - /// [Deprecated] Optimize memory consumption of the index by removing extra saved capacity. This does not affect speed - /// - [Obsolete("Index optimizations are done by the internal garbage collector in the background.")] - public Task OptimizeIndexAsync() - { - return Task.FromResult(default(long)); - } - - /// - /// Get the size of an autoc-complete suggestion dictionary - /// - public long CountSuggestions() - => (long)DbSync.Execute("FT.SUGLEN", _boxedIndexName); - - /// - /// Get the size of an autoc-complete suggestion dictionary - /// - public async Task CountSuggestionsAsync() - => (long)await _db.ExecuteAsync("FT.SUGLEN", _boxedIndexName).ConfigureAwait(false); - - /// - /// Add a suggestion string to an auto-complete suggestion dictionary. 
This is disconnected from the index definitions, and leaves creating and updating suggestino dictionaries to the user. - /// - /// the Suggestion to be added - /// if set, we increment the existing entry of the suggestion by the given score, instead of replacing the score. This is useful for updating the dictionary based on user queries in real time - /// the current size of the suggestion dictionary. - public long AddSuggestion(Suggestion suggestion, bool increment = false) - { - var args = new List - { - _boxedIndexName, - suggestion.String, - suggestion.Score - }; - - if (increment) - { - args.Add("INCR".Literal()); - } - - if (suggestion.Payload != null) - { - args.Add("PAYLOAD".Literal()); - args.Add(suggestion.Payload); - } - - return (long)DbSync.Execute("FT.SUGADD", args); - } - - /// - /// Add a suggestion string to an auto-complete suggestion dictionary. This is disconnected from the index definitions, and leaves creating and updating suggestino dictionaries to the user. - /// - /// the Suggestion to be added - /// if set, we increment the existing entry of the suggestion by the given score, instead of replacing the score. This is useful for updating the dictionary based on user queries in real time - /// the current size of the suggestion dictionary. - public async Task AddSuggestionAsync(Suggestion suggestion, bool increment = false) - { - var args = new List - { - _boxedIndexName, - suggestion.String, - suggestion.Score - }; - - if (increment) - { - args.Add("INCR".Literal()); - } - - if (suggestion.Payload != null) - { - args.Add("PAYLOAD".Literal()); - args.Add(suggestion.Payload); - } - - return (long)await _db.ExecuteAsync("FT.SUGADD", args).ConfigureAwait(false); - } - - /// - /// Delete a string from a suggestion index. - /// - /// the string to delete - public bool DeleteSuggestion(string value) - => (long)DbSync.Execute("FT.SUGDEL", _boxedIndexName, value) == 1; - - /// - /// Delete a string from a suggestion index. 
- /// - /// the string to delete - public async Task DeleteSuggestionAsync(string value) - => (long)await _db.ExecuteAsync("FT.SUGDEL", _boxedIndexName, value).ConfigureAwait(false) == 1; - - /// - /// Get completion suggestions for a prefix - /// - /// the prefix to complete on - /// if set,we do a fuzzy prefix search, including prefixes at levenshtein distance of 1 from the prefix sent - /// If set, we limit the results to a maximum of num. (Note: The default is 5, and the number cannot be greater than 10). - /// a list of the top suggestions matching the prefix - public string[] GetSuggestions(string prefix, bool fuzzy = false, int max = 5) - { - var optionsBuilder = SuggestionOptions.Builder.Max(max); - - if (fuzzy) - { - optionsBuilder.Fuzzy(); - } - - var suggestions = GetSuggestions(prefix, optionsBuilder.Build()); - - var result = new string[suggestions.Length]; - - for (var i = 0; i < suggestions.Length; i++) - { - result[i] = suggestions[i].String; - } - - return result; - } - - /// - /// Get completion suggestions for a prefix - /// - /// the prefix to complete on - /// the options on what you need returned and other usage - /// a list of the top suggestions matching the prefix - public Suggestion[] GetSuggestions(string prefix, SuggestionOptions options) - { - var args = new List - { - _boxedIndexName, - prefix, - "MAX".Literal(), - options.Max.Boxed() - }; - - if (options.Fuzzy) - { - args.Add("FUZZY".Literal()); - } - - if (options.With != WithOptions.None) - { - args.AddRange(options.GetFlags()); - } - - var results = (RedisResult[])DbSync.Execute("FT.SUGGET", args); - - if (options.With == WithOptions.None) - { - return GetSuggestionsNoOptions(results); - } - - if (options.GetIsPayloadAndScores()) - { - return GetSuggestionsWithPayloadAndScores(results); - } - - if (options.GetIsPayload()) - { - return GetSuggestionsWithPayload(results); - } - - if (options.GetIsScores()) - { - return GetSuggestionsWithScores(results); - } - - return default; - } - 
- /// - /// Get completion suggestions for a prefix - /// - /// the prefix to complete on - /// if set,we do a fuzzy prefix search, including prefixes at levenshtein distance of 1 from the prefix sent - /// If set, we limit the results to a maximum of num. (Note: The default is 5, and the number cannot be greater than 10). - /// a list of the top suggestions matching the prefix - public async Task GetSuggestionsAsync(string prefix, bool fuzzy = false, int max = 5) - { - var optionsBuilder = SuggestionOptions.Builder.Max(max); - - if (fuzzy) - { - optionsBuilder.Fuzzy(); - } - - var suggestions = await GetSuggestionsAsync(prefix, optionsBuilder.Build()); - - var result = new string[suggestions.Length]; - - for(var i = 0; i < suggestions.Length; i++) - { - result[i] = suggestions[i].String; - } - - return result; - } - - - /// - /// Get completion suggestions for a prefix - /// - /// the prefix to complete on - /// the options on what you need returned and other usage - /// a list of the top suggestions matching the prefix - public async Task GetSuggestionsAsync(string prefix, SuggestionOptions options) - { - var args = new List - { - _boxedIndexName, - prefix, - "MAX".Literal(), - options.Max.Boxed() - }; - - if (options.Fuzzy) - { - args.Add("FUZZY".Literal()); - } - - if (options.With != WithOptions.None) - { - args.AddRange(options.GetFlags()); - } - - var results = (RedisResult[])await _db.ExecuteAsync("FT.SUGGET", args).ConfigureAwait(false); - - if (options.With == WithOptions.None) - { - return GetSuggestionsNoOptions(results); - } - - if (options.GetIsPayloadAndScores()) - { - return GetSuggestionsWithPayloadAndScores(results); - } - - if (options.GetIsPayload()) - { - return GetSuggestionsWithPayload(results); - } - - if (options.GetIsScores()) - { - return GetSuggestionsWithScores(results); - } - - return default; - } - - /// - /// Perform an aggregate query - /// - /// The query to watch - [Obsolete("Use `Aggregate` method that takes an 
`AggregationBuilder`.")] - public AggregationResult Aggregate(AggregationRequest query) - { - var args = new List - { - _boxedIndexName - }; - query.SerializeRedisArgs(args); - - var resp = DbSync.Execute("FT.AGGREGATE", args); - - return new AggregationResult(resp); - } - - /// - /// Perform an aggregate query - /// - /// The query to watch - [Obsolete("Use `AggregateAsync` method that takes an `AggregationBuilder`.")] - public async Task AggregateAsync(AggregationRequest query) - { - var args = new List - { - _boxedIndexName - }; - query.SerializeRedisArgs(args); - - var resp = await _db.ExecuteAsync("FT.AGGREGATE", args).ConfigureAwait(false); - - return new AggregationResult(resp); - } - - /// - /// Perform an aggregate query - /// - /// The query to watch - public AggregationResult Aggregate(AggregationBuilder query) - { - var args = new List - { - _boxedIndexName - }; - - query.SerializeRedisArgs(args); - - var resp = DbSync.Execute("FT.AGGREGATE", args); - - if (query.IsWithCursor) - { - var respArray = (RedisResult[])resp; - - return new AggregationResult(respArray[0], (long)respArray[1]); - } - else - { - return new AggregationResult(resp); - } - } - - /// - /// Perform an aggregate query - /// - /// The query to watch - public async Task AggregateAsync(AggregationBuilder query) - { - var args = new List - { - _boxedIndexName - }; - - query.SerializeRedisArgs(args); - - var resp = await _db.ExecuteAsync("FT.AGGREGATE", args).ConfigureAwait(false); - - if (query.IsWithCursor) - { - var respArray = (RedisResult[])resp; - - return new AggregationResult(respArray[0], (long)respArray[1]); - } - else - { - return new AggregationResult(resp); - } - } - - /// - /// Read from an existing aggregate cursor. - /// - /// The cursor's ID. - /// Limit the amount of returned results. 
- /// A AggregationResult object with the results - public AggregationResult CursorRead(long cursorId, int count = -1) - { - var args = new List - { - "READ", - _boxedIndexName, - cursorId - - }; - - if (count > -1) - { - args.Add("COUNT"); - args.Add(count); - } - - RedisResult[] resp = (RedisResult[])DbSync.Execute("FT.CURSOR", args); - - return new AggregationResult(resp[0], (long)resp[1]); - } - - /// - /// Read from an existing aggregate cursor. - /// - /// The cursor's ID. - /// Limit the amount of returned results. - /// A AggregationResult object with the results - public async Task CursorReadAsync(long cursorId, int count) - { - var args = new List - { - "READ", - _boxedIndexName, - cursorId - - }; - - if (count > -1) - { - args.Add("COUNT"); - args.Add(count); - } - - RedisResult[] resp = (RedisResult[])(await _db.ExecuteAsync("FT.CURSOR", args).ConfigureAwait(false)); - - return new AggregationResult(resp[0], (long)resp[1]); - } - - /// - /// Delete a cursor from the index. - /// - /// The cursor's ID. - /// `true` if it has been deleted, `false` if it did not exist. - public bool CursorDelete(long cursorId) - { - var args = new List - { - "DEL", - _boxedIndexName, - cursorId - }; - - return (string)DbSync.Execute("FT.CURSOR", args) == "OK"; - } - - /// - /// Delete a cursor from the index. - /// - /// The cursor's ID. - /// `true` if it has been deleted, `false` if it did not exist. 
- public async Task CursorDeleteAsync(long cursorId) - { - var args = new List - { - "DEL", - _boxedIndexName, - cursorId - }; - - return (string)(await _db.ExecuteAsync("FT.CURSOR", args).ConfigureAwait(false)) == "OK"; - } - - /// - /// Generate an explanatory textual query tree for this query string - /// - /// The query to explain - /// A string describing this query - public string Explain(Query q) - { - var args = new List - { - _boxedIndexName - }; - q.SerializeRedisArgs(args); - return (string)DbSync.Execute("FT.EXPLAIN", args); - } - - /// - /// Generate an explanatory textual query tree for this query string - /// - /// The query to explain - /// A string describing this query - public async Task ExplainAsync(Query q) - { - var args = new List - { - _boxedIndexName - }; - q.SerializeRedisArgs(args); - return (string)await _db.ExecuteAsync("FT.EXPLAIN", args).ConfigureAwait(false); - } - - /// - /// Get a document from the index. - /// - /// The document ID to retrieve. - /// The document as stored in the index. If the document does not exist, null is returned. - public Document GetDocument(string docId) - => Document.Parse(docId, DbSync.Execute("FT.GET", _boxedIndexName, docId)); - - /// - /// Get a document from the index. - /// - /// The document ID to retrieve. - /// The document as stored in the index. If the document does not exist, null is returned. - public async Task GetDocumentAsync(string docId) - => Document.Parse(docId, await _db.ExecuteAsync("FT.GET", _boxedIndexName, docId).ConfigureAwait(false)); - - /// - /// Gets a series of documents from the index. - /// - /// The document IDs to retrieve. - /// The documents stored in the index. If the document does not exist, null is returned in the list. 
- public Document[] GetDocuments(params string[] docIds) - { - if (docIds.Length == 0) - { - return Array.Empty(); - } - - var args = new List - { - _boxedIndexName - }; - - foreach (var docId in docIds) - { - args.Add(docId); - } - - var queryResults = (RedisResult[])DbSync.Execute("FT.MGET", args); - - var result = new Document[docIds.Length]; - - for (var i = 0; i < docIds.Length; i++) - { - var queryResult = queryResults[i]; - - if (queryResult.IsNull) - { - result[i] = null; - } - else - { - result[i] = Document.Parse(docIds[i], queryResult); - } - } - - return result; - } - - /// - /// Gets a series of documents from the index. - /// - /// The document IDs to retrieve. - /// The documents stored in the index. If the document does not exist, null is returned in the list. - public async Task GetDocumentsAsync(params string[] docIds) - { - if (docIds.Length == 0) - { - return new Document[] { }; - } - - var args = new List - { - _boxedIndexName - }; - - foreach (var docId in docIds) - { - args.Add(docId); - } - - var queryResults = (RedisResult[])await _db.ExecuteAsync("FT.MGET", args).ConfigureAwait(false); - - var result = new Document[docIds.Length]; - - for (var i = 0; i < docIds.Length; i++) - { - var queryResult = queryResults[i]; - - if (queryResult.IsNull) - { - result[i] = null; - } - else - { - result[i] = Document.Parse(docIds[i], queryResult); - } - } - - return result; - } - - /// - /// Replace specific fields in a document. Unlike #replaceDocument(), fields not present in the field list - /// are not erased, but retained. This avoids reindexing the entire document if the new values are not - /// indexed (though a reindex will happen). - /// - /// The ID of the document. - /// The fields and values to update. - /// The new score of the document. 
- public bool UpdateDocument(string docId, Dictionary fields, double score = 1.0) - { - var args = BuildAddDocumentArgs(docId, fields, score, false, AddOptions.ReplacementPolicy.Partial, null, null); - return (string)DbSync.Execute("FT.ADD", args) == "OK"; - } - - /// - /// Replace specific fields in a document. Unlike #replaceDocument(), fields not present in the field list - /// are not erased, but retained. This avoids reindexing the entire document if the new values are not - /// indexed (though a reindex will happen - /// - /// The ID of the document. - /// The fields and values to update. - /// The new score of the document. - public async Task UpdateDocumentAsync(string docId, Dictionary fields, double score = 1.0) - { - var args = BuildAddDocumentArgs(docId, fields, score, false, AddOptions.ReplacementPolicy.Partial, null, null); - return (string)await _db.ExecuteAsync("FT.ADD", args).ConfigureAwait(false) == "OK"; - } - - private static Suggestion[] GetSuggestionsNoOptions(RedisResult[] results) - { - var suggestions = new Suggestion[results.Length]; - - for (var i = 0; i < results.Length; i++) - { - suggestions[i] = Suggestion.Builder.String((string)results[i]).Build(true); - } - - return suggestions; - } - - private static Suggestion[] GetSuggestionsWithPayloadAndScores(RedisResult[] results) - { - var suggestions = new Suggestion[results.Length / 3]; - - for (var i = 3; i <= results.Length; i += 3) - { - var suggestion = Suggestion.Builder; - - suggestion.String((string)results[i - 3]); - suggestion.Score((double)results[i - 2]); - suggestion.Payload((string)results[i - 1]); - - suggestions[(i / 3) - 1] = suggestion.Build(true); - } - - return suggestions; - } - - private static Suggestion[] GetSuggestionsWithPayload(RedisResult[] results) - { - var suggestions = new Suggestion[results.Length / 2]; - - for (var i = 2; i <= results.Length; i += 2) - { - var suggestion = Suggestion.Builder; - - suggestion.String((string)results[i - 2]); - 
suggestion.Payload((string)results[i - 1]); - - suggestions[(i / 2) - 1] = suggestion.Build(true); - } - - return suggestions; - } - - private static Suggestion[] GetSuggestionsWithScores(RedisResult[] results) - { - var suggestions = new Suggestion[results.Length / 2]; - - for (var i = 2; i <= results.Length; i += 2) - { - var suggestion = Suggestion.Builder; - - suggestion.String((string)results[i - 2]); - suggestion.Score((double)results[i - 1]); - - suggestions[(i / 2) - 1] = suggestion.Build(true); - } - - return suggestions; - } - } -} diff --git a/src/NRediSearch/Document.cs b/src/NRediSearch/Document.cs deleted file mode 100644 index f7118e572..000000000 --- a/src/NRediSearch/Document.cs +++ /dev/null @@ -1,83 +0,0 @@ -// .NET port of https://github.com/RedisLabs/JRediSearch/ - -using System.Collections.Generic; -using StackExchange.Redis; - -namespace NRediSearch -{ - /// - /// Document represents a single indexed document or entity in the engine - /// - public class Document - { - public string Id { get; } - public double Score { get; } - public byte[] Payload { get; } - public string[] ScoreExplained { get; private set; } - internal readonly Dictionary _properties; - public Document(string id, double score, byte[] payload) : this(id, null, score, payload) { } - public Document(string id) : this(id, null, 1.0, null) { } - - public Document(string id, Dictionary fields, double score = 1.0) : this(id, fields, score, null) { } - - public Document(string id, Dictionary fields, double score, byte[] payload) - { - Id = id; - _properties = fields ?? 
new Dictionary(); - Score = score; - Payload = payload; - } - - public IEnumerable> GetProperties() => _properties; - - public static Document Load(string id, double score, byte[] payload, RedisValue[] fields) - { - Document ret = new Document(id, score, payload); - if (fields != null) - { - for (int i = 0; i < fields.Length; i += 2) - { - ret[(string)fields[i]] = fields[i + 1]; - } - } - return ret; - } - - public static Document Load(string id, double score, byte[] payload, RedisValue[] fields, string[] scoreExplained) - { - Document ret = Document.Load(id, score, payload, fields); - if (scoreExplained != null) - { - ret.ScoreExplained = scoreExplained; - } - return ret; - } - - public RedisValue this[string key] - { - get { return _properties.TryGetValue(key, out var val) ? val : default(RedisValue); } - internal set { _properties[key] = value; } - } - - public bool HasProperty(string key) => _properties.ContainsKey(key); - - internal static Document Parse(string docId, RedisResult result) - { - if (result == null || result.IsNull) return null; - var arr = (RedisResult[])result; - var doc = new Document(docId); - - for(int i = 0; i < arr.Length; ) - { - doc[(string)arr[i++]] = (RedisValue)arr[i++]; - } - return doc; - } - - public Document Set(string field, RedisValue value) - { - this[field] = value; - return this; - } - } -} diff --git a/src/NRediSearch/Extensions.cs b/src/NRediSearch/Extensions.cs deleted file mode 100644 index e8770a287..000000000 --- a/src/NRediSearch/Extensions.cs +++ /dev/null @@ -1,38 +0,0 @@ -// .NET port of https://github.com/RedisLabs/JRediSearch/ - -using System; -using System.Globalization; -using StackExchange.Redis; - -namespace NRediSearch -{ - public static class Extensions - { - internal static string AsRedisString(this double value, bool forceDecimal = false) - { - if (double.IsNegativeInfinity(value)) - { - return "-inf"; - } - else if (double.IsPositiveInfinity(value)) - { - return "inf"; - } - else - { - return 
value.ToString(forceDecimal ? "#.0" : "G17", NumberFormatInfo.InvariantInfo); - } - } - internal static string AsRedisString(this GeoUnit value) - { - switch (value) - { - case GeoUnit.Feet: return "ft"; - case GeoUnit.Kilometers: return "km"; - case GeoUnit.Meters: return "m"; - case GeoUnit.Miles: return "mi"; - default: throw new InvalidOperationException($"Unknown unit: {value}"); - } - } - } -} diff --git a/src/NRediSearch/InfoResult.cs b/src/NRediSearch/InfoResult.cs deleted file mode 100644 index 8798a813d..000000000 --- a/src/NRediSearch/InfoResult.cs +++ /dev/null @@ -1,127 +0,0 @@ -using System.Collections.Generic; -using StackExchange.Redis; - -namespace NRediSearch -{ - public class InfoResult - { - private readonly Dictionary _all = new Dictionary(); - - public string IndexName => GetString("index_name"); - - public Dictionary Fields => GetRedisResultsDictionary("fields"); - - public long NumDocs => GetLong("num_docs"); - - public long NumTerms => GetLong("num_terms"); - - public long NumRecords => GetLong("num_records"); - - public double InvertedSzMebibytes => GetDouble("inverted_sz_mb"); - - public double InvertedCapMebibytes => GetDouble("inverted_cap_mb"); - - public double InvertedCapOvh => GetDouble("inverted_cap_ovh"); - - public double OffsetVectorsSzMebibytes => GetDouble("offset_vectors_sz_mb"); - - public double SkipIndexSizeMebibytes => GetDouble("skip_index_size_mb"); - - public double ScoreIndexSizeMebibytes => GetDouble("score_index_size_mb"); - - public double RecordsPerDocAvg => GetDouble("records_per_doc_avg"); - - public double BytesPerRecordAvg => GetDouble("bytes_per_record_avg"); - - public double OffsetsPerTermAvg => GetDouble("offsets_per_term_avg"); - - public double OffsetBitsPerRecordAvg => GetDouble("offset_bits_per_record_avg"); - - public string MaxDocId => GetString("max_doc_id"); - - public double DocTableSizeMebibytes => GetDouble("doc_table_size_mb"); - - public double SortableValueSizeMebibytes => 
GetDouble("sortable_value_size_mb"); - - public double KeyTableSizeMebibytes => GetDouble("key_table_size_mb"); - - public Dictionary GcStats => GetRedisResultDictionary("gc_stats"); - - public Dictionary CursorStats => GetRedisResultDictionary("cursor_stats"); - - public InfoResult(RedisResult result) - { - var results = (RedisResult[])result; - - for (var i = 0; i < results.Length; i += 2) - { - var key = (string)results[i]; - var value = results[i + 1]; - - _all.Add(key, value); - } - } - - private string GetString(string key) => _all.TryGetValue(key, out var value) ? (string)value : default; - - private long GetLong(string key) => _all.TryGetValue(key, out var value) ? (long)value : default; - - private double GetDouble(string key) - { - if (_all.TryGetValue(key, out var value)) - { - if ((string)value == "-nan") - { - return default; - } - else - { - return (double)value; - } - } - else - { - return default; - } - } - - private Dictionary GetRedisResultDictionary(string key) - { - if (_all.TryGetValue(key, out var value)) - { - var values = (RedisResult[])value; - var result = new Dictionary(); - - for (var ii = 0; ii < values.Length; ii += 2) - { - result.Add((string)values[ii], values[ii + 1]); - } - - return result; - } - else - { - return default; - } - } - - private Dictionary GetRedisResultsDictionary(string key) - { - if (_all.TryGetValue(key, out var value)) - { - var result = new Dictionary(); - - foreach (RedisResult[] fv in (RedisResult[])value) - { - result.Add((string)fv[0], fv); - } - - return result; - } - else - { - return default; - } - } - } -} diff --git a/src/NRediSearch/Literals.cs b/src/NRediSearch/Literals.cs deleted file mode 100644 index 82e8000a3..000000000 --- a/src/NRediSearch/Literals.cs +++ /dev/null @@ -1,50 +0,0 @@ -// .NET port of https://github.com/RedisLabs/JRediSearch/ - -using StackExchange.Redis; -using System.Collections; -using System.Linq; - -namespace NRediSearch -{ - /// - /// Cache to ensure we encode and box 
literals once only - /// - internal static class Literals - { - private static readonly Hashtable _boxed = new Hashtable(); - private static readonly object _null = RedisValue.Null; - /// - /// Obtain a lazily-cached pre-encoded and boxed representation of a string - /// - /// The value to get a literal representation for. - /// This should only be used for fixed values, not user data (the cache is never reclaimed, so it will be a memory leak) - public static object Literal(this string value) - { - if (value == null) return _null; - - object boxed = _boxed[value]; - if (boxed == null) - { - lock (_boxed) - { - boxed = _boxed[value]; - if (boxed == null) - { - boxed = (RedisValue)value; - _boxed.Add(value, boxed); - } - } - } - return boxed; - } - - private const int BOXED_MIN = -1, BOXED_MAX = 20; - private static readonly object[] s_Boxed = Enumerable.Range(BOXED_MIN, BOXED_MAX - BOXED_MIN).Select(i => (object)i).ToArray(); - - /// - /// Obtain a pre-boxed integer if possible, else box the inbound value - /// - /// The value to get a pre-boxed integer for. - public static object Boxed(this int value) => value >= BOXED_MIN && value < BOXED_MAX ? 
s_Boxed[value - BOXED_MIN] : value; - } -} diff --git a/src/NRediSearch/NRediSearch.csproj b/src/NRediSearch/NRediSearch.csproj deleted file mode 100644 index 553e41612..000000000 --- a/src/NRediSearch/NRediSearch.csproj +++ /dev/null @@ -1,11 +0,0 @@ - - - netstandard2.0;netcoreapp3.1;net5.0 - false - Redis;Search;Modules;RediSearch - true - - - - - \ No newline at end of file diff --git a/src/NRediSearch/Query.cs b/src/NRediSearch/Query.cs deleted file mode 100644 index 6f9c9fdff..000000000 --- a/src/NRediSearch/Query.cs +++ /dev/null @@ -1,505 +0,0 @@ -// .NET port of https://github.com/RedisLabs/JRediSearch/ - -using System.Collections.Generic; -using System.Globalization; -using StackExchange.Redis; - -namespace NRediSearch -{ - /// - /// Query represents query parameters and filters to load results from the engine - /// - public sealed class Query - { - /// - /// Filter represents a filtering rules in a query - /// - public abstract class Filter - { - public string Property { get; } - - internal abstract void SerializeRedisArgs(List args); - - internal Filter(string property) - { - Property = property; - } - } - - /// - /// NumericFilter wraps a range filter on a numeric field. 
It can be inclusive or exclusive - /// - public class NumericFilter : Filter - { - private readonly double min, max; - private readonly bool exclusiveMin, exclusiveMax; - - public NumericFilter(string property, double min, bool exclusiveMin, double max, bool exclusiveMax) : base(property) - { - this.min = min; - this.max = max; - this.exclusiveMax = exclusiveMax; - this.exclusiveMin = exclusiveMin; - } - - public NumericFilter(string property, double min, double max) : this(property, min, false, max, false) { } - - internal override void SerializeRedisArgs(List args) - { - static RedisValue FormatNum(double num, bool exclude) - { - if (!exclude || double.IsInfinity(num)) - { - return (RedisValue)num; // can use directly - } - // need to add leading bracket - return "(" + num.ToString("G17", NumberFormatInfo.InvariantInfo); - } - args.Add("FILTER".Literal()); - args.Add(Property); - args.Add(FormatNum(min, exclusiveMin)); - args.Add(FormatNum(max, exclusiveMax)); - } - } - - /// - /// GeoFilter encapsulates a radius filter on a geographical indexed fields - /// - public class GeoFilter : Filter - { - private readonly double lon, lat, radius; - private readonly GeoUnit unit; - - public GeoFilter(string property, double lon, double lat, double radius, GeoUnit unit) : base(property) - { - this.lon = lon; - this.lat = lat; - this.radius = radius; - this.unit = unit; - } - - internal override void SerializeRedisArgs(List args) - { - args.Add("GEOFILTER".Literal()); - args.Add(Property); - args.Add(lon); - args.Add(lat); - args.Add(radius); - args.Add(unit.AsRedisString().Literal()); - } - } - - internal readonly struct Paging - { - public int Offset { get; } - public int Count { get; } - - public Paging(int offset, int count) - { - Offset = offset; - Count = count; - } - } - - /// - /// The query's filter list. 
We only support AND operation on all those filters - /// - internal readonly List _filters = new List(); - - /// - /// The textual part of the query - /// - public string QueryString { get; } - - /// - /// The sorting parameters - /// - internal Paging _paging = new Paging(0, 10); - - /// - /// Set the query to verbatim mode, disabling stemming and query expansion - /// - public bool Verbatim { get; set; } - /// - /// Set the query not to return the contents of documents, and rather just return the ids - /// - public bool NoContent { get; set; } - /// - /// Set the query not to filter for stopwords. In general this should not be used - /// - public bool NoStopwords { get; set; } - /// - /// Set the query to return a factored score for each results. This is useful to merge results from multiple queries. - /// - public bool WithScores { get; set; } - /// - /// Set the query to return object payloads, if any were given - /// - public bool WithPayloads { get; set; } - - /// - /// Set the query language, for stemming purposes; see http://redisearch.io for documentation on languages and stemming - /// - public string Language { get; set; } - - /// - /// Set the query scoring. 
see https://oss.redislabs.com/redisearch/Scoring.html for documentation - /// - public string Scoring { get; set; } - public bool ExplainScore { get; set; } - - internal string[] _fields = null; - internal string[] _keys = null; - internal string[] _returnFields = null; - /// - /// Set the query payload to be evaluated by the scoring function - /// - public byte[] Payload { get; set; } - - /// - /// Set the query parameter to sort by - /// - public string SortBy { get; set; } - - /// - /// Set the query parameter to sort by ASC by default - /// - public bool SortAscending { get; set; } = true; - - // highlight and summarize - internal bool _wantsHighlight = false, _wantsSummarize = false; - internal string[] _highlightFields = null; - internal string[] _summarizeFields = null; - internal HighlightTags? _highlightTags = null; - internal string _summarizeSeparator = null; - internal int _summarizeNumFragments = -1, _summarizeFragmentLen = -1; - - /// - /// Create a new index - /// - /// The query string to use for this query. 
- public Query(string queryString) - { - QueryString = queryString; - } - - internal void SerializeRedisArgs(List args) - { - args.Add(QueryString); - - if (Verbatim) - { - args.Add("VERBATIM".Literal()); - } - if (NoContent) - { - args.Add("NOCONTENT".Literal()); - } - if (NoStopwords) - { - args.Add("NOSTOPWORDS".Literal()); - } - if (WithScores) - { - args.Add("WITHSCORES".Literal()); - } - if (WithPayloads) - { - args.Add("WITHPAYLOADS".Literal()); - } - if (Language != null) - { - args.Add("LANGUAGE".Literal()); - args.Add(Language); - } - if (_fields?.Length > 0) - { - args.Add("INFIELDS".Literal()); - args.Add(_fields.Length.Boxed()); - args.AddRange(_fields); - } - if (_keys?.Length > 0) - { - args.Add("INKEYS".Literal()); - args.Add(_keys.Length.Boxed()); - args.AddRange(_keys); - } - if (_returnFields?.Length > 0) - { - args.Add("RETURN".Literal()); - args.Add(_returnFields.Length.Boxed()); - args.AddRange(_returnFields); - } - - if (SortBy != null) - { - args.Add("SORTBY".Literal()); - args.Add(SortBy); - args.Add((SortAscending ? 
"ASC" : "DESC").Literal()); - } - - if (Scoring != null) - { - args.Add("SCORER".Literal()); - args.Add(Scoring); - - if (ExplainScore) - { - args.Add("EXPLAINSCORE".Literal()); - } - } - - if (Payload != null) - { - args.Add("PAYLOAD".Literal()); - args.Add(Payload); - } - - if (_paging.Offset != 0 || _paging.Count != 10) - { - args.Add("LIMIT".Literal()); - args.Add(_paging.Offset.Boxed()); - args.Add(_paging.Count.Boxed()); - } - - if (_filters?.Count > 0) - { - foreach (var f in _filters) - { - f.SerializeRedisArgs(args); - } - } - - if (_wantsHighlight) - { - args.Add("HIGHLIGHT".Literal()); - if (_highlightFields != null) - { - args.Add("FIELDS".Literal()); - args.Add(_highlightFields.Length.Boxed()); - foreach (var s in _highlightFields) - { - args.Add(s); - } - } - if (_highlightTags != null) - { - args.Add("TAGS".Literal()); - var tags = _highlightTags.GetValueOrDefault(); - args.Add(tags.Open); - args.Add(tags.Close); - } - } - if (_wantsSummarize) - { - args.Add("SUMMARIZE".Literal()); - if (_summarizeFields != null) - { - args.Add("FIELDS".Literal()); - args.Add(_summarizeFields.Length.Boxed()); - foreach (var s in _summarizeFields) - { - args.Add(s); - } - } - if (_summarizeNumFragments != -1) - { - args.Add("FRAGS".Literal()); - args.Add(_summarizeNumFragments.Boxed()); - } - if (_summarizeFragmentLen != -1) - { - args.Add("LEN".Literal()); - args.Add(_summarizeFragmentLen.Boxed()); - } - if (_summarizeSeparator != null) - { - args.Add("SEPARATOR".Literal()); - args.Add(_summarizeSeparator); - } - } - - if (_keys != null && _keys.Length > 0) - { - args.Add("INKEYS".Literal()); - args.Add(_keys.Length.Boxed()); - - foreach (var key in _keys) - { - args.Add(key); - } - } - - if (_returnFields != null && _returnFields.Length > 0) - { - args.Add("RETURN".Literal()); - args.Add(_returnFields.Length.Boxed()); - - foreach (var returnField in _returnFields) - { - args.Add(returnField); - } - } - } - - /// - /// Limit the results to a certain offset and limit 
- /// - /// the first result to show, zero based indexing - /// how many results we want to show - /// the query itself, for builder-style syntax - public Query Limit(int offset, int count) - { - _paging = new Paging(offset, count); - return this; - } - - /// - /// Add a filter to the query's filter list - /// - /// either a numeric or geo filter object - /// the query itself - public Query AddFilter(Filter f) - { - _filters.Add(f); - return this; - } - - /// - /// Limit the query to results that are limited to a specific set of fields - /// - /// a list of TEXT fields in the schemas - /// the query object itself - public Query LimitFields(params string[] fields) - { - _fields = fields; - return this; - } - - /// - /// Limit the query to results that are limited to a specific set of keys - /// - /// a list of the TEXT fields in the schemas - /// the query object itself - public Query LimitKeys(params string[] keys) - { - _keys = keys; - return this; - } - - /// - /// Result's projection - the fields to return by the query - /// - /// fields a list of TEXT fields in the schemas - /// the query object itself - public Query ReturnFields(params string[] fields) - { - _returnFields = fields; - return this; - } - - public readonly struct HighlightTags - { - public HighlightTags(string open, string close) - { - Open = open; - Close = close; - } - public string Open { get; } - public string Close { get; } - } - - public Query HighlightFields(HighlightTags tags, params string[] fields) => HighlightFieldsImpl(tags, fields); - public Query HighlightFields(params string[] fields) => HighlightFieldsImpl(null, fields); - private Query HighlightFieldsImpl(HighlightTags? 
tags, string[] fields) - { - if (fields == null || fields.Length > 0) - { - _highlightFields = fields; - } - _highlightTags = tags; - _wantsHighlight = true; - return this; - } - - public Query SummarizeFields(int contextLen, int fragmentCount, string separator, params string[] fields) - { - if (fields == null || fields.Length > 0) - { - _summarizeFields = fields; - } - _summarizeFragmentLen = contextLen; - _summarizeNumFragments = fragmentCount; - _summarizeSeparator = separator; - _wantsSummarize = true; - return this; - } - - public Query SummarizeFields(params string[] fields) => SummarizeFields(-1, -1, null, fields); - - /// - /// Set the query to be sorted by a sortable field defined in the schema - /// - /// the sorting field's name - /// if set to true, the sorting order is ascending, else descending - /// the query object itself - public Query SetSortBy(string field, bool ascending = true) - { - SortBy = field; - SortAscending = ascending; - return this; - } - - public Query SetWithScores(bool value = true) - { - WithScores = value; - return this; - } - - public Query SetNoContent(bool value = true) - { - NoContent = value; - return this; - } - - public Query SetVerbatim(bool value = true) - { - Verbatim = value; - return this; - } - - public Query SetNoStopwords(bool value = true) - { - NoStopwords = value; - return this; - } - public Query SetLanguage(string language) - { - Language = language; - return this; - } - - /// - /// RediSearch comes with a few very basic scoring functions to evaluate document relevance. They are all based on document scores and term frequency. - /// This is regardless of the ability to use sortable fields. - /// Scoring functions are specified by adding the SCORER {scorer_name} argument to a search query. - /// If you prefer a custom scoring function, it is possible to add more functions using the Extension API. 
- /// These are the pre-bundled scoring functions available in RediSearch and how they work.Each function is mentioned by registered name, - /// that can be passed as a SCORER argument in FT.SEARCH - /// Pre-bundled scoring: - /// - TFIDF (default) (https://oss.redislabs.com/redisearch/Scoring.html#tfidf_default) - /// - TFIDF.DOCNORM (https://oss.redislabs.com/redisearch/Scoring.html#tfidfdocnorm) - /// - BM25 (https://oss.redislabs.com/redisearch/Scoring.html#bm25) - /// - DISMAX (https://oss.redislabs.com/redisearch/Scoring.html#dismax) - /// - DOCSCORE (https://oss.redislabs.com/redisearch/Scoring.html#docscore) - /// - HAMMING (https://oss.redislabs.com/redisearch/Scoring.html#hamming) - /// - /// - /// - public Query SetScoring(string scoring) - { - Scoring = scoring; - return this; - } - } -} diff --git a/src/NRediSearch/QueryBuilder/DisjunctNode.cs b/src/NRediSearch/QueryBuilder/DisjunctNode.cs deleted file mode 100644 index fb00b2e87..000000000 --- a/src/NRediSearch/QueryBuilder/DisjunctNode.cs +++ /dev/null @@ -1,25 +0,0 @@ -// .NET port of https://github.com/RedisLabs/JRediSearch/ - -namespace NRediSearch.QueryBuilder -{ - /// - /// A disjunct node. evaluates to true if any of its children are false. Conversely, this node evaluates to false - /// only iff all of its children are true, making it the exact inverse of IntersectNode - /// - /// DisjunctUnionNode which evalutes to true if all its children are false. 
- public class DisjunctNode : IntersectNode - { - public override string ToString(ParenMode mode) - { - var ret = base.ToString(ParenMode.Never); - if (ShouldUseParens(mode)) - { - return "-(" + ret + ")"; - } - else - { - return "-" + ret; - } - } - } -} diff --git a/src/NRediSearch/QueryBuilder/DisjunctUnionNode.cs b/src/NRediSearch/QueryBuilder/DisjunctUnionNode.cs deleted file mode 100644 index bc162b812..000000000 --- a/src/NRediSearch/QueryBuilder/DisjunctUnionNode.cs +++ /dev/null @@ -1,14 +0,0 @@ -// .NET port of https://github.com/RedisLabs/JRediSearch/ - -namespace NRediSearch.QueryBuilder -{ - /// - /// A disjunct union node is the inverse of a UnionNode. It evaluates to true only iff all its - /// children are false. Conversely, it evaluates to false if any of its children are true. - /// - /// see DisjunctNode which evaluates to true if any of its children are false. - public class DisjunctUnionNode : DisjunctNode - { - protected override string GetJoinString() => "|"; - } -} diff --git a/src/NRediSearch/QueryBuilder/GeoValue.cs b/src/NRediSearch/QueryBuilder/GeoValue.cs deleted file mode 100644 index 655b919a9..000000000 --- a/src/NRediSearch/QueryBuilder/GeoValue.cs +++ /dev/null @@ -1,33 +0,0 @@ -// .NET port of https://github.com/RedisLabs/JRediSearch/ - -using System.Text; -using StackExchange.Redis; - -namespace NRediSearch.QueryBuilder -{ - public class GeoValue : Value - { - private readonly GeoUnit _unit; - private readonly double _lon, _lat, _radius; - - public GeoValue(double lon, double lat, double radius, GeoUnit unit) - { - _lon = lon; - _lat = lat; - _radius = radius; - _unit = unit; - } - - public override string ToString() - { - return new StringBuilder("[") - .Append(_lon.AsRedisString(true)).Append(" ") - .Append(_lat.AsRedisString(true)).Append(" ") - .Append(_radius.AsRedisString(true)).Append(" ") - .Append(_unit.AsRedisString()) - .Append("]").ToString(); - } - - public override bool IsCombinable() => false; - } -} diff --git 
a/src/NRediSearch/QueryBuilder/IntersectNode.cs b/src/NRediSearch/QueryBuilder/IntersectNode.cs deleted file mode 100644 index 2f9e678ce..000000000 --- a/src/NRediSearch/QueryBuilder/IntersectNode.cs +++ /dev/null @@ -1,12 +0,0 @@ -// .NET port of https://github.com/RedisLabs/JRediSearch/ - -namespace NRediSearch.QueryBuilder -{ - /// - /// The intersection node evaluates to true if any of its children are true. - /// - public class IntersectNode : QueryNode - { - protected override string GetJoinString() => " "; - } -} diff --git a/src/NRediSearch/QueryBuilder/Node.cs b/src/NRediSearch/QueryBuilder/Node.cs deleted file mode 100644 index 81a5936ce..000000000 --- a/src/NRediSearch/QueryBuilder/Node.cs +++ /dev/null @@ -1,31 +0,0 @@ -// .NET port of https://github.com/RedisLabs/JRediSearch/ - -namespace NRediSearch.QueryBuilder -{ - public enum ParenMode - { - /// - /// Always encapsulate - /// - Always, - /// - /// Never encapsulate. Note that this may be ignored if parentheses are semantically required (e.g. - ///
@foo:(val1|val2)
. However something like
@foo:v1 @bar:v2
need not be parenthesized. - ///
- Never, - /// - /// Determine encapsulation based on number of children. If the node only has one child, it is not - /// parenthesized, if it has more than one child, it is parenthesized - /// - Default, - } - public interface INode - { - /// - /// Returns the string form of this node. - /// - /// Whether the string should be encapsulated in parentheses
(...)
- /// The string query. - string ToString(ParenMode mode); - } -} diff --git a/src/NRediSearch/QueryBuilder/OptionalNode.cs b/src/NRediSearch/QueryBuilder/OptionalNode.cs deleted file mode 100644 index 4d05d4daf..000000000 --- a/src/NRediSearch/QueryBuilder/OptionalNode.cs +++ /dev/null @@ -1,25 +0,0 @@ -// .NET port of https://github.com/RedisLabs/JRediSearch/ - -namespace NRediSearch.QueryBuilder -{ - /// - /// The optional node affects scoring and ordering. If it evaluates to true, the result is ranked - /// higher. It is helpful to combine it with a UnionNode to rank a document higher if it meets - /// one of several criteria. - /// - public class OptionalNode : IntersectNode - { - public override string ToString(ParenMode mode) - { - var ret = base.ToString(ParenMode.Never); - if (ShouldUseParens(mode)) - { - return "~(" + ret + ")"; - } - else - { - return "~" + ret; - } - } - } -} diff --git a/src/NRediSearch/QueryBuilder/QueryBuilder.cs b/src/NRediSearch/QueryBuilder/QueryBuilder.cs deleted file mode 100644 index aa94f274d..000000000 --- a/src/NRediSearch/QueryBuilder/QueryBuilder.cs +++ /dev/null @@ -1,123 +0,0 @@ -// .NET port of https://github.com/RedisLabs/JRediSearch/ - -namespace NRediSearch.QueryBuilder -{ - /// - /// - /// This class contains methods to construct query nodes. These query nodes can be added to parent query - /// nodes (building a chain) or used as the root query node. - /// - /// You can use
using static
for these helper methods.
- ///
- public static class QueryBuilder - { - public static QueryNode Intersect() => new IntersectNode(); - - /// - /// Create a new intersection node with child nodes. An intersection node is true if all its children - /// are also true - /// - /// sub-condition to add - /// The node - public static QueryNode Intersect(params INode[] n) => Intersect().Add(n); - - /// - /// Create a new intersection node with a field-value pair. - /// - /// The field that should contain this value. If this value is empty, then any field will be checked. - /// Value to check for. The node will be true only if the field (or any field) contains *all* of the values. - /// The query node. - public static QueryNode Intersect(string field, params Value[] values) => Intersect().Add(field, values); - - /// - /// Helper method to create a new intersection node with a string value. - /// - /// The field to check. If left null or empty, all fields will be checked. - /// The value to check. - /// The query node. - public static QueryNode Intersect(string field, string stringValue) => Intersect(field, Values.Value(stringValue)); - - public static QueryNode Union() => new UnionNode(); - - /// - /// Create a union node. Union nodes evaluate to true if any of its children are true. - /// - /// Child node. - /// The union node. - public static QueryNode Union(params INode[] n) => Union().Add(n); - - /// - /// Create a union node which can match an one or more values. - /// - /// Field to check. If empty, all fields are checked. - /// Values to search for. The node evaluates to true if matches any of the values. - /// The union node. - public static QueryNode Union(string field, params Value[] values) => Union().Add(field, values); - - /// - /// Convenience method to match one or more strings. This is equivalent to . - /// - /// Field to match. - /// Strings to check for. - /// The union node. 
- public static QueryNode Union(string field, params string[] values) => Union(field, Values.Value(values)); - - public static QueryNode Disjunct() => new DisjunctNode(); - - /// - /// Create a disjunct node. Disjunct nodes are true iff any of its children are not true. - /// Conversely, this node evaluates to false if all its children are true. - /// - /// Child nodes to add. - /// The disjunct node. - public static QueryNode Disjunct(params INode[] n) => Disjunct().Add(n); - - /// - /// Create a disjunct node using one or more values. The node will evaluate to true iff the field does not - /// match any of the values. - /// - /// Field to check for (empty or null for any field). - /// The values to check for. - /// The disjunct node. - public static QueryNode Disjunct(string field, params Value[] values) => Disjunct().Add(field, values); - - /// - /// Create a disjunct node using one or more values. The node will evaluate to true iff the field does not - /// match any of the values. - /// - /// Field to check for (empty or null for any field). - /// The values to check for. - /// The disjunct node. - public static QueryNode Disjunct(string field, params string[] values) => Disjunct(field, Values.Value(values)); - - public static QueryNode DisjunctUnion() => new DisjunctUnionNode(); - - /// - /// Create a disjunct union node. This node evaluates to true if all of its children are not true. - /// Conversely, this node evaluates as false if any of its children are true. - /// - /// The nodes to union. - /// The node. - public static QueryNode DisjunctUnion(params INode[] n) => DisjunctUnion().Add(n); - - public static QueryNode DisjunctUnion(string field, params Value[] values) => DisjunctUnion().Add(field, values); - - public static QueryNode DisjunctUnion(string field, params string[] values) => DisjunctUnion(field, Values.Value(values)); - - /// - /// Creates a new . - /// - /// The new . 
- public static QueryNode Optional() => new OptionalNode(); - - /// - /// Create an optional node. Optional nodes do not affect which results are returned but they influence - /// ordering and scoring. - /// - /// The nodes to evaluate as optional. - /// The new node. - public static QueryNode Optional(params INode[] n) => Optional().Add(n); - - public static QueryNode Optional(string field, params Value[] values) => Optional().Add(field, values); - } -} diff --git a/src/NRediSearch/QueryBuilder/QueryNode.cs b/src/NRediSearch/QueryBuilder/QueryNode.cs deleted file mode 100644 index 6883c0eee..000000000 --- a/src/NRediSearch/QueryBuilder/QueryNode.cs +++ /dev/null @@ -1,99 +0,0 @@ -// .NET port of https://github.com/RedisLabs/JRediSearch/ - -using System.Collections.Generic; -using System.Linq; -using System.Text; - -namespace NRediSearch.QueryBuilder -{ - public abstract class QueryNode : INode - { - private readonly List children = new List(); - - protected abstract string GetJoinString(); - - /** - * Add a match criteria to this node - * @param field The field to check. If null or empty, then any field is checked - * @param values Values to check for. - * @return The current node, for chaining. - */ - public QueryNode Add(string field, params Value[] values) - { - children.Add(new ValueNode(field, GetJoinString(), values)); - return this; - } - - /** - * Convenience method to add a list of string values - * @param field Field to check for - * @param values One or more string values. - * @return The current node, for chaining. - */ - public QueryNode Add(string field, params string[] values) - { - children.Add(new ValueNode(field, GetJoinString(), values)); - return this; - } - - /** - * Add a list of values from a collection - * @param field The field to check - * @param values Collection of values to match - * @return The current node for chaining. 
- */ - public QueryNode Add(string field, IList values) - { - return Add(field, values.ToArray()); - } - - /** - * Add children nodes to this node. - * @param nodes Children nodes to add - * @return The current node, for chaining. - */ - public QueryNode Add(params INode[] nodes) - { - children.AddRange(nodes); - return this; - } - - protected bool ShouldUseParens(ParenMode mode) - { - if (mode == ParenMode.Always) - { - return true; - } - else if (mode == ParenMode.Never) - { - return false; - } - else - { - return children.Count > 1; - } - } - - public virtual string ToString(ParenMode mode) - { - StringBuilder sb = new StringBuilder(); - - if (ShouldUseParens(mode)) - { - sb.Append("("); - } - var sj = new StringJoiner(sb, GetJoinString()); - foreach (var n in children) - { - sj.Add(n.ToString(mode)); - } - if (ShouldUseParens(mode)) - { - sb.Append(")"); - } - return sb.ToString(); - } - - public override string ToString() => ToString(ParenMode.Default); - } -} diff --git a/src/NRediSearch/QueryBuilder/RangeValue.cs b/src/NRediSearch/QueryBuilder/RangeValue.cs deleted file mode 100644 index f53fb7ab4..000000000 --- a/src/NRediSearch/QueryBuilder/RangeValue.cs +++ /dev/null @@ -1,51 +0,0 @@ -// .NET port of https://github.com/RedisLabs/JRediSearch/ - -using System.Text; - -namespace NRediSearch.QueryBuilder -{ - public sealed class RangeValue : Value - { - private readonly double from, to; - private bool inclusiveMin = true, inclusiveMax = true; - - public override bool IsCombinable() => false; - - private static void AppendNum(StringBuilder sb, double n, bool inclusive) - { - if (!inclusive) - { - sb.Append("("); - } - sb.Append(n.AsRedisString(true)); - } - - public override string ToString() - { - StringBuilder sb = new StringBuilder(); - sb.Append("["); - AppendNum(sb, from, inclusiveMin); - sb.Append(" "); - AppendNum(sb, to, inclusiveMax); - sb.Append("]"); - return sb.ToString(); - } - - public RangeValue(double from, double to) - { - this.from = from; - 
this.to = to; - } - - public RangeValue InclusiveMin(bool val) - { - inclusiveMin = val; - return this; - } - public RangeValue InclusiveMax(bool val) - { - inclusiveMax = val; - return this; - } - } -} diff --git a/src/NRediSearch/QueryBuilder/StringJoiner.cs b/src/NRediSearch/QueryBuilder/StringJoiner.cs deleted file mode 100644 index c8b3cf2b4..000000000 --- a/src/NRediSearch/QueryBuilder/StringJoiner.cs +++ /dev/null @@ -1,25 +0,0 @@ -// .NET port of https://github.com/RedisLabs/JRediSearch/ - -using System.Text; - -namespace NRediSearch.QueryBuilder -{ - internal ref struct StringJoiner // this is to replace a Java feature cleanly - { - private readonly StringBuilder _sb; - private readonly string _delimiter; - private bool _isFirst; - public StringJoiner(StringBuilder sb, string delimiter) - { - _sb = sb; - _delimiter = delimiter; - _isFirst = true; - } - public void Add(string value) - { - if (_isFirst) _isFirst = false; - else _sb.Append(_delimiter); - _sb.Append(value); - } - } -} diff --git a/src/NRediSearch/QueryBuilder/UnionNode.cs b/src/NRediSearch/QueryBuilder/UnionNode.cs deleted file mode 100644 index 33a409b67..000000000 --- a/src/NRediSearch/QueryBuilder/UnionNode.cs +++ /dev/null @@ -1,9 +0,0 @@ -// .NET port of https://github.com/RedisLabs/JRediSearch/ - -namespace NRediSearch.QueryBuilder -{ - public class UnionNode : QueryNode - { - protected override string GetJoinString() => "|"; - } -} diff --git a/src/NRediSearch/QueryBuilder/Value.cs b/src/NRediSearch/QueryBuilder/Value.cs deleted file mode 100644 index e934804e6..000000000 --- a/src/NRediSearch/QueryBuilder/Value.cs +++ /dev/null @@ -1,9 +0,0 @@ -// .NET port of https://github.com/RedisLabs/JRediSearch/ - -namespace NRediSearch.QueryBuilder -{ - public abstract class Value - { - public virtual bool IsCombinable() => false; - } -} diff --git a/src/NRediSearch/QueryBuilder/ValueNode.cs b/src/NRediSearch/QueryBuilder/ValueNode.cs deleted file mode 100644 index c17044053..000000000 --- 
a/src/NRediSearch/QueryBuilder/ValueNode.cs +++ /dev/null @@ -1,93 +0,0 @@ -// .NET port of https://github.com/RedisLabs/JRediSearch/ - -using System.Text; - -namespace NRediSearch.QueryBuilder -{ - public class ValueNode : INode - { - private readonly Value[] _values; - private readonly string _field, _joinString; - - public ValueNode(string field, string joinstr, params Value[] values) - { - _field = field; - _values = values; - _joinString = joinstr; - } - - private static Value[] FromStrings(string[] values) - { - Value[] objs = new Value[values.Length]; - for (int i = 0; i < values.Length; i++) - { - objs[i] = Values.Value(values[i]); - } - return objs; - } - - public ValueNode(string field, string joinstr, params string[] values) - : this(field, joinstr, FromStrings(values)) { } - - private string FormatField() - { - if (string.IsNullOrWhiteSpace(_field)) return ""; - return "@" + _field + ":"; - } - - private string ToStringCombinable(ParenMode mode) - { - StringBuilder sb = new StringBuilder(FormatField()); - if (_values.Length > 1 || mode == ParenMode.Always) - { - sb.Append("("); - } - var sj = new StringJoiner(sb, _joinString); - foreach (var v in _values) - { - sj.Add(v.ToString()); - } - if (_values.Length > 1 || mode == ParenMode.Always) - { - sb.Append(")"); - } - return sb.ToString(); - } - - private string ToStringDefault(ParenMode mode) - { - bool useParen = mode == ParenMode.Always; - if (!useParen) - { - useParen = mode != ParenMode.Never && _values.Length > 1; - } - var sb = new StringBuilder(); - if (useParen) - { - sb.Append("("); - } - var sj = new StringJoiner(sb, _joinString); - foreach (var v in _values) - { - sj.Add(FormatField() + v); - } - if (useParen) - { - sb.Append(")"); - } - return sb.ToString(); - } - - public string ToString(ParenMode mode) - { - if (_values[0].IsCombinable()) - { - return ToStringCombinable(mode); - } - else - { - return ToStringDefault(mode); - } - } - } -} diff --git a/src/NRediSearch/QueryBuilder/Values.cs 
b/src/NRediSearch/QueryBuilder/Values.cs deleted file mode 100644 index caceb4db2..000000000 --- a/src/NRediSearch/QueryBuilder/Values.cs +++ /dev/null @@ -1,56 +0,0 @@ -// .NET port of https://github.com/RedisLabs/JRediSearch/ - -using System; - -namespace NRediSearch.QueryBuilder -{ - public static class Values - { - private abstract class ScalableValue : Value - { - public override bool IsCombinable() => true; - } - private sealed class ValueValue : ScalableValue - { - private readonly string s; - public ValueValue(string s) - { - this.s = s; - } - public override string ToString() => s; - } - public static Value Value(string s) => new ValueValue(s); - - internal static Value[] Value(string[] s) => Array.ConvertAll(s, _ => Value(_)); - - public static RangeValue Between(double from, double to) => new RangeValue(from, to); - - public static RangeValue Between(int from, int to) => new RangeValue((double)from, (double)to); - - public static RangeValue Equal(double d) => new RangeValue(d, d); - - public static RangeValue Equal(int i) => Equal((double)i); - - public static RangeValue LessThan(double d) => new RangeValue(double.NegativeInfinity, d).InclusiveMax(false); - - public static RangeValue GreaterThan(double d) => new RangeValue(d, double.PositiveInfinity).InclusiveMin(false); - public static RangeValue LessThanOrEqual(double d) => LessThan(d).InclusiveMax(true); - - public static RangeValue GreaterThanOrEqual(double d) => GreaterThan(d).InclusiveMin(true); - - public static Value Tags(params string[] tags) - { - if (tags.Length == 0) - { - throw new ArgumentException("Must have at least one tag", nameof(tags)); - } - return new TagValue("{" + string.Join(" | ", tags) + "}"); - } - private sealed class TagValue : Value - { - private readonly string s; - public TagValue(string s) { this.s = s; } - public override string ToString() => s; - } - } -} diff --git a/src/NRediSearch/Schema.cs b/src/NRediSearch/Schema.cs deleted file mode 100644 index 
cd2e4ae36..000000000 --- a/src/NRediSearch/Schema.cs +++ /dev/null @@ -1,181 +0,0 @@ -// .NET port of https://github.com/RedisLabs/JRediSearch/ - -using System; -using System.Collections.Generic; - -namespace NRediSearch -{ - /// - /// Schema abstracts the schema definition when creating an index. - /// Documents can contain fields not mentioned in the schema, but the index will only index pre-defined fields - /// - public sealed class Schema - { - public enum FieldType - { - FullText, - Geo, - Numeric, - Tag - } - - public class Field - { - public string Name { get; } - public FieldType Type { get; } - public bool Sortable { get; } - public bool NoIndex { get; } - - internal Field(string name, FieldType type, bool sortable, bool noIndex = false) - { - Name = name; - Type = type; - Sortable = sortable; - NoIndex = noIndex; - } - - internal virtual void SerializeRedisArgs(List args) - { - static object GetForRedis(FieldType type) - { - switch (type) - { - case FieldType.FullText: return "TEXT".Literal(); - case FieldType.Geo: return "GEO".Literal(); - case FieldType.Numeric: return "NUMERIC".Literal(); - case FieldType.Tag: return "TAG".Literal(); - default: throw new ArgumentOutOfRangeException(nameof(type)); - } - } - args.Add(Name); - args.Add(GetForRedis(Type)); - if (Sortable) { args.Add("SORTABLE".Literal()); } - if (NoIndex) { args.Add("NOINDEX".Literal()); } - } - } - - public class TextField : Field - { - public double Weight { get; } - public bool NoStem { get; } - - public TextField(string name, double weight = 1.0, bool sortable = false, bool noStem = false, bool noIndex = false) : base(name, FieldType.FullText, sortable, noIndex) - { - Weight = weight; - NoStem = noStem; - } - - internal override void SerializeRedisArgs(List args) - { - base.SerializeRedisArgs(args); - if (Weight != 1.0) - { - args.Add("WEIGHT".Literal()); - args.Add(Weight); - } - if (NoStem) args.Add("NOSTEM".Literal()); - } - } - - public List Fields { get; } = new List(); - - /// - 
/// Add a field to the schema. - /// - /// The to add. - /// The object. - public Schema AddField(Field field) - { - Fields.Add(field ?? throw new ArgumentNullException(nameof(field))); - return this; - } - - /// - /// Add a text field to the schema with a given weight. - /// - /// The field's name. - /// Its weight, a positive floating point number. - /// The object. - public Schema AddTextField(string name, double weight = 1.0) - { - Fields.Add(new TextField(name, weight)); - return this; - } - - /// - /// Add a text field that can be sorted on. - /// - /// The field's name. - /// Its weight, a positive floating point number. - /// The object. - public Schema AddSortableTextField(string name, double weight = 1.0) - { - Fields.Add(new TextField(name, weight, true)); - return this; - } - - /// - /// Add a numeric field to the schema. - /// - /// The field's name. - /// The object. - public Schema AddGeoField(string name) - { - Fields.Add(new Field(name, FieldType.Geo, false)); - return this; - } - - /// - /// Add a numeric field to the schema. - /// - /// The field's name. - /// The object. - public Schema AddNumericField(string name) - { - Fields.Add(new Field(name, FieldType.Numeric, false)); - return this; - } - - /// - /// Add a numeric field that can be sorted on. - /// - /// The field's name. - /// The object. - public Schema AddSortableNumericField(string name) - { - Fields.Add(new Field(name, FieldType.Numeric, true)); - return this; - } - - public class TagField : Field - { - public string Separator { get; } - internal TagField(string name, string separator = ",") : base(name, FieldType.Tag, false) - { - Separator = separator; - } - - internal override void SerializeRedisArgs(List args) - { - base.SerializeRedisArgs(args); - if (Separator != ",") - { - args.Add("SEPARATOR".Literal()); - args.Add(Separator); - } - } - } - - /// - /// Add a TAG field. - /// - /// The field's name. - /// The tag separator. - /// The object. 
- public Schema AddTagField(string name, string separator = ",") - { - Fields.Add(new TagField(name, separator)); - return this; - } - } -} diff --git a/src/NRediSearch/SearchResult.cs b/src/NRediSearch/SearchResult.cs deleted file mode 100644 index a55defdd1..000000000 --- a/src/NRediSearch/SearchResult.cs +++ /dev/null @@ -1,100 +0,0 @@ -// .NET port of https://github.com/RedisLabs/JRediSearch/ - -using StackExchange.Redis; -using System.Collections.Generic; -using System.Linq; - -namespace NRediSearch -{ - /// - /// SearchResult encapsulates the returned result from a search query. - /// It contains publically accessible fields for the total number of results, and an array of - /// objects conatining the actual returned documents. - /// - public class SearchResult - { - public long TotalResults { get; } - public List Documents { get; } - - internal SearchResult(RedisResult[] resp, bool hasContent, bool hasScores, bool hasPayloads, bool shouldExplainScore) - { - // Calculate the step distance to walk over the results. 
- // The order of results is id, score (if withScore), payLoad (if hasPayloads), fields - int step = 1; - int scoreOffset = 0; - int contentOffset = 1; - int payloadOffset = 0; - if (hasScores) - { - step++; - scoreOffset = 1; - contentOffset++; - - } - if (hasContent) - { - step++; - if (hasPayloads) - { - payloadOffset = scoreOffset + 1; - step++; - contentOffset++; - } - } - - // the first element is always the number of results - TotalResults = (long)resp[0]; - var docs = new List((resp.Length - 1) / step); - Documents = docs; - for (int i = 1; i < resp.Length; i += step) - { - var id = (string)resp[i]; - double score = 1.0; - byte[] payload = null; - RedisValue[] fields = null; - string[] scoreExplained = null; - if (hasScores) - { - if (shouldExplainScore) - { - var scoreResult = (RedisResult[])resp[i + scoreOffset]; - score = (double) scoreResult[0]; - var redisResultsScoreExplained = (RedisResult[]) scoreResult[1]; - scoreExplained = FlatRedisResultArray(redisResultsScoreExplained).ToArray(); - } - else - { - score = (double)resp[i + scoreOffset]; - } - } - if (hasPayloads) - { - payload = (byte[])resp[i + payloadOffset]; - } - - if (hasContent) - { - fields = (RedisValue[])resp[i + contentOffset]; - } - - docs.Add(Document.Load(id, score, payload, fields, scoreExplained)); - } - } - - static IEnumerable FlatRedisResultArray(RedisResult[] collection) - { - foreach (var o in collection) - { - if (o.Type == ResultType.MultiBulk) - { - foreach (string t in FlatRedisResultArray((RedisResult[])o)) - yield return t; - } - else - { - yield return o.ToString(); - } - } - } - } -} diff --git a/src/NRediSearch/Suggestion.cs b/src/NRediSearch/Suggestion.cs deleted file mode 100644 index 3e907b735..000000000 --- a/src/NRediSearch/Suggestion.cs +++ /dev/null @@ -1,108 +0,0 @@ -// .NET port of https://github.com/RedisLabs/JRediSearch/ -using System; -using StackExchange.Redis; - -namespace NRediSearch -{ - public sealed class Suggestion - { - public string String { get; 
} - public double Score { get; } - public string Payload { get; } - - private Suggestion(SuggestionBuilder builder) - { - String = builder._string; - Score = builder._score; - Payload = builder._payload; - } - - public override bool Equals(object obj) - { - if (this == obj) - { - return true; - } - - if(!(obj is Suggestion that)) - { - return false; - } - - return Score == that.Score && String == that.String && Payload == that.Payload; - } - - public override int GetHashCode() - { - unchecked - { - int hash = 17; - - hash = hash * 31 + String.GetHashCode(); - hash = hash * 31 + Score.GetHashCode(); - hash = hash * 31 + Payload.GetHashCode(); - - return hash; - } - } - - public override string ToString() => - $"Suggestion{{string='{String}', score={Score}, payload='{Payload}'}}"; - - public SuggestionBuilder ToBuilder() => new SuggestionBuilder(this); - - public static SuggestionBuilder Builder => new SuggestionBuilder(); - - public sealed class SuggestionBuilder - { - internal string _string; - internal double _score = 1.0; - internal string _payload; - - public SuggestionBuilder() { } - - public SuggestionBuilder(Suggestion suggestion) - { - _string = suggestion.String; - _score = suggestion.Score; - _payload = suggestion.Payload; - } - - public SuggestionBuilder String(string @string) - { - _string = @string; - - return this; - } - - public SuggestionBuilder Score(double score) - { - _score = score; - - return this; - } - - public SuggestionBuilder Payload(string payload) - { - _payload = payload; - - return this; - } - - public Suggestion Build() => Build(false); - - internal Suggestion Build(bool fromServer) - { - bool isStringMissing = _string == null; - bool isScoreOutOfRange = !fromServer && (_score < 0.0 || _score > 1.0); - - if (isStringMissing || isScoreOutOfRange) - { - throw new RedisCommandException($"Missing required fields: {(isStringMissing ? "string" : string.Empty)} {(isScoreOutOfRange ? 
"score not within range" : string.Empty)}: {_score.ToString()}"); - } - - return new Suggestion(this); - } - } - } -} diff --git a/src/NRediSearch/SuggestionOptions.cs b/src/NRediSearch/SuggestionOptions.cs deleted file mode 100644 index d7c30365a..000000000 --- a/src/NRediSearch/SuggestionOptions.cs +++ /dev/null @@ -1,107 +0,0 @@ -// .NET port of https://github.com/RedisLabs/JRediSearch/ -using System; - -namespace NRediSearch -{ - public class SuggestionOptions - { - private readonly object WITHPAYLOADS_FLAG = "WITHPAYLOADS".Literal(); - private readonly object WITHSCORES_FLAG = "WITHSCORES".Literal(); - - public SuggestionOptions(SuggestionOptionsBuilder builder) - { - With = builder._with; - Fuzzy = builder._fuzzy; - Max = builder._max; - } - - public static SuggestionOptionsBuilder Builder => new SuggestionOptionsBuilder(); - - public WithOptions With { get; } - - public bool Fuzzy { get; } - - public int Max { get; } = 5; - - public object[] GetFlags() - { - if (HasOption(WithOptions.PayloadsAndScores)) - { - return new[] { WITHPAYLOADS_FLAG, WITHSCORES_FLAG }; - } - - if (HasOption(WithOptions.Payloads)) - { - return new[] { WITHPAYLOADS_FLAG }; - } - - if (HasOption(WithOptions.Scores)) - { - return new[] { WITHSCORES_FLAG }; - } - - return default; - } - - public SuggestionOptionsBuilder ToBuilder() => new SuggestionOptionsBuilder(this); - - internal bool GetIsPayloadAndScores() => HasOption(WithOptions.PayloadsAndScores); - - internal bool GetIsPayload() => HasOption(WithOptions.Payloads); - - internal bool GetIsScores() => HasOption(WithOptions.Scores); - - [Flags] - public enum WithOptions - { - None = 0, - Payloads = 1, - Scores = 2, - PayloadsAndScores = Payloads | Scores - } - - internal bool HasOption(WithOptions option) => (With & option) != 0; - - public sealed class SuggestionOptionsBuilder - { - internal WithOptions _with; - internal bool _fuzzy; - internal int _max = 5; - - public SuggestionOptionsBuilder() { } - - public 
SuggestionOptionsBuilder(SuggestionOptions options) - { - _with = options.With; - _fuzzy = options.Fuzzy; - _max = options.Max; - } - - public SuggestionOptionsBuilder Fuzzy() - { - _fuzzy = true; - - return this; - } - - public SuggestionOptionsBuilder Max(int max) - { - _max = max; - - return this; - } - - public SuggestionOptionsBuilder With(WithOptions with) - { - _with = with; - - return this; - } - - public SuggestionOptions Build() - { - return new SuggestionOptions(this); - } - } - } -} diff --git a/src/RESPite/Buffers/CycleBuffer.cs b/src/RESPite/Buffers/CycleBuffer.cs new file mode 100644 index 000000000..14774b357 --- /dev/null +++ b/src/RESPite/Buffers/CycleBuffer.cs @@ -0,0 +1,753 @@ +using System.Buffers; +using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; +using RESPite.Internal; + +namespace RESPite.Buffers; + +/// +/// Manages the state for a based IO buffer. Unlike Pipe, +/// it is not intended for a separate producer-consumer - there is no thread-safety, and no +/// activation; it just handles the buffers. It is intended to be used as a mutable (non-readonly) +/// field in a type that performs IO; the internal state mutates - it should not be passed around. +/// +/// Notionally, there is an uncommitted area (write) and a committed area (read). Process: +/// - producer loop (*note no concurrency**) +/// - call to get a new scratch +/// - (write to that span) +/// - call to mark complete portions +/// - consumer loop (*note no concurrency**) +/// - call to see if there is a single-span chunk; otherwise +/// - call to get the multi-span chunk +/// - (process none, some, or all of that data) +/// - call to indicate how much data is no longer needed +/// Emphasis: no concurrency! This is intended for a single worker acting as both producer and consumer. 
+/// +/// There is a *lot* of validation in debug mode; we want to be super sure that we don't corrupt buffer state. +/// +[Experimental(Experiments.Respite, UrlFormat = Experiments.UrlFormat)] +public partial struct CycleBuffer +{ + #if TRACK_MEMORY + private static MemoryPool DefaultPool => MemoryTrackedPool.Shared; + #else + private static MemoryPool DefaultPool => MemoryPool.Shared; + #endif + + // note: if someone uses an uninitialized CycleBuffer (via default): that's a skills issue; git gud + public static CycleBuffer Create( + MemoryPool? pool = null, + int pageSize = DefaultPageSize, + ICycleBufferCallback? callback = null) + { + pool ??= DefaultPool; + if (pageSize <= 0) pageSize = DefaultPageSize; + if (pageSize > pool.MaxBufferSize) pageSize = pool.MaxBufferSize; + return new CycleBuffer(pool, pageSize, callback); + } + + private CycleBuffer(MemoryPool pool, int pageSize, ICycleBufferCallback? callback) + { + Pool = pool; + PageSize = pageSize; + _callback = callback; + leasedStart = -1; + } + + private const int DefaultPageSize = 8 * 1024; + + public int PageSize { get; } + public MemoryPool Pool { get; } + private readonly ICycleBufferCallback? _callback; + + private Segment? startSegment, endSegment; + + private int endSegmentCommitted, endSegmentLength; + private int leasedStart; + + public bool TryGetCommitted(out ReadOnlySpan span) + { + DebugAssertValid(); + if (!ReferenceEquals(startSegment, endSegment)) + { + span = default; + return false; + } + + span = startSegment is null ? default : startSegment.Memory.Span.Slice(start: 0, length: endSegmentCommitted); + return true; + } + + /// + /// Commits data written to buffers from , making it available for consumption + /// via . This compares to . 
+ /// + public void Commit(int count) + { + DebugAssertValid(); + if (leasedStart < 0) + { + ThrowNoLease(); + } + + if (count <= 0) + { + if (count < 0) ThrowCount(); + return; + } + + var available = endSegmentLength - endSegmentCommitted; + if (count > available) ThrowCount(); + + var afterLeasedStart = endSegment!.StartTrimCount + endSegmentCommitted; + + if (leasedStart != afterLeasedStart) CopyDueToDiscardDuringWrite(count); + endSegmentCommitted += count; + DebugAssertValid(); + + static void ThrowCount() => throw new ArgumentOutOfRangeException(nameof(count)); + static void ThrowNoLease() => throw new InvalidOperationException("No open lease"); + } + + private void CopyDueToDiscardDuringWrite(int count) + { + var targetOffset = endSegment!.StartTrimCount + endSegmentCommitted; + if (targetOffset != leasedStart) + { + var full = endSegment.UntrimmedMemory.Span; + full.Slice(leasedStart, count) + .CopyTo(full.Slice(targetOffset, count)); + } + } + public bool CommittedIsEmpty => ReferenceEquals(startSegment, endSegment) & endSegmentCommitted == 0; + + /// + /// Marks committed data as fully consumed; it will no longer appear in later calls to . 
+ /// + public void DiscardCommitted(int count) + { + DebugAssertValid(); + if (count == 0) return; + + // optimize for most common case, where we consume everything + if (ReferenceEquals(startSegment, endSegment) + & count == endSegmentCommitted + & count > 0) + { + /* + we are consuming all the data in the single segment; we can + just reset that segment back to full size and re-use as-is; + note that we also know that there must *be* a segment + for the count check to pass + */ + endSegmentCommitted = 0; + endSegmentLength = endSegment!.Untrim(expandBackwards: true); + DebugAssertValid(0); + DebugCounters.OnDiscardFull(count); + } + else + { + DiscardCommittedSlow(count); + } + } + + public void DiscardCommitted(long count) + { + DebugAssertValid(); + if (count == 0) return; + + // optimize for most common case, where we consume everything + if (ReferenceEquals(startSegment, endSegment) + & count == endSegmentCommitted + & count > 0) // checks sign *and* non-trimmed + { + // see for logic + endSegmentCommitted = 0; + endSegmentLength = endSegment!.Untrim(expandBackwards: true); + DebugAssertValid(0); + DebugCounters.OnDiscardFull(count); + } + else + { + DiscardCommittedSlow(count); + } + } + + private void DiscardCommittedSlow(long count) + { + DebugCounters.OnDiscardPartial(count); + DebugAssertValid(); +#if DEBUG + var originalLength = GetCommittedLength(); + var originalCount = count; + var expectedLength = originalLength - originalCount; + string blame = nameof(DiscardCommittedSlow); +#endif + while (count > 0) + { + DebugAssertValid(); + var segment = startSegment; + if (segment is null) break; + if (ReferenceEquals(segment, endSegment)) + { + // first==final==only segment + if (count == endSegmentCommitted) + { + endSegmentLength = startSegment!.Untrim(); + endSegmentCommitted = 0; // = untrimmed and unused +#if DEBUG + blame += ",full-final (t)"; +#endif + } + else + { + // discard from the start (note: don't need to compensate with writingCopyOffset 
until we untrim) + int count32 = checked((int)count); + segment.TrimStart(count32); + endSegmentLength -= count32; + endSegmentCommitted -= count32; +#if DEBUG + blame += ",partial-final"; +#endif + } + + count = 0; + break; + } + else if (count < segment.Length) + { + // multiple, but can take some (not all) of the first buffer +#if DEBUG + var len = segment.Length; +#endif + segment.TrimStart((int)count); + Debug.Assert(segment.Length > 0, "parial trim should have left non-empty segment"); +#if DEBUG + Debug.Assert(segment.Length == len - count, "trim failure"); + blame += ",partial-first"; +#endif + count = 0; + break; + } + else + { + // multiple; discard the entire first segment + count -= segment.Length; + startSegment = + segment.ResetAndGetNext(); // we already did a ref-check, so we know this isn't going past endSegment + endSegment!.AppendOrRecycle(segment, maxDepth: 2); + DebugAssertValid(); +#if DEBUG + blame += ",full-first"; +#endif + } + } + + if (count != 0) ThrowCount(); +#if DEBUG + DebugAssertValid(expectedLength, blame); + _ = originalLength; + _ = originalCount; +#endif + + [DoesNotReturn] + static void ThrowCount() => throw new ArgumentOutOfRangeException(nameof(count)); + } + + [Conditional("DEBUG")] + private void DebugAssertValid(long expectedCommittedLength, [CallerMemberName] string caller = "") + { + DebugAssertValid(); + var actual = GetCommittedLength(); + Debug.Assert( + expectedCommittedLength >= 0, + $"Expected committed length is just... 
wrong: {expectedCommittedLength} (from {caller})"); + Debug.Assert( + expectedCommittedLength == actual, + $"Committed length mismatch: expected {expectedCommittedLength}, got {actual} (from {caller})"); + } + + [Conditional("DEBUG")] + private void DebugAssertValid() + { +#if DEBUG + if (startSegment is null) + { + Debug.Assert( + endSegmentLength == 0 & endSegmentCommitted == 0, + "un-init state should be zero"); + return; + } + + Debug.Assert(endSegment is not null, "end segment must not be null if start segment exists"); + Debug.Assert( + endSegmentLength == endSegment!.Length, + $"end segment length is incorrect - expected {endSegmentLength}, got {endSegment.Length}"); + Debug.Assert(endSegmentCommitted <= endSegmentLength, $"end segment is over-committed - {endSegmentCommitted} of {endSegmentLength}"); + + // check running indices + startSegment?.DebugAssertValidChain(); +#endif + } + + public long GetCommittedLength() + { + if (ReferenceEquals(startSegment, endSegment)) + { + return endSegmentCommitted; + } + + // note that the start-segment is pre-trimmed; we don't need to account for an offset on the left + return (endSegment!.RunningIndex + endSegmentCommitted) - startSegment!.RunningIndex; + } + + /// + /// When used with , this means "any non-empty buffer". + /// + public const int GetAnything = 0; + + /// + /// When used with , this means "any full buffer". + /// + public const int GetFullPagesOnly = -1; + + public bool TryGetFirstCommittedSpan(int minBytes, out ReadOnlySpan span) + { + DebugAssertValid(); + if (TryGetFirstCommittedMemory(minBytes, out var memory)) + { + span = memory.Span; + return true; + } + + span = default; + return false; + } + + /// + /// The minLength arg: -ve means "full segments only" (useful when buffering outbound network data to avoid + /// packet fragmentation); otherwise, it is the minimum length we want. 
+ /// + public bool TryGetFirstCommittedMemory(int minBytes, out ReadOnlyMemory memory) + { + if (minBytes == 0) minBytes = 1; // success always means "at least something" + DebugAssertValid(); + if (ReferenceEquals(startSegment, endSegment)) + { + // single page + var available = endSegmentCommitted; + if (available == 0) + { + // empty (includes uninitialized) + memory = default; + return false; + } + + memory = startSegment!.Memory; + var memLength = memory.Length; + if (available == memLength) + { + // full segment; is it enough to make the caller happy? + return available >= minBytes; + } + + // partial segment (and we know it isn't empty) + memory = memory.Slice(start: 0, length: available); + return available >= minBytes & minBytes > 0; // last check here applies the -ve logic + } + + // multi-page; hand out the first page (which is, by definition: full) + memory = startSegment!.Memory; + return memory.Length >= minBytes; + } + + /// + /// Note that this chain is invalidated by any other operations; no concurrency. + /// + public ReadOnlySequence GetAllCommitted() + { + if (ReferenceEquals(startSegment, endSegment)) + { + // single segment, fine + return startSegment is null + ? 
default + : new ReadOnlySequence(startSegment.Memory.Slice(start: 0, length: endSegmentCommitted)); + } + +#if PARSE_DETAIL + long length = GetCommittedLength(); +#endif + ReadOnlySequence ros = new(startSegment!, 0, endSegment!, endSegmentCommitted); +#if PARSE_DETAIL + Debug.Assert(ros.Length == length, $"length mismatch: calculated {length}, actual {ros.Length}"); +#endif + return ros; + } + + private Segment GetNextSegment() + { + DebugAssertValid(); + if (endSegment is not null) + { + endSegment.TrimEnd(endSegmentCommitted); + Debug.Assert(endSegment.Length == endSegmentCommitted, "trim failure"); + endSegmentLength = endSegmentCommitted; + DebugAssertValid(); + + // advertise the old page as available + _callback?.PageComplete(); + + var spare = endSegment.Next; + if (spare is not null) + { + // we already have a dangling segment; just update state + endSegment.DebugAssertValidChain(); + endSegment = spare; + endSegmentCommitted = 0; + endSegmentLength = spare.Length; + DebugAssertValid(); + return spare; + } + } + + Segment newSegment = Segment.Create(Pool.Rent(PageSize)); + if (endSegment is null) + { + // tabula rasa + endSegmentLength = newSegment.Length; + endSegment = startSegment = newSegment; + DebugAssertValid(); + return newSegment; + } + + endSegment.Append(newSegment); + endSegmentCommitted = 0; + endSegmentLength = newSegment.Length; + endSegment = newSegment; + DebugAssertValid(); + return newSegment; + } + + /// + /// Gets a scratch area for new data; this compares to . + /// + public Span GetUncommittedSpan(int hint = 0) + => GetUncommittedMemory(hint).Span; + + /// + /// Gets a scratch area for new data; this compares to . 
+ /// + public Memory GetUncommittedMemory(int hint = 0) + { + DebugAssertValid(); + var segment = endSegment; + if (segment is not null) + { + leasedStart = segment.StartTrimCount + endSegmentCommitted; + var memory = segment.Memory; + if (endSegmentCommitted != 0) memory = memory.Slice(start: endSegmentCommitted); + if (hint <= 0) // allow anything non-empty + { + if (!memory.IsEmpty) return MemoryMarshal.AsMemory(memory); + } + else if (memory.Length >= Math.Min(hint, PageSize >> 2)) // respect the hint up to 1/4 of the page size + { + return MemoryMarshal.AsMemory(memory); + } + } + + // new segment, will always be entire + segment = GetNextSegment(); + leasedStart = segment.StartTrimCount + endSegmentCommitted; + Debug.Assert(leasedStart == 0, "should be zero for a new segment"); + return MemoryMarshal.AsMemory(segment.Memory); + } + + /// + /// This is the available unused buffer space, commonly used as the IO read-buffer to avoid + /// additional buffer-copy operations. + /// + public int UncommittedAvailable + { + get + { + DebugAssertValid(); + return endSegmentLength - endSegmentCommitted; + } + } + + private sealed class Segment : ReadOnlySequenceSegment + { + private Segment() { } + private IMemoryOwner _lease = NullLease.Instance; + private static Segment? _spare; + private Flags _flags; + + [Flags] + private enum Flags + { + None = 0, + StartTrim = 1 << 0, + EndTrim = 1 << 2, + } + + public static Segment Create(IMemoryOwner lease) + { + Debug.Assert(lease is not null, "null lease"); + var memory = lease!.Memory; + if (memory.IsEmpty) ThrowEmpty(); + + var obj = Interlocked.Exchange(ref _spare, null) ?? 
new(); + return obj.Init(lease, memory); + static void ThrowEmpty() => throw new InvalidOperationException("leased segment is empty"); + } + + private Segment Init(IMemoryOwner lease, Memory memory) + { + _lease = lease; + Memory = memory; + return this; + } + + public int Length => Memory.Length; + + public void Append(Segment next) + { + Debug.Assert(Next is null, "current segment already has a next"); + Debug.Assert(next.Next is null && next.RunningIndex == 0, "inbound next segment is already in a chain"); + next.RunningIndex = RunningIndex + Length; + Next = next; + DebugAssertValidChain(); + } + + private void ApplyChainDelta(int delta) + { + if (delta != 0) + { + var node = Next; + while (node is not null) + { + node.RunningIndex += delta; + node = node.Next; + } + } + } + + public void TrimEnd(int newLength) + { + var delta = Length - newLength; + if (delta != 0) + { + // buffer wasn't fully used; trim + _flags |= Flags.EndTrim; + Memory = Memory.Slice(0, newLength); + ApplyChainDelta(-delta); + DebugAssertValidChain(); + } + } + + public void TrimStart(int remove) + { + if (remove != 0) + { + _flags |= Flags.StartTrim; + Memory = Memory.Slice(start: remove); + RunningIndex += remove; // so that ROS length keeps working; note we *don't* need to adjust the chain + DebugAssertValidChain(); + StartTrimCount += remove; + } + } + + public new Segment? Next + { + get => (Segment?)base.Next; + private set => base.Next = value; + } + + public Segment? 
ResetAndGetNext() + { + var next = Next; + Next = null; + RunningIndex = 0; + _flags = Flags.None; + Memory = UntrimmedMemory; // reset, in case we trimmed it + DebugAssertValidChain(); + return next; + } + + public void Recycle() + { + var lease = _lease; + _lease = NullLease.Instance; + lease.Dispose(); + Next = null; + Memory = default; + RunningIndex = 0; + _flags = Flags.None; + Interlocked.Exchange(ref _spare, this); + DebugAssertValidChain(); + } + + private sealed class NullLease : IMemoryOwner + { + private NullLease() { } + public static readonly NullLease Instance = new NullLease(); + public void Dispose() { } + + public Memory Memory => default; + } + + public int StartTrimCount { get; private set; } + + /// + /// Get the full memory of the lease, before any trimming. + /// + public Memory UntrimmedMemory => _lease.Memory; + + /// + /// Undo any trimming, returning the new full capacity. + /// + public int Untrim(bool expandBackwards = false) + { + var fullMemory = UntrimmedMemory; + var fullLength = fullMemory.Length; + var delta = fullLength - Length; + if (delta != 0) + { + _flags &= ~(Flags.StartTrim | Flags.EndTrim); + Memory = fullMemory; + if (expandBackwards & RunningIndex >= delta) + { + // push our origin earlier; only valid if + // we're the first segment, otherwise + // we break someone-else's chain + RunningIndex -= delta; + } + else + { + // push everyone else later + ApplyChainDelta(delta); + } + + DebugAssertValidChain(); + } + + StartTrimCount = 0; + return fullLength; + } + + public bool StartTrimmed => (_flags & Flags.StartTrim) != 0; + public bool EndTrimmed => (_flags & Flags.EndTrim) != 0; + + [Conditional("DEBUG")] + public void DebugAssertValidChain([CallerMemberName] string blame = "") + { + var node = this; + var runningIndex = RunningIndex; + int index = 0; + while (node.Next is { } next) + { + index++; + var nextRunningIndex = runningIndex + node.Length; + if (nextRunningIndex != next.RunningIndex) ThrowRunningIndex(blame, 
index); + node = next; + runningIndex = nextRunningIndex; + static void ThrowRunningIndex(string blame, int index) => throw new InvalidOperationException( + $"Critical running index corruption in dangling chain, from '{blame}', segment {index}"); + } + } + + public void AppendOrRecycle(Segment segment, int maxDepth) + { + segment.Memory.DebugScramble(); + var node = this; + while (maxDepth-- > 0 && node is not null) + { + if (node.Next is null) // found somewhere to attach it + { + if (segment.Untrim() == 0) break; // turned out to be useless + segment.RunningIndex = node.RunningIndex + node.Length; + node.Next = segment; + return; + } + + node = node.Next; + } + + segment.Recycle(); + } + } + + /// + /// Discard all data and buffers. + /// + public void Release() + { + var node = startSegment; + startSegment = endSegment = null; + endSegmentCommitted = endSegmentLength = 0; + while (node is not null) + { + var next = node.Next; + node.Recycle(); + node = next; + } + } + + /// + /// Writes a value to the buffer; comparable to . + /// + public void Write(ReadOnlySpan value) + { + int srcLength = value.Length; + while (srcLength != 0) + { + var target = GetUncommittedSpan(hint: srcLength); + var tgtLength = target.Length; + if (tgtLength >= srcLength) + { + value.CopyTo(target); + Commit(srcLength); + return; + } + + value.Slice(0, tgtLength).CopyTo(target); + Commit(tgtLength); + value = value.Slice(tgtLength); + srcLength -= tgtLength; + } + } + + /// + /// Writes a value to the buffer; comparable to . 
+ /// + public void Write(in ReadOnlySequence value) + { + if (value.IsSingleSegment) + { +#if NET + Write(value.FirstSpan); +#else + Write(value.First.Span); +#endif + } + else + { + WriteMultiSegment(ref this, in value); + } + + static void WriteMultiSegment(ref CycleBuffer @this, in ReadOnlySequence value) + { + // write each segment in turn; note we must use the *current* segment, + // not the sequence's first segment, or we'd repeat the head N times + foreach (var segment in value) + { +#if NET + @this.Write(segment.Span); +#else + @this.Write(segment.Span); +#endif + } + } + } +} diff --git a/src/RESPite/Buffers/ICycleBufferCallback.cs b/src/RESPite/Buffers/ICycleBufferCallback.cs new file mode 100644 index 000000000..9dcf1baa4 --- /dev/null +++ b/src/RESPite/Buffers/ICycleBufferCallback.cs @@ -0,0 +1,14 @@ +using System.Diagnostics.CodeAnalysis; + +namespace RESPite.Buffers; + +[Experimental(Experiments.Respite, UrlFormat = Experiments.UrlFormat)] +public interface ICycleBufferCallback +{ + /// + /// Notify that a page is available; this means that a consumer that wants + /// unflushed data can activate when pages are rotated, allowing large + /// payloads to be written concurrent with write. 
+ /// + void PageComplete(); +} diff --git a/src/RESPite/Buffers/MemoryTrackedPool.cs b/src/RESPite/Buffers/MemoryTrackedPool.cs new file mode 100644 index 000000000..862910488 --- /dev/null +++ b/src/RESPite/Buffers/MemoryTrackedPool.cs @@ -0,0 +1,63 @@ +#if TRACK_MEMORY +using System.Buffers; +using System.Diagnostics.CodeAnalysis; + +namespace RESPite.Buffers; + +internal sealed class MemoryTrackedPool : MemoryPool +{ + // like MemoryPool, but tracks and reports double disposal via a custom memory manager, which + // allows all future use of a Memory to be tracked; contrast ArrayMemoryPool, which only tracks + // the initial fetch of .Memory from the lease + public override IMemoryOwner Rent(int minBufferSize = -1) => MemoryManager.Rent(minBufferSize); + + protected override void Dispose(bool disposing) + { + } + + // ReSharper disable once ArrangeModifiersOrder - you're wrong + public static new MemoryTrackedPool Shared { get; } = new(); + + public override int MaxBufferSize => MemoryPool.Shared.MaxBufferSize; + + private MemoryTrackedPool() + { + } + + private sealed class MemoryManager : MemoryManager + { + public static IMemoryOwner Rent(int minBufferSize = -1) => new MemoryManager(minBufferSize); + + private T[]? array; + private MemoryManager(int minBufferSize) + { + array = ArrayPool.Shared.Rent(Math.Max(64, minBufferSize)); + } + + private T[] CheckDisposed() + { + return array ?? 
Throw(); + [DoesNotReturn] + static T[] Throw() => throw new ObjectDisposedException("Use-after-free of Memory-" + typeof(T).Name); + } + + public override MemoryHandle Pin(int elementIndex = 0) => throw new NotSupportedException(nameof(Pin)); + + public override void Unpin() => throw new NotSupportedException(nameof(Unpin)); + + public override Span GetSpan() => CheckDisposed(); + + protected override bool TryGetArray(out ArraySegment segment) + { + segment = new ArraySegment(CheckDisposed()); + return true; + } + + protected override void Dispose(bool disposing) + { + var arr = Interlocked.Exchange(ref array, null); + if (arr is not null) ArrayPool.Shared.Return(arr); + } + } +} +#endif diff --git a/src/RESPite/Internal/BlockBuffer.cs b/src/RESPite/Internal/BlockBuffer.cs new file mode 100644 index 000000000..752d74c8d --- /dev/null +++ b/src/RESPite/Internal/BlockBuffer.cs @@ -0,0 +1,341 @@ +using System.Buffers; +using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; + +namespace RESPite.Internal; + +internal abstract partial class BlockBufferSerializer +{ + internal sealed class BlockBuffer : MemoryManager + { + private BlockBuffer(BlockBufferSerializer parent, int minCapacity) + { + _arrayPool = parent._arrayPool; + _array = _arrayPool.Rent(minCapacity); + DebugCounters.OnBufferCapacity(_array.Length); +#if DEBUG + _parent = parent; + parent.DebugBufferCreated(); +#endif + } + + private int _refCount = 1; + private int _finalizedOffset, _writeOffset; + private readonly ArrayPool _arrayPool; + private byte[] _array; +#if DEBUG + private int _finalizedCount; + private BlockBufferSerializer _parent; +#endif + + public override string ToString() => +#if DEBUG + $"{_finalizedCount} messages; " + +#endif + $"{_finalizedOffset} finalized bytes; writing: {NonFinalizedData.Length} bytes, {Available} available; observers: {_refCount}"; + + // only used when filling; _buffer should 
be non-null + private int Available => _array.Length - _writeOffset; + public Memory UncommittedMemory => _array.AsMemory(_writeOffset); + public Span UncommittedSpan => _array.AsSpan(_writeOffset); + + // decrease ref-count; dispose if necessary + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public void Release() + { + if (Interlocked.Decrement(ref _refCount) <= 0) Recycle(); + } + + public void AddRef() + { + if (!TryAddRef()) Throw(); + static void Throw() => throw new ObjectDisposedException(nameof(BlockBuffer)); + } + + public bool TryAddRef() + { + int count; + do + { + count = Volatile.Read(ref _refCount); + if (count <= 0) return false; + } + // repeat until we can successfully swap/incr + while (Interlocked.CompareExchange(ref _refCount, count + 1, count) != count); + + return true; + } + + [MethodImpl(MethodImplOptions.NoInlining)] // called rarely vs Dispose + private void Recycle() + { + var count = Volatile.Read(ref _refCount); + if (count == 0) + { + _array.DebugScramble(); +#if DEBUG + GC.SuppressFinalize(this); // only have a finalizer in debug + _parent.DebugBufferRecycled(_array.Length); +#endif + _arrayPool.Return(_array); + _array = []; + } + + Debug.Assert(count == 0, $"over-disposal? 
count={count}"); + } + +#if DEBUG +#pragma warning disable CA2015 // Adding a finalizer to a type derived from MemoryManager may permit memory to be freed while it is still in use by a Span + // (the above is fine because we don't actually release anything - just a counter) + ~BlockBuffer() + { + _parent.DebugBufferLeaked(); + DebugCounters.OnBufferLeaked(); + } +#pragma warning restore CA2015 +#endif + + public static BlockBuffer GetBuffer(BlockBufferSerializer parent, int sizeHint) + { + // note this isn't an actual "max", just a max of what we guarantee; we give the caller + // whatever is left in the buffer; the clamped hint just decides whether we need a *new* buffer + const int MinSize = 16, MaxSize = 128; + sizeHint = Math.Min(Math.Max(sizeHint, MinSize), MaxSize); + + var buffer = parent.Buffer; // most common path is "exists, with enough data" + return buffer is not null && buffer.AvailableWithResetIfUseful() >= sizeHint + ? buffer + : GetBufferSlow(parent, sizeHint); + } + + // would it be useful and possible to reset? i.e. 
if all finalized chunks have been returned, + private int AvailableWithResetIfUseful() + { + if (_finalizedOffset != 0 // at least some chunks have been finalized + && Volatile.Read(ref _refCount) == 1 // all finalized chunks returned + & _writeOffset == _finalizedOffset) // we're not in the middle of serializing something new + { + _writeOffset = _finalizedOffset = 0; // swipe left + } + + return _array.Length - _writeOffset; + } + + private static BlockBuffer GetBufferSlow(BlockBufferSerializer parent, int minBytes) + { + // note clamp on size hint has already been applied + const int DefaultBufferSize = 2048; + var buffer = parent.Buffer; + if (buffer is null) + { + // first buffer + return parent.Buffer = new BlockBuffer(parent, DefaultBufferSize); + } + + Debug.Assert(minBytes > buffer.Available, "existing buffer has capacity - why are we here?"); + + if (buffer.TryResizeFor(minBytes)) + { + Debug.Assert(buffer.Available >= minBytes); + return buffer; + } + + // We've tried reset and resize - no more tricks; we need to move to a new buffer, starting with a + // capacity for any existing data in this message, plus the new chunk we're adding. 
+ var nonFinalizedBytes = buffer.NonFinalizedData; + var newBuffer = new BlockBuffer(parent, Math.Max(nonFinalizedBytes.Length + minBytes, DefaultBufferSize)); + + // copy the existing message data, if any (the previous message might have finished near the + // boundary, in which case we might not have written anything yet) + newBuffer.CopyFrom(nonFinalizedBytes); + Debug.Assert(newBuffer.Available >= minBytes, "should have requested extra capacity"); + + // the ~emperor~ buffer is dead; long live the ~emperor~ buffer + parent.Buffer = newBuffer; + buffer.MarkComplete(parent); + return newBuffer; + } + + // used for elective reset (rather than "because we ran out of space") + public static void Clear(BlockBufferSerializer parent) + { + if (parent.Buffer is { } buffer) + { + parent.Buffer = null; + buffer.MarkComplete(parent); + } + } + + public static ReadOnlyMemory RetainCurrent(BlockBufferSerializer parent) + { + if (parent.Buffer is { } buffer && buffer._finalizedOffset != 0) + { + parent.Buffer = null; + buffer.AddRef(); + return buffer.CreateMemory(0, buffer._finalizedOffset); + } + // nothing useful to detach! 
+ return default; + } + + private void MarkComplete(BlockBufferSerializer parent) + { + // record that the old buffer no longer logically has any non-committed bytes (mostly just for ToString()) + _writeOffset = _finalizedOffset; + Debug.Assert(IsNonCommittedEmpty); + + // see if the caller wants to take ownership of the segment + if (_finalizedOffset != 0 && !parent.ClaimSegment(CreateMemory(0, _finalizedOffset))) + { + Release(); // decrement the observer + } +#if DEBUG + DebugCounters.OnBufferCompleted(_finalizedCount, _finalizedOffset); +#endif + } + + private void CopyFrom(Span source) + { + source.CopyTo(UncommittedSpan); + _writeOffset += source.Length; + } + + private Span NonFinalizedData => _array.AsSpan( + _finalizedOffset, _writeOffset - _finalizedOffset); + + private bool TryResizeFor(int extraBytes) + { + if (_finalizedOffset == 0 & // we can only do this if there are no other messages in the buffer + Volatile.Read(ref _refCount) == 1) // and no-one else is looking (we already tried reset) + { + // we're already on the boundary - don't scrimp; just do the math from the end of the buffer + byte[] newArray = _arrayPool.Rent(_array.Length + extraBytes); + DebugCounters.OnBufferCapacity(newArray.Length - _array.Length); // account for extra only + + // copy the existing data (we always expect some, since we've clamped extraBytes to be + // much smaller than the default buffer size) + NonFinalizedData.CopyTo(newArray); + _array.DebugScramble(); + _arrayPool.Return(_array); + _array = newArray; + return true; + } + + return false; + } + + public static void Advance(BlockBufferSerializer parent, int count) + { + if (count == 0) return; + if (count < 0) ThrowOutOfRange(); + var buffer = parent.Buffer; + if (buffer is null || buffer.Available < count) ThrowOutOfRange(); + buffer._writeOffset += count; + + [DoesNotReturn] + static void ThrowOutOfRange() => throw new ArgumentOutOfRangeException(nameof(count)); + } + + public void 
RevertUnfinalized(BlockBufferSerializer parent) + { + // undo any writes (something went wrong during serialize) + _finalizedOffset = _writeOffset; + } + + private ReadOnlyMemory FinalizeBlock() + { + var length = _writeOffset - _finalizedOffset; + Debug.Assert(length > 0, "already checked this in FinalizeMessage!"); + var chunk = CreateMemory(_finalizedOffset, length); + _finalizedOffset = _writeOffset; // move the write head +#if DEBUG + _finalizedCount++; + _parent.DebugMessageFinalized(length); +#endif + Interlocked.Increment(ref _refCount); // add an observer + return chunk; + } + + private bool IsNonCommittedEmpty => _finalizedOffset == _writeOffset; + + public static ReadOnlyMemory FinalizeMessage(BlockBufferSerializer parent) + { + var buffer = parent.Buffer; + if (buffer is null || buffer.IsNonCommittedEmpty) + { +#if DEBUG // still count it for logging purposes + if (buffer is not null) buffer._finalizedCount++; + parent.DebugMessageFinalized(0); +#endif + return default; + } + + return buffer.FinalizeBlock(); + } + + // MemoryManager pieces + protected override void Dispose(bool disposing) + { + if (disposing) Release(); + } + + public override Span GetSpan() => _array; + public int Length => _array.Length; + + // base version is CreateMemory(GetSpan().Length); avoid that GetSpan() + public override Memory Memory => CreateMemory(_array.Length); + + public override unsafe MemoryHandle Pin(int elementIndex = 0) + { + // We *could* be cute and use a shared pin - but that's a *lot* + // of work (synchronization), requires extra storage, and for an + // API that is very unlikely; hence: we'll use per-call GC pins. 
+ GCHandle handle = GCHandle.Alloc(_array, GCHandleType.Pinned); + DebugCounters.OnBufferPinned(); // prove how unlikely this is + byte* ptr = (byte*)handle.AddrOfPinnedObject(); + // note no IPinnable in the MemoryHandle; + return new MemoryHandle(ptr + elementIndex, handle); + } + + // This would only be called if we passed out a MemoryHandle with ourselves + // as IPinnable (in Pin), which: we don't. + public override void Unpin() => throw new NotSupportedException(); + + protected override bool TryGetArray(out ArraySegment segment) + { + segment = new ArraySegment(_array); + return true; + } + + internal static void Release(in ReadOnlySequence request) + { + if (request.IsSingleSegment) + { + if (MemoryMarshal.TryGetMemoryManager( + request.First, out var block)) + { + block.Release(); + } + } + else + { + ReleaseMultiBlock(in request); + } + + [MethodImpl(MethodImplOptions.NoInlining)] + static void ReleaseMultiBlock(in ReadOnlySequence request) + { + foreach (var segment in request) + { + if (MemoryMarshal.TryGetMemoryManager( + segment, out var block)) + { + block.Release(); + } + } + } + } + } +} diff --git a/src/RESPite/Internal/BlockBufferSerializer.cs b/src/RESPite/Internal/BlockBufferSerializer.cs new file mode 100644 index 000000000..5f90f66cb --- /dev/null +++ b/src/RESPite/Internal/BlockBufferSerializer.cs @@ -0,0 +1,96 @@ +using System.Buffers; +using System.Diagnostics; + +namespace RESPite.Internal; + +/// +/// Provides abstracted access to a buffer-writing API. Conveniently, we only give the caller +/// RespWriter - which they cannot export (ref-type), thus we never actually give the +/// public caller our IBufferWriter{byte}. Likewise, note that serialization is synchronous, +/// i.e. never switches thread during an operation. This gives us quite a bit of flexibility. +/// There are two main uses of BlockBufferSerializer: +/// 1. 
thread-local: ambient, used for random messages so that each thread is quietly packing +/// a thread-specific buffer; zero concurrency because of [ThreadStatic] hackery. +/// 2. batching: RespBatch hosts a serializer that reflects the batch we're building; successive +/// commands in the same batch are written adjacently in a shared buffer - we explicitly +/// detect and reject concurrency attempts in a batch (which is fair: a batch has order). +/// +internal abstract partial class BlockBufferSerializer(ArrayPool? arrayPool = null) : IBufferWriter +{ + private readonly ArrayPool _arrayPool = arrayPool ?? ArrayPool.Shared; + private protected abstract BlockBuffer? Buffer { get; set; } + + Memory IBufferWriter.GetMemory(int sizeHint) => BlockBuffer.GetBuffer(this, sizeHint).UncommittedMemory; + + Span IBufferWriter.GetSpan(int sizeHint) => BlockBuffer.GetBuffer(this, sizeHint).UncommittedSpan; + + void IBufferWriter.Advance(int count) => BlockBuffer.Advance(this, count); + + public virtual void Clear() => BlockBuffer.Clear(this); + + internal virtual ReadOnlySequence Flush() => throw new NotSupportedException(); + + /* + public virtual ReadOnlyMemory Serialize( + RespCommandMap? 
commandMap, + ReadOnlySpan command, + in TRequest request, + IRespFormatter formatter) +#if NET10_0_OR_GREATER + where TRequest : allows ref struct +#endif + { + try + { + var writer = new RespWriter(this); + writer.CommandMap = commandMap; + formatter.Format(command, ref writer, request); + writer.Flush(); + return BlockBuffer.FinalizeMessage(this); + } + catch + { + Buffer?.RevertUnfinalized(this); + throw; + } + } + */ + + internal void Revert() => Buffer?.RevertUnfinalized(this); + + protected virtual bool ClaimSegment(ReadOnlyMemory segment) => false; + +#if DEBUG + private int _countAdded, _countRecycled, _countLeaked, _countMessages; + private long _countMessageBytes; + public int CountLeaked => Volatile.Read(ref _countLeaked); + public int CountRecycled => Volatile.Read(ref _countRecycled); + public int CountAdded => Volatile.Read(ref _countAdded); + public int CountMessages => Volatile.Read(ref _countMessages); + public long CountMessageBytes => Volatile.Read(ref _countMessageBytes); + + [Conditional("DEBUG")] + private void DebugBufferLeaked() => Interlocked.Increment(ref _countLeaked); + + [Conditional("DEBUG")] + private void DebugBufferRecycled(int length) + { + Interlocked.Increment(ref _countRecycled); + DebugCounters.OnBufferRecycled(length); + } + + [Conditional("DEBUG")] + private void DebugBufferCreated() + { + Interlocked.Increment(ref _countAdded); + DebugCounters.OnBufferCreated(); + } + + [Conditional("DEBUG")] + private void DebugMessageFinalized(int bytes) + { + Interlocked.Increment(ref _countMessages); + Interlocked.Add(ref _countMessageBytes, bytes); + } +#endif +} diff --git a/src/RESPite/Internal/DebugCounters.cs b/src/RESPite/Internal/DebugCounters.cs new file mode 100644 index 000000000..2ed742a84 --- /dev/null +++ b/src/RESPite/Internal/DebugCounters.cs @@ -0,0 +1,163 @@ +using System.Diagnostics; + +namespace RESPite.Internal; + +internal partial class DebugCounters +{ +#if DEBUG + private static int + _tallySyncReadCount, + 
_tallyAsyncReadCount, + _tallyAsyncReadInlineCount, + _tallyDiscardFullCount, + _tallyDiscardPartialCount, + _tallyBufferCreatedCount, + _tallyBufferRecycledCount, + _tallyBufferMessageCount, + _tallyBufferPinCount, + _tallyBufferLeakCount; + + private static long + _tallyReadBytes, + _tallyDiscardAverage, + _tallyBufferMessageBytes, + _tallyBufferRecycledBytes, + _tallyBufferMaxOutstandingBytes, + _tallyBufferTotalBytes; +#endif + + [Conditional("DEBUG")] + public static void OnDiscardFull(long count) + { +#if DEBUG + if (count > 0) + { + Interlocked.Increment(ref _tallyDiscardFullCount); + EstimatedMovingRangeAverage(ref _tallyDiscardAverage, count); + } +#endif + } + + [Conditional("DEBUG")] + public static void OnDiscardPartial(long count) + { +#if DEBUG + if (count > 0) + { + Interlocked.Increment(ref _tallyDiscardPartialCount); + EstimatedMovingRangeAverage(ref _tallyDiscardAverage, count); + } +#endif + } + + [Conditional("DEBUG")] + internal static void OnAsyncRead(int bytes, bool inline) + { +#if DEBUG + Interlocked.Increment(ref inline ? 
ref _tallyAsyncReadInlineCount : ref _tallyAsyncReadCount); + if (bytes > 0) Interlocked.Add(ref _tallyReadBytes, bytes); +#endif + } + + [Conditional("DEBUG")] + internal static void OnSyncRead(int bytes) + { +#if DEBUG + Interlocked.Increment(ref _tallySyncReadCount); + if (bytes > 0) Interlocked.Add(ref _tallyReadBytes, bytes); +#endif + } + + [Conditional("DEBUG")] + public static void OnBufferCreated() + { +#if DEBUG + Interlocked.Increment(ref _tallyBufferCreatedCount); +#endif + } + + [Conditional("DEBUG")] + public static void OnBufferRecycled(int messageBytes) + { +#if DEBUG + Interlocked.Increment(ref _tallyBufferRecycledCount); + var now = Interlocked.Add(ref _tallyBufferRecycledBytes, messageBytes); + var outstanding = Volatile.Read(ref _tallyBufferMessageBytes) - now; + + while (true) + { + var oldOutstanding = Volatile.Read(ref _tallyBufferMaxOutstandingBytes); + // loop until either it isn't an increase, or we successfully perform + // the swap + if (outstanding <= oldOutstanding + || Interlocked.CompareExchange( + ref _tallyBufferMaxOutstandingBytes, + outstanding, + oldOutstanding) == oldOutstanding) break; + } +#endif + } + + [Conditional("DEBUG")] + public static void OnBufferCompleted(int messageCount, int messageBytes) + { +#if DEBUG + Interlocked.Add(ref _tallyBufferMessageCount, messageCount); + Interlocked.Add(ref _tallyBufferMessageBytes, messageBytes); +#endif + } + + [Conditional("DEBUG")] + public static void OnBufferCapacity(int bytes) + { +#if DEBUG + Interlocked.Add(ref _tallyBufferTotalBytes, bytes); +#endif + } + + [Conditional("DEBUG")] + public static void OnBufferPinned() + { +#if DEBUG + Interlocked.Increment(ref _tallyBufferPinCount); +#endif + } + + [Conditional("DEBUG")] + public static void OnBufferLeaked() + { +#if DEBUG + Interlocked.Increment(ref _tallyBufferLeakCount); +#endif + } + +#if DEBUG + private static void EstimatedMovingRangeAverage(ref long field, long value) + { + var oldValue = Volatile.Read(ref field); + 
var delta = (value - oldValue) >> 3; // this is a 7:1 old:new EMRA, using integer/bit math (alpha=0.125) + if (delta != 0) Interlocked.Add(ref field, delta); + // note: strictly conflicting concurrent calls can skew the value incorrectly; this is, however, + // preferable to getting into a CEX squabble or requiring a lock - it is debug-only and just useful data + } + + public int SyncReadCount { get; } = Interlocked.Exchange(ref _tallySyncReadCount, 0); + public int AsyncReadCount { get; } = Interlocked.Exchange(ref _tallyAsyncReadCount, 0); + public int AsyncReadInlineCount { get; } = Interlocked.Exchange(ref _tallyAsyncReadInlineCount, 0); + public long ReadBytes { get; } = Interlocked.Exchange(ref _tallyReadBytes, 0); + + public long DiscardAverage { get; } = Interlocked.Exchange(ref _tallyDiscardAverage, 32); + public int DiscardFullCount { get; } = Interlocked.Exchange(ref _tallyDiscardFullCount, 0); + public int DiscardPartialCount { get; } = Interlocked.Exchange(ref _tallyDiscardPartialCount, 0); + + public int BufferCreatedCount { get; } = Interlocked.Exchange(ref _tallyBufferCreatedCount, 0); + public int BufferRecycledCount { get; } = Interlocked.Exchange(ref _tallyBufferRecycledCount, 0); + public long BufferRecycledBytes { get; } = Interlocked.Exchange(ref _tallyBufferRecycledBytes, 0); + public long BufferMaxOutstandingBytes { get; } = Interlocked.Exchange(ref _tallyBufferMaxOutstandingBytes, 0); + public int BufferMessageCount { get; } = Interlocked.Exchange(ref _tallyBufferMessageCount, 0); + public long BufferMessageBytes { get; } = Interlocked.Exchange(ref _tallyBufferMessageBytes, 0); + public long BufferTotalBytes { get; } = Interlocked.Exchange(ref _tallyBufferTotalBytes, 0); + public int BufferPinCount { get; } = Interlocked.Exchange(ref _tallyBufferPinCount, 0); + public int BufferLeakCount { get; } = Interlocked.Exchange(ref _tallyBufferLeakCount, 0); +#endif +} diff --git a/src/RESPite/Internal/Raw.cs b/src/RESPite/Internal/Raw.cs new file 
mode 100644 index 000000000..9df318630 --- /dev/null +++ b/src/RESPite/Internal/Raw.cs @@ -0,0 +1,138 @@ +using System.Diagnostics; +using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; +using System.Text; + +#if NET +using System.Runtime.Intrinsics; +using System.Runtime.Intrinsics.X86; +#endif + +namespace RESPite.Internal; + +/// +/// Pre-computed payload fragments, for high-volume scenarios / common values. +/// +/// +/// CPU-endianness applies here; we can't just use "const" - however, modern JITs treat "static readonly" *almost* the same as "const", so: meh. +/// +internal static class Raw +{ + public static ulong Create64(ReadOnlySpan bytes, int length) + { + if (length != bytes.Length) + { + throw new ArgumentException($"Length check failed: {length} vs {bytes.Length}, value: {RespConstants.UTF8.GetString(bytes)}", nameof(length)); + } + if (length < 0 || length > sizeof(ulong)) + { + throw new ArgumentOutOfRangeException(nameof(length), $"Invalid length {length} - must be 0-{sizeof(ulong)}"); + } + + // this *will* be aligned; this approach intentionally chosen for parity with write + Span scratch = stackalloc byte[sizeof(ulong)]; + if (length != sizeof(ulong)) scratch.Slice(length).Clear(); + bytes.CopyTo(scratch); + return Unsafe.ReadUnaligned(ref MemoryMarshal.GetReference(scratch)); + } + + public static uint Create32(ReadOnlySpan bytes, int length) + { + if (length != bytes.Length) + { + throw new ArgumentException($"Length check failed: {length} vs {bytes.Length}, value: {RespConstants.UTF8.GetString(bytes)}", nameof(length)); + } + if (length < 0 || length > sizeof(uint)) + { + throw new ArgumentOutOfRangeException(nameof(length), $"Invalid length {length} - must be 0-{sizeof(uint)}"); + } + + // this *will* be aligned; this approach intentionally chosen for parity with write + Span scratch = stackalloc byte[sizeof(uint)]; + if (length != sizeof(uint)) scratch.Slice(length).Clear(); + bytes.CopyTo(scratch); + return 
Unsafe.ReadUnaligned(ref MemoryMarshal.GetReference(scratch)); + } + + public static ulong BulkStringEmpty_6 = Create64("$0\r\n\r\n"u8, 6); + + public static ulong BulkStringInt32_M1_8 = Create64("$2\r\n-1\r\n"u8, 8); + public static ulong BulkStringInt32_0_7 = Create64("$1\r\n0\r\n"u8, 7); + public static ulong BulkStringInt32_1_7 = Create64("$1\r\n1\r\n"u8, 7); + public static ulong BulkStringInt32_2_7 = Create64("$1\r\n2\r\n"u8, 7); + public static ulong BulkStringInt32_3_7 = Create64("$1\r\n3\r\n"u8, 7); + public static ulong BulkStringInt32_4_7 = Create64("$1\r\n4\r\n"u8, 7); + public static ulong BulkStringInt32_5_7 = Create64("$1\r\n5\r\n"u8, 7); + public static ulong BulkStringInt32_6_7 = Create64("$1\r\n6\r\n"u8, 7); + public static ulong BulkStringInt32_7_7 = Create64("$1\r\n7\r\n"u8, 7); + public static ulong BulkStringInt32_8_7 = Create64("$1\r\n8\r\n"u8, 7); + public static ulong BulkStringInt32_9_7 = Create64("$1\r\n9\r\n"u8, 7); + public static ulong BulkStringInt32_10_8 = Create64("$2\r\n10\r\n"u8, 8); + + public static ulong BulkStringPrefix_M1_5 = Create64("$-1\r\n"u8, 5); + public static uint BulkStringPrefix_0_4 = Create32("$0\r\n"u8, 4); + public static uint BulkStringPrefix_1_4 = Create32("$1\r\n"u8, 4); + public static uint BulkStringPrefix_2_4 = Create32("$2\r\n"u8, 4); + public static uint BulkStringPrefix_3_4 = Create32("$3\r\n"u8, 4); + public static uint BulkStringPrefix_4_4 = Create32("$4\r\n"u8, 4); + public static uint BulkStringPrefix_5_4 = Create32("$5\r\n"u8, 4); + public static uint BulkStringPrefix_6_4 = Create32("$6\r\n"u8, 4); + public static uint BulkStringPrefix_7_4 = Create32("$7\r\n"u8, 4); + public static uint BulkStringPrefix_8_4 = Create32("$8\r\n"u8, 4); + public static uint BulkStringPrefix_9_4 = Create32("$9\r\n"u8, 4); + public static ulong BulkStringPrefix_10_5 = Create64("$10\r\n"u8, 5); + + public static ulong ArrayPrefix_M1_5 = Create64("*-1\r\n"u8, 5); + public static uint ArrayPrefix_0_4 = Create32("*0\r\n"u8, 
4); + public static uint ArrayPrefix_1_4 = Create32("*1\r\n"u8, 4); + public static uint ArrayPrefix_2_4 = Create32("*2\r\n"u8, 4); + public static uint ArrayPrefix_3_4 = Create32("*3\r\n"u8, 4); + public static uint ArrayPrefix_4_4 = Create32("*4\r\n"u8, 4); + public static uint ArrayPrefix_5_4 = Create32("*5\r\n"u8, 4); + public static uint ArrayPrefix_6_4 = Create32("*6\r\n"u8, 4); + public static uint ArrayPrefix_7_4 = Create32("*7\r\n"u8, 4); + public static uint ArrayPrefix_8_4 = Create32("*8\r\n"u8, 4); + public static uint ArrayPrefix_9_4 = Create32("*9\r\n"u8, 4); + public static ulong ArrayPrefix_10_5 = Create64("*10\r\n"u8, 5); + +#if NET + private static uint FirstAndLast(char first, char last) + { + Debug.Assert(first < 128 && last < 128, "ASCII please"); + Span scratch = [(byte)first, 0, 0, (byte)last]; + // this *will* be aligned; this approach intentionally chosen for how we read + return Unsafe.ReadUnaligned(ref MemoryMarshal.GetReference(scratch)); + } + + public const int CommonRespIndex_Success = 0; + public const int CommonRespIndex_SingleDigitInteger = 1; + public const int CommonRespIndex_DoubleDigitInteger = 2; + public const int CommonRespIndex_SingleDigitString = 3; + public const int CommonRespIndex_DoubleDigitString = 4; + public const int CommonRespIndex_SingleDigitArray = 5; + public const int CommonRespIndex_DoubleDigitArray = 6; + public const int CommonRespIndex_Error = 7; + + public static readonly Vector256 CommonRespPrefixes = Vector256.Create( + FirstAndLast('+', '\r'), // success +OK\r\n + FirstAndLast(':', '\n'), // single-digit integer :4\r\n + FirstAndLast(':', '\r'), // double-digit integer :42\r\n + FirstAndLast('$', '\n'), // 0-9 char string $0\r\n\r\n + FirstAndLast('$', '\r'), // null/10-99 char string $-1\r\n or $10\r\nABCDEFGHIJ\r\n + FirstAndLast('*', '\n'), // 0-9 length array *0\r\n + FirstAndLast('*', '\r'), // null/10-99 length array *-1\r\n or *10\r\n:0\r\n:0\r\n:0\r\n:0\r\n:0\r\n:0\r\n:0\r\n:0\r\n:0\r\n:0\r\n + 
FirstAndLast('-', 'R')); // common errors -ERR something bad happened + + public static readonly Vector256 FirstLastMask = CreateUInt32(0xFF0000FF); + + private static Vector256 CreateUInt32(uint value) + { +#if NET8_0_OR_GREATER + return Vector256.Create(value); +#else + return Vector256.Create(value, value, value, value, value, value, value, value); +#endif + } + +#endif +} diff --git a/src/RESPite/Internal/RespConstants.cs b/src/RESPite/Internal/RespConstants.cs new file mode 100644 index 000000000..accb8400b --- /dev/null +++ b/src/RESPite/Internal/RespConstants.cs @@ -0,0 +1,53 @@ +using System.Buffers.Binary; +using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; +using System.Text; +// ReSharper disable InconsistentNaming +namespace RESPite.Internal; + +internal static class RespConstants +{ + public static readonly UTF8Encoding UTF8 = new(false); + + public static ReadOnlySpan CrlfBytes => "\r\n"u8; + + public static readonly ushort CrLfUInt16 = UnsafeCpuUInt16(CrlfBytes); + + public static ReadOnlySpan OKBytes_LC => "ok"u8; + public static ReadOnlySpan OKBytes => "OK"u8; + public static readonly ushort OKUInt16 = UnsafeCpuUInt16(OKBytes); + public static readonly ushort OKUInt16_LC = UnsafeCpuUInt16(OKBytes_LC); + + public static readonly uint BulkStringStreaming = UnsafeCpuUInt32("$?\r\n"u8); + public static readonly uint BulkStringNull = UnsafeCpuUInt32("$-1\r"u8); + + public static readonly uint ArrayStreaming = UnsafeCpuUInt32("*?\r\n"u8); + public static readonly uint ArrayNull = UnsafeCpuUInt32("*-1\r"u8); + + public static ushort UnsafeCpuUInt16(ReadOnlySpan bytes) + => Unsafe.ReadUnaligned(ref MemoryMarshal.GetReference(bytes)); + public static ushort UnsafeCpuUInt16(ReadOnlySpan bytes, int offset) + => Unsafe.ReadUnaligned(ref Unsafe.Add(ref MemoryMarshal.GetReference(bytes), offset)); + public static byte UnsafeCpuByte(ReadOnlySpan bytes, int offset) + => Unsafe.Add(ref MemoryMarshal.GetReference(bytes), offset); + public 
static uint UnsafeCpuUInt32(ReadOnlySpan bytes) + => Unsafe.ReadUnaligned(ref MemoryMarshal.GetReference(bytes)); + public static uint UnsafeCpuUInt32(ReadOnlySpan bytes, int offset) + => Unsafe.ReadUnaligned(ref Unsafe.Add(ref MemoryMarshal.GetReference(bytes), offset)); + public static ulong UnsafeCpuUInt64(ReadOnlySpan bytes) + => Unsafe.ReadUnaligned(ref MemoryMarshal.GetReference(bytes)); + public static ushort CpuUInt16(ushort bigEndian) + => BitConverter.IsLittleEndian ? BinaryPrimitives.ReverseEndianness(bigEndian) : bigEndian; + public static uint CpuUInt32(uint bigEndian) + => BitConverter.IsLittleEndian ? BinaryPrimitives.ReverseEndianness(bigEndian) : bigEndian; + public static ulong CpuUInt64(ulong bigEndian) + => BitConverter.IsLittleEndian ? BinaryPrimitives.ReverseEndianness(bigEndian) : bigEndian; + + public const int MaxRawBytesInt32 = 11, // "-2147483648" + MaxRawBytesInt64 = 20, // "-9223372036854775808", + MaxProtocolBytesIntegerInt32 = MaxRawBytesInt32 + 3, // ?X10X\r\n where ? could be $, *, etc - usually a length prefix + MaxProtocolBytesBulkStringIntegerInt32 = MaxRawBytesInt32 + 7, // $NN\r\nX11X\r\n for NN (length) 1-11 + MaxProtocolBytesBulkStringIntegerInt64 = MaxRawBytesInt64 + 7, // $NN\r\nX20X\r\n for NN (length) 1-20 + MaxRawBytesNumber = 20, // note G17 format, allow 20 for payload + MaxProtocolBytesBytesNumber = MaxRawBytesNumber + 7; // $NN\r\nX...X\r\n for NN (length) 1-20 +} diff --git a/src/RESPite/Internal/RespOperationExtensions.cs b/src/RESPite/Internal/RespOperationExtensions.cs new file mode 100644 index 000000000..78ecd6d53 --- /dev/null +++ b/src/RESPite/Internal/RespOperationExtensions.cs @@ -0,0 +1,57 @@ +using System.Buffers; +using System.Diagnostics; +using System.Runtime.InteropServices; + +namespace RESPite.Internal; + +internal static class RespOperationExtensions +{ +#if PREVIEW_LANGVER + extension(in RespOperation operation) + { + // since this is valid... 
+ public ref readonly RespOperation Self => ref operation; + + // so is this (the types are layout-identical) + public ref readonly RespOperation Untyped => ref Unsafe.As, RespOperation>( + ref Unsafe.AsRef(in operation)); + } +#endif + + // if we're recycling a buffer, we need to consider it trashable by other threads; for + // debug purposes, force this by overwriting with *****, aka the meaning of life + [Conditional("DEBUG")] + internal static void DebugScramble(this Span value) + => value.Fill(42); + + [Conditional("DEBUG")] + internal static void DebugScramble(this Memory value) + => value.Span.Fill(42); + + [Conditional("DEBUG")] + internal static void DebugScramble(this ReadOnlyMemory value) + => MemoryMarshal.AsMemory(value).Span.Fill(42); + + [Conditional("DEBUG")] + internal static void DebugScramble(this ReadOnlySequence value) + { + if (value.IsSingleSegment) + { + value.First.DebugScramble(); + } + else + { + foreach (var segment in value) + { + segment.DebugScramble(); + } + } + } + + [Conditional("DEBUG")] + internal static void DebugScramble(this byte[]? value) + { + if (value is not null) + value.AsSpan().Fill(42); + } +} diff --git a/src/RESPite/Internal/SynchronizedBlockBufferSerializer.cs b/src/RESPite/Internal/SynchronizedBlockBufferSerializer.cs new file mode 100644 index 000000000..4f00e5194 --- /dev/null +++ b/src/RESPite/Internal/SynchronizedBlockBufferSerializer.cs @@ -0,0 +1,122 @@ +using System.Buffers; + +namespace RESPite.Internal; + +internal partial class BlockBufferSerializer +{ + internal static BlockBufferSerializer Create(bool retainChain = false) => + new SynchronizedBlockBufferSerializer(retainChain); + + /// + /// Used for things like . + /// + private sealed class SynchronizedBlockBufferSerializer(bool retainChain) : BlockBufferSerializer + { + private bool _discardDuringClear; + + private protected override BlockBuffer? 
Buffer { get; set; } // simple per-instance auto-prop + + /* + // use lock-based synchronization + public override ReadOnlyMemory Serialize( + RespCommandMap? commandMap, + ReadOnlySpan command, + in TRequest request, + IRespFormatter formatter) + { + bool haveLock = false; + try // note that "lock" unrolls to something very similar; we're not adding anything unusual here + { + // in reality, we *expect* people to not attempt to use batches concurrently, *and* + // we expect serialization to be very fast, but: out of an abundance of caution, + // add a timeout - just to avoid surprises (since people can write their own formatters) + Monitor.TryEnter(this, LockTimeout, ref haveLock); + if (!haveLock) ThrowTimeout(); + return base.Serialize(commandMap, command, in request, formatter); + } + finally + { + if (haveLock) Monitor.Exit(this); + } + + static void ThrowTimeout() => throw new TimeoutException( + "It took a long time to get access to the serialization-buffer. This is very odd - please " + + "ask on GitHub, but *as a guess*, you have a custom RESP formatter that is really slow *and* " + + "you are using concurrent access to a RESP batch / transaction."); + } + */ + + private static readonly TimeSpan LockTimeout = TimeSpan.FromSeconds(5); + + private Segment? 
_head, _tail; + + protected override bool ClaimSegment(ReadOnlyMemory segment) + { + if (retainChain & !_discardDuringClear) + { + if (_head is null) + { + _head = _tail = new Segment(segment); + } + else + { + _tail = new Segment(segment, _tail); + } + + // note we don't need to increment the ref-count; because of this "true" + return true; + } + + return false; + } + + internal override ReadOnlySequence Flush() + { + if (_head is null) + { + // at worst, single-segment - we can skip the alloc + return new(BlockBuffer.RetainCurrent(this)); + } + + // otherwise, flush everything *keeping the chain* + ClearWithDiscard(discard: false); + ReadOnlySequence seq = new(_head, 0, _tail!, _tail!.Length); + _head = _tail = null; + return seq; + } + + public override void Clear() + { + ClearWithDiscard(discard: true); + _head = _tail = null; + } + + private void ClearWithDiscard(bool discard) + { + try + { + _discardDuringClear = discard; + base.Clear(); + } + finally + { + _discardDuringClear = false; + } + } + + private sealed class Segment : ReadOnlySequenceSegment + { + public Segment(ReadOnlyMemory memory, Segment? 
previous = null) + { + Memory = memory; + if (previous is not null) + { + previous.Next = this; + RunningIndex = previous.RunningIndex + previous.Length; + } + } + + public int Length => Memory.Length; + } + } +} diff --git a/src/RESPite/Internal/ThreadLocalBlockBufferSerializer.cs b/src/RESPite/Internal/ThreadLocalBlockBufferSerializer.cs new file mode 100644 index 000000000..1c1895ff4 --- /dev/null +++ b/src/RESPite/Internal/ThreadLocalBlockBufferSerializer.cs @@ -0,0 +1,21 @@ +namespace RESPite.Internal; + +internal partial class BlockBufferSerializer +{ + internal static BlockBufferSerializer Shared => ThreadLocalBlockBufferSerializer.Instance; + private sealed class ThreadLocalBlockBufferSerializer : BlockBufferSerializer + { + private ThreadLocalBlockBufferSerializer() { } + public static readonly ThreadLocalBlockBufferSerializer Instance = new(); + + [ThreadStatic] + // side-step concurrency using per-thread semantics + private static BlockBuffer? _perTreadBuffer; + + private protected override BlockBuffer? Buffer + { + get => _perTreadBuffer; + set => _perTreadBuffer = value; + } + } +} diff --git a/src/RESPite/Messages/RespAttributeReader.cs b/src/RESPite/Messages/RespAttributeReader.cs new file mode 100644 index 000000000..9d61802c0 --- /dev/null +++ b/src/RESPite/Messages/RespAttributeReader.cs @@ -0,0 +1,71 @@ +using System.Diagnostics.CodeAnalysis; + +namespace RESPite.Messages; + +/// +/// Allows attribute data to be parsed conveniently. +/// +/// The type of data represented by this reader. +[Experimental(Experiments.Respite, UrlFormat = Experiments.UrlFormat)] +public abstract class RespAttributeReader +{ + /// + /// Parse a group of attributes. + /// + public virtual void Read(ref RespReader reader, ref T value) + { + reader.Demand(RespPrefix.Attribute); + _ = ReadKeyValuePairs(ref reader, ref value); + } + + /// + /// Parse an aggregate as a set of key/value pairs. + /// + /// The number of pairs successfully processed. 
+ protected virtual int ReadKeyValuePairs(ref RespReader reader, ref T value) + { + var iterator = reader.AggregateChildren(); + + byte[] pooledBuffer = []; + Span localBuffer = stackalloc byte[128]; + int count = 0; + while (iterator.MoveNext()) + { + if (iterator.Value.IsScalar) + { + var key = iterator.Value.Buffer(ref pooledBuffer, localBuffer); + + if (iterator.MoveNext()) + { + if (ReadKeyValuePair(key, ref iterator.Value, ref value)) + { + count++; + } + } + else + { + break; // no matching value for this key + } + } + else + { + if (iterator.MoveNext()) + { + // we won't try to handle aggregate keys; skip the value + } + else + { + break; // no matching value for this key + } + } + } + iterator.MovePast(out reader); + return count; + } + + /// + /// Parse an individual key/value pair. + /// + /// True if the pair was successfully processed. + public virtual bool ReadKeyValuePair(scoped ReadOnlySpan key, ref RespReader reader, ref T value) => false; +} diff --git a/src/RESPite/Messages/RespFrameScanner.cs b/src/RESPite/Messages/RespFrameScanner.cs new file mode 100644 index 000000000..35650ca1d --- /dev/null +++ b/src/RESPite/Messages/RespFrameScanner.cs @@ -0,0 +1,203 @@ +using System.Buffers; +using System.Buffers.Binary; +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; +using static RESPite.Internal.RespConstants; +namespace RESPite.Messages; + +/// +/// Scans RESP frames. +/// . +[Experimental(Experiments.Respite, UrlFormat = Experiments.UrlFormat)] +public sealed class RespFrameScanner // : IFrameSacanner, IFrameValidator +{ + /// + /// Gets a frame scanner for RESP2 request/response connections, or RESP3 connections. + /// + public static RespFrameScanner Default { get; } = new(false); + + /// + /// Gets a frame scanner that identifies RESP2 pub/sub messages. 
+ /// + public static RespFrameScanner Subscription { get; } = new(true); + private RespFrameScanner(bool pubsub) => _pubsub = pubsub; + private readonly bool _pubsub; + + private static readonly uint FastNull = UnsafeCpuUInt32("_\r\n\0"u8), + SingleCharScalarMask = CpuUInt32(0xFF00FFFF), + SingleDigitInteger = UnsafeCpuUInt32(":\0\r\n"u8), + EitherBoolean = UnsafeCpuUInt32("#\0\r\n"u8), + FirstThree = CpuUInt32(0xFFFFFF00); + private static readonly ulong OK = UnsafeCpuUInt64("+OK\r\n\0\0\0"u8), + PONG = UnsafeCpuUInt64("+PONG\r\n\0"u8), + DoubleCharScalarMask = CpuUInt64(0xFF0000FFFF000000), + DoubleDigitInteger = UnsafeCpuUInt64(":\0\0\r\n"u8), + FirstFive = CpuUInt64(0xFFFFFFFFFF000000), + FirstSeven = CpuUInt64(0xFFFFFFFFFFFFFF00); + + private const OperationStatus UseReader = (OperationStatus)(-1); + private static OperationStatus TryFastRead(ReadOnlySpan data, ref RespScanState info) + { + // use silly math to detect the most common short patterns without needing + // to access a reader, or use indexof etc; handles: + // +OK\r\n + // +PONG\r\n + // :N\r\n for any single-digit N (integer) + // :NN\r\n for any double-digit N (integer) + // #N\r\n for any single-digit N (boolean) + // _\r\n (null) + uint hi, lo; + switch (data.Length) + { + case 0: + case 1: + case 2: + return OperationStatus.NeedMoreData; + case 3: + // assume we're reading as little-endian, so: first byte is low + hi = data[0] | ((uint)data[1] << 8) | ((uint)data[2] << 16); + if (!BitConverter.IsLittleEndian) + { + // compensate if necessary (which: it won't be) + hi = BinaryPrimitives.ReverseEndianness(hi); + } + break; + default: + hi = UnsafeCpuUInt32(data); + break; + } + if ((hi & FirstThree) == FastNull) + { + info.SetComplete(3, RespPrefix.Null); + return OperationStatus.Done; + } + + var masked = hi & SingleCharScalarMask; + if (masked == SingleDigitInteger) + { + info.SetComplete(4, RespPrefix.Integer); + return OperationStatus.Done; + } + else if (masked == EitherBoolean) + { + 
info.SetComplete(4, RespPrefix.Boolean); + return OperationStatus.Done; + } + + switch (data.Length) + { + case 3: + return OperationStatus.NeedMoreData; + case 4: + return UseReader; + case 5: + lo = ((uint)data[4]) << 24; + break; + case 6: + lo = ((uint)UnsafeCpuUInt16(data, 4)) << 16; + break; + case 7: + lo = ((uint)UnsafeCpuUInt16(data, 4)) << 16 | ((uint)UnsafeCpuByte(data, 6)) << 8; + break; + default: + lo = UnsafeCpuUInt32(data, 4); + break; + } + var u64 = BitConverter.IsLittleEndian ? ((((ulong)lo) << 32) | hi) : ((((ulong)hi) << 32) | lo); + if (((u64 & FirstFive) == OK) | ((u64 & DoubleCharScalarMask) == DoubleDigitInteger)) + { + info.SetComplete(5, RespPrefix.SimpleString); + return OperationStatus.Done; + } + if ((u64 & FirstSeven) == PONG) + { + info.SetComplete(7, RespPrefix.SimpleString); + return OperationStatus.Done; + } + return UseReader; + } + + /// + /// Attempt to read more data as part of the current frame. + /// + public OperationStatus TryRead(ref RespScanState state, in ReadOnlySequence data) + { + if (!_pubsub & state.TotalBytes == 0 & data.IsSingleSegment) + { +#if NET + var status = TryFastRead(data.FirstSpan, ref state); +#else + var status = TryFastRead(data.First.Span, ref state); +#endif + if (status != UseReader) return status; + } + + return TryReadViaReader(ref state, in data); + + static OperationStatus TryReadViaReader(ref RespScanState state, in ReadOnlySequence data) + { + var reader = new RespReader(in data); + var complete = state.TryRead(ref reader, out var consumed); + if (complete) + { + return OperationStatus.Done; + } + return OperationStatus.NeedMoreData; + } + } + + /// + /// Attempt to read more data as part of the current frame. 
+ /// + public OperationStatus TryRead(ref RespScanState state, ReadOnlySpan data) + { + if (!_pubsub & state.TotalBytes == 0) + { +#if NET + var status = TryFastRead(data, ref state); +#else + var status = TryFastRead(data, ref state); +#endif + if (status != UseReader) return status; + } + + return TryReadViaReader(ref state, data); + + static OperationStatus TryReadViaReader(ref RespScanState state, ReadOnlySpan data) + { + var reader = new RespReader(data); + var complete = state.TryRead(ref reader, out var consumed); + if (complete) + { + return OperationStatus.Done; + } + return OperationStatus.NeedMoreData; + } + } + + /// + /// Validate that the supplied message is a valid RESP request, specifically: that it contains a single + /// top-level array payload with bulk-string elements, the first of which is non-empty (the command). + /// + public void ValidateRequest(in ReadOnlySequence message) + { + if (message.IsEmpty) Throw("Empty RESP frame"); + RespReader reader = new(in message); + reader.MoveNext(RespPrefix.Array); + reader.DemandNotNull(); + if (reader.IsStreaming) Throw("Streaming is not supported in this context"); + var count = reader.AggregateLength(); + for (int i = 0; i < count; i++) + { + reader.MoveNext(RespPrefix.BulkString); + reader.DemandNotNull(); + if (reader.IsStreaming) Throw("Streaming is not supported in this context"); + + if (i == 0 && reader.ScalarIsEmpty()) Throw("command must be non-empty"); + } + reader.DemandEnd(); + + static void Throw(string message) => throw new InvalidOperationException(message); + } +} diff --git a/src/RESPite/Messages/RespPrefix.cs b/src/RESPite/Messages/RespPrefix.cs new file mode 100644 index 000000000..d58749120 --- /dev/null +++ b/src/RESPite/Messages/RespPrefix.cs @@ -0,0 +1,100 @@ +using System.Diagnostics.CodeAnalysis; + +namespace RESPite.Messages; + +/// +/// RESP protocol prefix. 
+/// +[Experimental(Experiments.Respite, UrlFormat = Experiments.UrlFormat)] +public enum RespPrefix : byte +{ + /// + /// Invalid. + /// + None = 0, + + /// + /// Simple strings: +OK\r\n. + /// + SimpleString = (byte)'+', + + /// + /// Simple errors: -ERR message\r\n. + /// + SimpleError = (byte)'-', + + /// + /// Integers: :123\r\n. + /// + Integer = (byte)':', + + /// + /// String with support for binary data: $7\r\nmessage\r\n. + /// + BulkString = (byte)'$', + + /// + /// Multiple inner messages: *1\r\n+message\r\n. + /// + Array = (byte)'*', + + /// + /// Null strings/arrays: _\r\n. + /// + Null = (byte)'_', + + /// + /// Boolean values: #T\r\n. + /// + Boolean = (byte)'#', + + /// + /// Floating-point number: ,123.45\r\n. + /// + Double = (byte)',', + + /// + /// Large integer number: (12...89\r\n. + /// + BigInteger = (byte)'(', + + /// + /// Error with support for binary data: !7\r\nmessage\r\n. + /// + BulkError = (byte)'!', + + /// + /// String that should be interpreted verbatim: =11\r\ntxt:message\r\n. + /// + VerbatimString = (byte)'=', + + /// + /// Multiple sub-items that represent a map. + /// + Map = (byte)'%', + + /// + /// Multiple sub-items that represent a set. + /// + Set = (byte)'~', + + /// + /// Out-of band messages. + /// + Push = (byte)'>', + + /// + /// Continuation of streaming scalar values. + /// + StreamContinuation = (byte)';', + + /// + /// End sentinel for streaming aggregate values. + /// + StreamTerminator = (byte)'.', + + /// + /// Metadata about the next element. 
+ /// + Attribute = (byte)'|', +} diff --git a/src/RESPite/Messages/RespReader.AggregateEnumerator.cs b/src/RESPite/Messages/RespReader.AggregateEnumerator.cs new file mode 100644 index 000000000..412ef6ab5 --- /dev/null +++ b/src/RESPite/Messages/RespReader.AggregateEnumerator.cs @@ -0,0 +1,279 @@ +using System.Collections; +using System.ComponentModel; +using System.Diagnostics.CodeAnalysis; + +#pragma warning disable IDE0079 // Remove unnecessary suppression +#pragma warning disable CS0282 // There is no defined ordering between fields in multiple declarations of partial struct +#pragma warning restore IDE0079 // Remove unnecessary suppression + +namespace RESPite.Messages; + +public ref partial struct RespReader +{ + /// + /// Reads the sub-elements associated with an aggregate value. For convenience, when + /// using foreach () the reader + /// is advanced into the child element ready for reading, which bypasses attributes. If attributes + /// are required from child elements, the iterator can be advanced manually (not via + /// foreach using an optional attribute-reader in the call. + /// + public readonly AggregateEnumerator AggregateChildren() => new(in this); + + /// + /// Reads the sub-elements associated with an aggregate value. + /// + public ref struct AggregateEnumerator + { + // Note that _reader is the overall reader that can see outside this aggregate, as opposed + // to Current which is the sub-tree of the current element *only* + private RespReader _reader; + private int _remaining; + + /// + /// Create a new enumerator for the specified . + /// + /// The reader containing the data for this operation. + public AggregateEnumerator(scoped in RespReader reader) + { + reader.DemandAggregate(); + _remaining = reader.IsStreaming ? 
-1 : reader._length; + _reader = reader; + Value = default; + } + + /// + public readonly AggregateEnumerator GetEnumerator() => this; + + /// + [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] +#if DEBUG +#if NET8_0 // strictly net8; net10 and our polyfill have .Message + [Experimental("SERDBG")] +#else + [Experimental("SERDBG", Message = $"Prefer {nameof(Value)}")] +#endif +#endif + public RespReader Current => Value; + + /// + /// Gets the current element associated with this reader. + /// + public RespReader Value; // intentionally a field, because of ref-semantics + + /// + /// Move to the next child if possible, and move the child element into the next node. + /// + public bool MoveNext(RespPrefix prefix) + { + bool result = MoveNextRaw(); + if (result) + { + Value.MoveNext(prefix); + } + return result; + } + + /// + /// Move to the next child if possible, and move the child element into the next node. + /// + /// The type of data represented by this reader. + public bool MoveNext(RespPrefix prefix, RespAttributeReader respAttributeReader, ref T attributes) + { + bool result = MoveNextRaw(respAttributeReader, ref attributes); + if (result) + { + Value.MoveNext(prefix); + } + return result; + } + + /// + /// Move to the next child and leave the reader *ahead of* the first element, + /// allowing us to read attribute data. + /// + /// If you are not consuming attribute data, is preferred. + public bool MoveNextRaw() + { + object? attributes = null; + return MoveNextCore(null, ref attributes); + } + + /// + /// Move to the next child and move into the first element (skipping attributes etc), leaving it ready to consume. + /// + public bool MoveNext() + { + object? 
attributes = null; + if (MoveNextCore(null, ref attributes)) + { + Value.MoveNext(); + return true; + } + return false; + } + + /// + /// Move to the next child (capturing attribute data) and leave the reader *ahead of* the first element, + /// allowing us to also read attribute data of the child. + /// + /// The type of attribute data represented by this reader. + /// If you are not consuming attribute data, is preferred. + public bool MoveNextRaw(RespAttributeReader respAttributeReader, ref T attributes) + => MoveNextCore(respAttributeReader, ref attributes); + + /// > + private bool MoveNextCore(RespAttributeReader? attributeReader, ref T attributes) + { + if (_remaining == 0) + { + Value = default; + return false; + } + + // in order to provide access to attributes etc, we want Current to be positioned + // *before* the next element; for that, we'll take a snapshot before we read + _reader.MovePastCurrent(); + var snapshot = _reader.Clone(); + + if (!(attributeReader is null + ? _reader.TryReadNextSkipAttributes(skipStreamTerminator: false) + : _reader.TryReadNextProcessAttributes(attributeReader, ref attributes, false))) + { + if (_remaining != 0) ThrowEof(); // incomplete aggregate, simple or streaming + _remaining = 0; + Value = default; + return false; + } + + if (_remaining > 0) + { + // non-streaming, decrement + _remaining--; + } + else if (_reader.Prefix == RespPrefix.StreamTerminator) + { + // end of streaming aggregate + _remaining = 0; + Value = default; + return false; + } + + // move past that sub-tree and trim the "snapshot" state, giving + // us a scoped reader that is *just* that sub-tree + _reader.SkipChildren(); + snapshot.TrimToTotal(_reader.BytesConsumed); + + Value = snapshot; + return true; + } + + /// + /// Move to the end of this aggregate and export the state of the . + /// + /// The reader positioned at the end of the data; this is commonly + /// used to update a tree reader, to get to the next data after the aggregate. 
+ public void MovePast(out RespReader reader) + { + while (MoveNextRaw()) { } + reader = _reader; + } + + /// + /// Moves to the next element, and moves into that element (skipping attributes etc), leaving it ready to consume. + /// + public void DemandNext() + { + if (!MoveNext()) ThrowEof(); + } + + public T ReadOne(Projection projection) + { + DemandNext(); + return projection(ref Value); + } + + public void FillAll(scoped Span target, Projection projection) + { + FillAll(target, ref projection, static (ref projection, ref reader) => projection(ref reader)); + } + + public void FillAll(scoped Span target, ref TState state, Projection projection) +#if NET10_0_OR_GREATER + where TState : allows ref struct +#endif + { + for (int i = 0; i < target.Length; i++) + { + DemandNext(); + target[i] = projection(ref state, ref Value); + } + } + + public void FillAll( + scoped Span target, + Projection first, + Projection second, + Func combine) + { + for (int i = 0; i < target.Length; i++) + { + DemandNext(); + + var x = first(ref Value); + + DemandNext(); + + var y = second(ref Value); + target[i] = combine(x, y); + } + } + + public void FillAll( + scoped Span target, + ref TState state, + Projection first, + Projection second, + Func combine) +#if NET10_0_OR_GREATER + where TState : allows ref struct +#endif + { + for (int i = 0; i < target.Length; i++) + { + DemandNext(); + + var x = first(ref state, ref Value); + + DemandNext(); + + var y = second(ref state, ref Value); + target[i] = combine(state, x, y); + } + } + } + + internal void TrimToTotal(long length) => TrimToRemaining(length - BytesConsumed); + + internal void TrimToRemaining(long bytes) + { + if (_prefix != RespPrefix.None || bytes < 0) Throw(); + + var current = CurrentAvailable; + if (bytes <= current) + { + UnsafeTrimCurrentBy(current - (int)bytes); + _remainingTailLength = 0; + return; + } + + bytes -= current; + if (bytes <= _remainingTailLength) + { + _remainingTailLength = bytes; + return; + } + + 
Throw(); + static void Throw() => throw new ArgumentOutOfRangeException(nameof(bytes)); + } +} diff --git a/src/RESPite/Messages/RespReader.Debug.cs b/src/RESPite/Messages/RespReader.Debug.cs new file mode 100644 index 000000000..71d5a44af --- /dev/null +++ b/src/RESPite/Messages/RespReader.Debug.cs @@ -0,0 +1,59 @@ +using System.Buffers; +using System.Diagnostics; +using System.Text; + +#pragma warning disable IDE0079 // Remove unnecessary suppression +#pragma warning disable CS0282 // There is no defined ordering between fields in multiple declarations of partial struct +#pragma warning restore IDE0079 // Remove unnecessary suppression + +namespace RESPite.Messages; + +[DebuggerDisplay($"{{{nameof(GetDebuggerDisplay)}(),nq}}")] +public ref partial struct RespReader +{ + internal bool DebugEquals(in RespReader other) + => _prefix == other._prefix + && _length == other._length + && _flags == other._flags + && _bufferIndex == other._bufferIndex + && _positionBase == other._positionBase + && _remainingTailLength == other._remainingTailLength; + + internal new string ToString() => $"{Prefix} ({_flags}); length {_length}, {TotalAvailable} remaining"; + + internal void DebugReset() + { + _bufferIndex = 0; + _length = 0; + _flags = 0; + _prefix = RespPrefix.None; + } + +#if DEBUG + internal bool VectorizeDisabled { get; set; } +#endif + + private partial ReadOnlySpan ActiveBuffer { get; } + + internal readonly string BufferUtf8() + { + var clone = Clone(); + var active = clone.ActiveBuffer; + var totalLen = checked((int)(active.Length + clone._remainingTailLength)); + var oversized = ArrayPool.Shared.Rent(totalLen); + Span target = oversized.AsSpan(0, totalLen); + + while (!target.IsEmpty) + { + active.CopyTo(target); + target = target.Slice(active.Length); + if (!clone.TryMoveToNextSegment()) break; + active = clone.ActiveBuffer; + } + if (!target.IsEmpty) throw new EndOfStreamException(); + + var s = Encoding.UTF8.GetString(oversized, 0, totalLen); + 
ArrayPool.Shared.Return(oversized); + return s; + } +} diff --git a/src/RESPite/Messages/RespReader.ScalarEnumerator.cs b/src/RESPite/Messages/RespReader.ScalarEnumerator.cs new file mode 100644 index 000000000..9e8ffbe70 --- /dev/null +++ b/src/RESPite/Messages/RespReader.ScalarEnumerator.cs @@ -0,0 +1,105 @@ +using System.Buffers; +using System.Collections; + +#pragma warning disable IDE0079 // Remove unnecessary suppression +#pragma warning disable CS0282 // There is no defined ordering between fields in multiple declarations of partial struct +#pragma warning restore IDE0079 // Remove unnecessary suppression + +namespace RESPite.Messages; + +public ref partial struct RespReader +{ + /// + /// Gets the chunks associated with a scalar value. + /// + public readonly ScalarEnumerator ScalarChunks() => new(in this); + + /// + /// Allows enumeration of chunks in a scalar value; this includes simple values + /// that span multiple segments, and streaming + /// scalar RESP values. + /// + public ref struct ScalarEnumerator + { + /// + public readonly ScalarEnumerator GetEnumerator() => this; + + private RespReader _reader; + + private ReadOnlySpan _current; + private ReadOnlySequenceSegment? _tail; + private int _offset, _remaining; + + /// + /// Create a new enumerator for the specified . + /// + /// The reader containing the data for this operation. 
+ public ScalarEnumerator(scoped in RespReader reader) + { + reader.DemandScalar(); + _reader = reader; + InitSegment(); + } + + private void InitSegment() + { + _current = _reader.CurrentSpan(); + _tail = _reader._tail; + _offset = CurrentLength = 0; + _remaining = _reader._length; + if (_reader.TotalAvailable < _remaining) ThrowEof(); + } + + /// + public bool MoveNext() + { + while (true) // for each streaming element + { + _offset += CurrentLength; + while (_remaining > 0) // for each span in the current element + { + // look in the active span + var take = Math.Min(_remaining, _current.Length - _offset); + if (take > 0) // more in the current chunk + { + _remaining -= take; + CurrentLength = take; + return true; + } + + // otherwise, we expect more tail data + if (_tail is null) ThrowEof(); + + _current = _tail.Memory.Span; + _offset = 0; + _tail = _tail.Next; + } + + if (!_reader.MoveNextStreamingScalar()) break; + InitSegment(); + } + + CurrentLength = 0; + return false; + } + + /// + public readonly ReadOnlySpan Current => _current.Slice(_offset, CurrentLength); + + /// + /// Gets the or . + /// + public int CurrentLength { readonly get; private set; } + + /// + /// Move to the end of this aggregate and export the state of the . + /// + /// The reader positioned at the end of the data; this is commonly + /// used to update a tree reader, to get to the next data after the aggregate. 
+ public void MovePast(out RespReader reader) + { + while (MoveNext()) { } + reader = _reader; + } + } +} diff --git a/src/RESPite/Messages/RespReader.Span.cs b/src/RESPite/Messages/RespReader.Span.cs new file mode 100644 index 000000000..cfea585c4 --- /dev/null +++ b/src/RESPite/Messages/RespReader.Span.cs @@ -0,0 +1,86 @@ +#define USE_UNSAFE_SPAN + +using System.Diagnostics; +using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; + +#pragma warning disable IDE0079 // Remove unnecessary suppression +#pragma warning disable CS0282 // There is no defined ordering between fields in multiple declarations of partial struct +#pragma warning restore IDE0079 // Remove unnecessary suppression + +namespace RESPite.Messages; + +/* + How we actually implement the underlying buffer depends on the capabilities of the runtime. + */ + +#if NET8_0_OR_GREATER && USE_UNSAFE_SPAN + +public ref partial struct RespReader +{ + // intent: avoid lots of slicing by dealing with everything manually, and accepting the "don't get it wrong" rule + private ref byte _bufferRoot; + private int _bufferLength; + + private partial void UnsafeTrimCurrentBy(int count) + { + Debug.Assert(count >= 0 && count <= _bufferLength, "Unsafe trim length"); + _bufferLength -= count; + } + + private readonly partial ref byte UnsafeCurrent + { + [MethodImpl(MethodImplOptions.AggressiveInlining)] + get => ref Unsafe.Add(ref _bufferRoot, _bufferIndex); + } + + private readonly partial int CurrentLength + { + [MethodImpl(MethodImplOptions.AggressiveInlining)] + get => _bufferLength; + } + + private readonly partial ReadOnlySpan CurrentSpan() => MemoryMarshal.CreateReadOnlySpan( + ref UnsafeCurrent, CurrentAvailable); + + private readonly partial ReadOnlySpan UnsafePastPrefix() => MemoryMarshal.CreateReadOnlySpan( + ref Unsafe.Add(ref _bufferRoot, _bufferIndex + 1), + _bufferLength - (_bufferIndex + 1)); + + private partial void SetCurrent(ReadOnlySpan value) + { + _bufferRoot = ref 
MemoryMarshal.GetReference(value); + _bufferLength = value.Length; + } + private partial ReadOnlySpan ActiveBuffer => MemoryMarshal.CreateReadOnlySpan(ref _bufferRoot, _bufferLength); +} +#else +public ref partial struct RespReader // much more conservative - uses slices etc +{ + private ReadOnlySpan _buffer; + + private partial void UnsafeTrimCurrentBy(int count) + { + _buffer = _buffer.Slice(0, _buffer.Length - count); + } + + private readonly partial ref byte UnsafeCurrent + { + [MethodImpl(MethodImplOptions.AggressiveInlining)] + get => ref Unsafe.AsRef(in _buffer[_bufferIndex]); // hack around CS8333 + } + + private readonly partial int CurrentLength + { + [MethodImpl(MethodImplOptions.AggressiveInlining)] + get => _buffer.Length; + } + + private readonly partial ReadOnlySpan UnsafePastPrefix() => _buffer.Slice(_bufferIndex + 1); + + private readonly partial ReadOnlySpan CurrentSpan() => _buffer.Slice(_bufferIndex); + + private partial void SetCurrent(ReadOnlySpan value) => _buffer = value; + private partial ReadOnlySpan ActiveBuffer => _buffer; +} +#endif diff --git a/src/RESPite/Messages/RespReader.Utils.cs b/src/RESPite/Messages/RespReader.Utils.cs new file mode 100644 index 000000000..9aca671fb --- /dev/null +++ b/src/RESPite/Messages/RespReader.Utils.cs @@ -0,0 +1,341 @@ +using System.Buffers.Text; +using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using RESPite.Internal; + +#pragma warning disable IDE0079 // Remove unnecessary suppression +#pragma warning disable CS0282 // There is no defined ordering between fields in multiple declarations of partial struct +#pragma warning restore IDE0079 // Remove unnecessary suppression + +namespace RESPite.Messages; + +public ref partial struct RespReader +{ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private void UnsafeAssertClLf(int offset) => UnsafeAssertClLf(ref UnsafeCurrent, offset); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + 
private readonly void UnsafeAssertClLf(scoped ref byte source, int offset) + { + if (Unsafe.ReadUnaligned(ref Unsafe.Add(ref source, offset)) != RespConstants.CrLfUInt16) + { + ThrowProtocolFailure($"Expected CR/LF ({offset}={(char)Unsafe.Add(ref source, offset)})"); + } + } + + private enum LengthPrefixResult + { + NeedMoreData, + Length, + Null, + Streaming, + } + + /// + /// Asserts that the current element is a scalar type. + /// + public readonly void DemandScalar() + { + if (!IsScalar) Throw(Prefix); + static void Throw(RespPrefix prefix) => throw new InvalidOperationException($"This operation requires a scalar element, got {prefix}"); + } + + /// + /// Asserts that the current element is a scalar type. + /// + public readonly void DemandAggregate() + { + if (!IsAggregate) Throw(Prefix); + static void Throw(RespPrefix prefix) => throw new InvalidOperationException($"This operation requires an aggregate element, got {prefix}"); + } + + private readonly LengthPrefixResult TryReadLengthPrefix(ReadOnlySpan bytes, out int value, out int byteCount) + { + var end = bytes.IndexOf(RespConstants.CrlfBytes); + if (end < 0) + { + byteCount = value = 0; + if (bytes.Length >= RespConstants.MaxRawBytesInt32 + 2) + { + ThrowProtocolFailure("Unterminated or over-length integer"); // should have failed; report failure to prevent infinite loop + } + return LengthPrefixResult.NeedMoreData; + } + byteCount = end + 2; + switch (end) + { + case 0: + ThrowProtocolFailure("Length prefix expected"); + goto case default; // not reached, just satisfying definite assignment + case 1 when bytes[0] == (byte)'?': + value = 0; + return LengthPrefixResult.Streaming; + default: + if (end > RespConstants.MaxRawBytesInt32 || !(Utf8Parser.TryParse(bytes, out value, out var consumed) && consumed == end)) + { + ThrowProtocolFailure("Unable to parse integer"); + value = 0; + } + if (value < 0) + { + if (value == -1) + { + value = 0; + return LengthPrefixResult.Null; + } + 
ThrowProtocolFailure("Invalid negative length prefix"); + } + return LengthPrefixResult.Length; + } + } + + /// + /// Create an isolated copy of this reader, which can be advanced independently. + /// + public readonly RespReader Clone() => this; // useful for performing streaming operations without moving the primary + + [MethodImpl(MethodImplOptions.NoInlining), DoesNotReturn] + private readonly void ThrowProtocolFailure(string message) + => throw new InvalidOperationException($"RESP protocol failure around offset {_positionBase}-{BytesConsumed}: {message}"); // protocol exception? + + [MethodImpl(MethodImplOptions.NoInlining), DoesNotReturn] + internal static void ThrowEof() => throw new EndOfStreamException(); + + [MethodImpl(MethodImplOptions.NoInlining), DoesNotReturn] + private static void ThrowFormatException() => throw new FormatException(); + + private int RawTryReadByte() + { + if (_bufferIndex < CurrentLength || TryMoveToNextSegment()) + { + var result = UnsafeCurrent; + _bufferIndex++; + return result; + } + return -1; + } + + private int RawPeekByte() + { + return (CurrentLength < _bufferIndex || TryMoveToNextSegment()) ? 
UnsafeCurrent : -1; + } + + private bool RawAssertCrLf() + { + if (CurrentAvailable >= 2) + { + UnsafeAssertClLf(0); + _bufferIndex += 2; + return true; + } + else + { + int next = RawTryReadByte(); + if (next < 0) return false; + if (next == '\r') + { + next = RawTryReadByte(); + if (next < 0) return false; + if (next == '\n') return true; + } + ThrowProtocolFailure("Expected CR/LF"); + return false; + } + } + + private LengthPrefixResult RawTryReadLengthPrefix() + { + _length = 0; + if (!RawTryFindCrLf(out int end)) + { + if (TotalAvailable >= RespConstants.MaxRawBytesInt32 + 2) + { + ThrowProtocolFailure("Unterminated or over-length integer"); // should have failed; report failure to prevent infinite loop + } + return LengthPrefixResult.NeedMoreData; + } + + switch (end) + { + case 0: + ThrowProtocolFailure("Length prefix expected"); + goto case default; // not reached, just satisfying definite assignment + case 1: + var b = (byte)RawTryReadByte(); + RawAssertCrLf(); + if (b == '?') + { + return LengthPrefixResult.Streaming; + } + else + { + _length = ParseSingleDigit(b); + return LengthPrefixResult.Length; + } + default: + if (end > RespConstants.MaxRawBytesInt32) + { + ThrowProtocolFailure("Unable to parse integer"); + } + Span bytes = stackalloc byte[end]; + RawFillBytes(bytes); + RawAssertCrLf(); + if (!(Utf8Parser.TryParse(bytes, out _length, out var consumed) && consumed == end)) + { + ThrowProtocolFailure("Unable to parse integer"); + } + + if (_length < 0) + { + if (_length == -1) + { + _length = 0; + return LengthPrefixResult.Null; + } + ThrowProtocolFailure("Invalid negative length prefix"); + } + + return LengthPrefixResult.Length; + } + } + + private void RawFillBytes(scoped Span target) + { + do + { + var current = CurrentSpan(); + if (current.Length >= target.Length) + { + // more than enough, need to trim + current.Slice(0, target.Length).CopyTo(target); + _bufferIndex += target.Length; + return; // we're done + } + else + { + // take what we can 
+ current.CopyTo(target); + target = target.Slice(current.Length); + // we could move _bufferIndex here, but we're about to trash that in TryMoveToNextSegment + } + } + while (TryMoveToNextSegment()); + ThrowEof(); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static int ParseSingleDigit(byte value) + { + return value switch + { + (byte)'0' or (byte)'1' or (byte)'2' or (byte)'3' or (byte)'4' or (byte)'5' or (byte)'6' or (byte)'7' or (byte)'8' or (byte)'9' => value - (byte)'0', + _ => Invalid(value), + }; + + [MethodImpl(MethodImplOptions.NoInlining), DoesNotReturn] + static int Invalid(byte value) => throw new FormatException($"Unable to parse integer: '{(char)value}'"); + } + + private readonly bool RawTryAssertInlineScalarPayloadCrLf() + { + Debug.Assert(IsInlineScalar, "should be inline scalar"); + + var reader = Clone(); + var len = reader._length; + if (len == 0) return reader.RawAssertCrLf(); + + do + { + var current = reader.CurrentSpan(); + if (current.Length >= len) + { + reader._bufferIndex += len; + return reader.RawAssertCrLf(); // we're done + } + else + { + // take what we can + len -= current.Length; + // we could move _bufferIndex here, but we're about to trash that in TryMoveToNextSegment + } + } + while (reader.TryMoveToNextSegment()); + return false; // EOF + } + + private readonly bool RawTryFindCrLf(out int length) + { + length = 0; + RespReader reader = Clone(); + do + { + var span = reader.CurrentSpan(); + var index = span.IndexOf((byte)'\r'); + if (index >= 0) + { + checked + { + length += index; + } + // move past the CR and assert the LF + reader._bufferIndex += index + 1; + var next = reader.RawTryReadByte(); + if (next < 0) break; // we don't know + if (next != '\n') ThrowProtocolFailure("CR/LF expected"); + + return true; + } + checked + { + length += span.Length; + } + } + while (reader.TryMoveToNextSegment()); + length = 0; + return false; + } + + private string GetDebuggerDisplay() + { + return ToString(); + } 
+ + internal readonly int GetInitialScanCount(out ushort streamingAggregateDepth) + { + // this is *similar* to GetDelta, but: without any discount for attributes + switch (_flags & (RespFlags.IsAggregate | RespFlags.IsStreaming)) + { + case RespFlags.IsAggregate: + streamingAggregateDepth = 0; + return _length - 1; + case RespFlags.IsAggregate | RespFlags.IsStreaming: + streamingAggregateDepth = 1; + return 0; + default: + streamingAggregateDepth = 0; + return -1; + } + } + + /// + /// Get the raw RESP payload. + /// + public readonly byte[] Serialize() + { + var reader = Clone(); + int remaining = checked((int)reader.TotalAvailable); + var arr = new byte[remaining]; + Span target = arr; + while (remaining > 0) + { + var span = reader.CurrentSpan(); + span.CopyTo(arr); + remaining -= span.Length; + target = target.Slice(span.Length); + if (!reader.TryMoveToNextSegment()) break; + } + if (remaining != 0 | !target.IsEmpty) ThrowEof(); + return arr; + } +} diff --git a/src/RESPite/Messages/RespReader.cs b/src/RESPite/Messages/RespReader.cs new file mode 100644 index 000000000..b2288b574 --- /dev/null +++ b/src/RESPite/Messages/RespReader.cs @@ -0,0 +1,2037 @@ +using System.Buffers; +using System.Buffers.Text; +using System.ComponentModel; +using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; +using System.Globalization; +using System.Runtime.CompilerServices; +using System.Text; +using RESPite.Internal; + +#if NET +using System.Runtime.Intrinsics; +using System.Runtime.Intrinsics.X86; +#endif + +#pragma warning disable IDE0079 // Remove unnecessary suppression +#pragma warning disable CS0282 // There is no defined ordering between fields in multiple declarations of partial struct +#pragma warning restore IDE0079 // Remove unnecessary suppression + +namespace RESPite.Messages; + +/// +/// Provides low level RESP parsing functionality. 
+/// +[Experimental(Experiments.Respite, UrlFormat = Experiments.UrlFormat)] +public ref partial struct RespReader +{ + [Flags] + private enum RespFlags : byte + { + None = 0, + IsScalar = 1 << 0, // simple strings, bulk strings, etc + IsAggregate = 1 << 1, // arrays, maps, sets, etc + IsNull = 1 << 2, // explicit null RESP types, or bulk-strings/aggregates with length -1 + IsInlineScalar = 1 << 3, // a non-null scalar, i.e. with payload+CrLf + IsAttribute = 1 << 4, // is metadata for following elements + IsStreaming = 1 << 5, // unknown length + IsError = 1 << 6, // an explicit error reported inside the protocol + } + + // relates to the element we're currently reading + private RespFlags _flags; + private RespPrefix _prefix; + + private int _length; // for null: 0; for scalars: the length of the payload; for aggregates: the child count + + // the current buffer that we're observing + private int _bufferIndex; // after TryRead, this should be positioned immediately before the actual data + + // the position in a multi-segment payload + private long _positionBase; // total data we've already moved past in *previous* buffers + private ReadOnlySequenceSegment? _tail; // the next tail node + private long _remainingTailLength; // how much more can we consume from the tail? + + public long ProtocolBytesRemaining => TotalAvailable; + + private readonly int CurrentAvailable => CurrentLength - _bufferIndex; + + private readonly long TotalAvailable => CurrentAvailable + _remainingTailLength; + private partial void UnsafeTrimCurrentBy(int count); + private readonly partial ref byte UnsafeCurrent { get; } + private readonly partial int CurrentLength { get; } + private partial void SetCurrent(ReadOnlySpan value); + private RespPrefix UnsafePeekPrefix() => (RespPrefix)UnsafeCurrent; + private readonly partial ReadOnlySpan UnsafePastPrefix(); + private readonly partial ReadOnlySpan CurrentSpan(); + + /// + /// Get the scalar value as a single-segment span. 
+ /// + /// True if this is a non-streaming scalar element that covers a single span only, otherwise False. + /// If a scalar reports False, can be used to iterate the entire payload. + /// When True, the contents of the scalar value. + public readonly bool TryGetSpan(out ReadOnlySpan value) + { + if (IsInlineScalar && CurrentAvailable >= _length) + { + value = CurrentSpan().Slice(0, _length); + return true; + } + + value = default; + return IsNullScalar; + } + + /// + /// Returns the position after the end of the current element. + /// + public readonly long BytesConsumed => _positionBase + _bufferIndex + TrailingLength; + + /// + /// Body length of scalar values, plus any terminating sentinels. + /// + private readonly int TrailingLength => (_flags & RespFlags.IsInlineScalar) == 0 ? 0 : (_length + 2); + + /// + /// Gets the RESP kind of the current element. + /// + public readonly RespPrefix Prefix => _prefix; + + /// + /// The payload length of this scalar element (includes combined length for streaming scalars). + /// + public readonly int ScalarLength() => + IsInlineScalar ? _length : IsNullScalar ? 0 : checked((int)ScalarLengthSlow()); + + /// + /// Indicates whether this scalar value is zero-length. + /// + public readonly bool ScalarIsEmpty() => + IsInlineScalar ? _length == 0 : (IsNullScalar || !ScalarChunks().MoveNext()); + + /// + /// Indicates whether this aggregate value is zero-length. + /// + public readonly bool AggregateIsEmpty() => AggregateLengthIs(0); + + /// + /// The payload length of this scalar element (includes combined length for streaming scalars). + /// + public readonly long ScalarLongLength() => IsInlineScalar ? _length : IsNullScalar ? 0 : ScalarLengthSlow(); + + /// + /// Indicates whether the payload length of this scalar element is exactly the specified value. + /// + public readonly bool ScalarLengthIs(int count) + => IsInlineScalar ? _length == count : (IsNullScalar ? 
count == 0 : ScalarLengthIsSlow(count)); + + private readonly long ScalarLengthSlow() + { + DemandScalar(); + long length = 0; + var iterator = ScalarChunks(); + while (iterator.MoveNext()) + { + length += iterator.CurrentLength; + } + + return length; + } + + private readonly bool ScalarLengthIsSlow(int expected) + { + DemandScalar(); + int length = 0; + var iterator = ScalarChunks(); + while (length <= expected && iterator.MoveNext()) // short-circuit if we've read enough to know + { + length += iterator.CurrentLength; + } + + return length == expected; + } + + /// + /// The number of child elements associated with an aggregate. + /// + /// For + /// and aggregates, this is twice the value reported in the RESP protocol, + /// i.e. a map of the form %2\r\n... will report 4 as the length. + /// Note that if the data could be streaming (), it may be preferable to use + /// the API, using the API to update the outer reader. + public readonly int AggregateLength() => + (_flags & (RespFlags.IsAggregate | RespFlags.IsStreaming)) == RespFlags.IsAggregate + ? _length : AggregateLengthSlow(); + + /// + /// Indicates whether the number of child elements associated with an aggregate is exactly the specified value. + /// + /// For + /// and aggregates, this is twice the value reported in the RESP protocol, + /// i.e. a map of the form %2\r\n... will report 4 as the length. + public readonly bool AggregateLengthIs(int count) + => (_flags & (RespFlags.IsAggregate | RespFlags.IsStreaming)) == RespFlags.IsAggregate + ? 
_length == count : AggregateLengthIsSlow(count); + + public delegate T Projection(ref RespReader value); + + public delegate TResult Projection(ref TState state, ref RespReader value) +#if NET10_0_OR_GREATER + where TState : allows ref struct +#endif + ; + + public void FillAll(scoped Span target, Projection projection) + { + DemandNotNull(); + AggregateChildren().FillAll(target, projection); + } + + public void FillAll(scoped Span target, ref TState state, Projection projection) + { + DemandNotNull(); + AggregateChildren().FillAll(target, ref state, projection); + } + + private readonly int AggregateLengthSlow() + { + switch (_flags & (RespFlags.IsAggregate | RespFlags.IsStreaming)) + { + case RespFlags.IsAggregate: + return _length; + case RespFlags.IsAggregate | RespFlags.IsStreaming: + break; + default: + DemandAggregate(); // we expect this to throw + break; + } + + int count = 0; + var reader = Clone(); + while (true) + { + if (!reader.TryReadNextSkipAttributes(skipStreamTerminator: false)) ThrowEof(); + if (reader.Prefix == RespPrefix.StreamTerminator) + { + return count; + } + + reader.SkipChildren(); + count++; + } + } + + private readonly bool AggregateLengthIsSlow(int expected) + { + switch (_flags & (RespFlags.IsAggregate | RespFlags.IsStreaming)) + { + case RespFlags.IsAggregate: + return _length == expected; + case RespFlags.IsAggregate | RespFlags.IsStreaming: + break; + default: + DemandAggregate(); // we expect this to throw + break; + } + + int count = 0; + var reader = Clone(); + while (count <= expected) // short-circuit if we've read enough to know + { + if (!reader.TryReadNextSkipAttributes(skipStreamTerminator: false)) ThrowEof(); + if (reader.Prefix == RespPrefix.StreamTerminator) + { + break; + } + + reader.SkipChildren(); + count++; + } + return count == expected; + } + + /// + /// Indicates whether this is a scalar value, i.e. with a potential payload body. 
+ /// + public readonly bool IsScalar => (_flags & RespFlags.IsScalar) != 0; + + internal readonly bool IsInlineScalar => (_flags & RespFlags.IsInlineScalar) != 0; + + internal readonly bool IsNullScalar => + (_flags & (RespFlags.IsScalar | RespFlags.IsNull)) == (RespFlags.IsScalar | RespFlags.IsNull); + + /// + /// Indicates whether this is an aggregate value, i.e. represents a collection of sub-values. + /// + public readonly bool IsAggregate => (_flags & RespFlags.IsAggregate) != 0; + + internal readonly bool IsNonNullAggregate + => (_flags & (RespFlags.IsAggregate | RespFlags.IsNull)) == RespFlags.IsAggregate; + + /// + /// Indicates whether this is a null value; this could be an explicit , + /// or a scalar or aggregate a negative reported length. + /// + public readonly bool IsNull => (_flags & RespFlags.IsNull) != 0; + + /// + /// Indicates whether this is an attribute value, i.e. metadata relating to later element data. + /// + public readonly bool IsAttribute => (_flags & RespFlags.IsAttribute) != 0; + + /// + /// Indicates whether this represents streaming content, where the or is not known in advance. + /// + public readonly bool IsStreaming => (_flags & RespFlags.IsStreaming) != 0; + + /// + /// Equivalent to both and . + /// + internal readonly bool IsStreamingScalar => (_flags & (RespFlags.IsScalar | RespFlags.IsStreaming)) == + (RespFlags.IsScalar | RespFlags.IsStreaming); + + /// + /// Indicates errors reported inside the protocol. + /// + public readonly bool IsError => (_flags & RespFlags.IsError) != 0; + + /// + /// Gets the effective change (in terms of how many RESP nodes we expect to see) from consuming this element. + /// For simple scalars, this is -1 because we have one less node to read; for simple aggregates, this is + /// AggregateLength-1 because we will have consumed one element, but now need to read the additional + /// child elements. Attributes report 0, since they supplement data + /// we still need to consume. 
The final terminator for streaming data reports a delta of -1, otherwise: 0. + /// + /// This does not account for being nested inside a streaming aggregate; the caller must deal with that manually. + internal int Delta() => + (_flags & (RespFlags.IsScalar | RespFlags.IsAggregate | RespFlags.IsStreaming | RespFlags.IsAttribute)) switch + { + RespFlags.IsScalar | RespFlags.IsAggregate=> -1, // null has this + RespFlags.IsScalar => -1, + RespFlags.IsAggregate => _length - 1, + RespFlags.IsAggregate | RespFlags.IsAttribute => _length, + _ => 0, + }; + + /// + /// Assert that this is the final element in the current payload. + /// + /// If additional elements are available. + public void DemandEnd() + { +#pragma warning disable CS0618 // avoid TryReadNext unless you know what you're doing + while (IsStreamingScalar) + { + if (!TryReadNext()) ThrowEof(); + } + + if (TryReadNext()) + { + Throw(Prefix); + } +#pragma warning restore CS0618 + + static void Throw(RespPrefix prefix) => + throw new InvalidOperationException($"Expected end of payload, but found {prefix}"); + } + + private bool TryReadNextSkipAttributes(bool skipStreamTerminator) + { +#pragma warning disable CS0618 // avoid TryReadNext unless you know what you're doing + while (TryReadNext()) + { + if (IsAttribute) + { + SkipChildren(); + } + else if (skipStreamTerminator & Prefix is RespPrefix.StreamTerminator) + { + // skip terminator + } + else + { + return true; + } + } +#pragma warning restore CS0618 + return false; + } + + private bool TryReadNextProcessAttributes(RespAttributeReader respAttributeReader, ref T attributes, bool skipStreamTerminator) + { +#pragma warning disable CS0618 // avoid TryReadNext unless you know what you're doing + while (TryReadNext()) +#pragma warning restore CS0618 + { + if (IsAttribute) + { + respAttributeReader.Read(ref this, ref attributes); + } + else if (skipStreamTerminator & Prefix is RespPrefix.StreamTerminator) + { + // skip terminator + } + else + { + return true; + } 
+ } + + return false; + } + + /// + /// Move to the next content element; this skips attribute metadata, checking for RESP error messages by default. + /// + /// If the data is exhausted before a streaming scalar is exhausted. + /// If the data contains an explicit error element. + public bool TryMoveNext() + { + while (IsStreamingScalar) // close out the current streaming scalar + { + if (!TryReadNextSkipAttributes(false)) ThrowEof(); + } + + if (TryReadNextSkipAttributes(true)) + { + if (IsError) ThrowError(); + return true; + } + + return false; + } + + /// + /// Move to the next content element; this skips attribute metadata, checking for RESP error messages by default. + /// + /// Whether to check and throw for error messages. + /// If the data is exhausted before a streaming scalar is exhausted. + /// If the data contains an explicit error element. + public bool TryMoveNext(bool checkError) + { + while (IsStreamingScalar) // close out the current streaming scalar + { + if (!TryReadNextSkipAttributes(false)) ThrowEof(); + } + + if (TryReadNextSkipAttributes(true)) + { + if (checkError && IsError) ThrowError(); + return true; + } + + return false; + } + + /// + /// Move to the next content element; this skips attribute metadata, checking for RESP error messages by default. + /// + /// Parser for attribute data preceding the data. + /// The state for attributes encountered. + /// If the data is exhausted before a streaming scalar is exhausted. + /// If the data contains an explicit error element. + /// The type of data represented by this reader. 
+ public bool TryMoveNext(RespAttributeReader respAttributeReader, ref T attributes) + { + while (IsStreamingScalar) // close out the current streaming scalar + { + if (!TryReadNextSkipAttributes(false)) ThrowEof(); + } + + if (TryReadNextProcessAttributes(respAttributeReader, ref attributes, true)) + { + if (IsError) ThrowError(); + return true; + } + + return false; + } + + /// + /// Move to the next content element, asserting that it is of the expected type; this skips attribute metadata, checking for RESP error messages by default. + /// + /// The expected data type. + /// If the data is exhausted before a streaming scalar is exhausted. + /// If the data contains an explicit error element. + /// If the data is not of the expected type. + public bool TryMoveNext(RespPrefix prefix) + { + bool result = TryMoveNext(); + if (result) Demand(prefix); + return result; + } + + /// + /// Move to the next content element; this skips attribute metadata, checking for RESP error messages by default. + /// + /// If the data is exhausted before content is found. + /// If the data contains an explicit error element. + public void MoveNext() + { + if (!TryMoveNext()) ThrowEof(); + } + + /// + /// Move to the next content element; this skips attribute metadata, checking for RESP error messages by default. + /// + /// Parser for attribute data preceding the data. + /// The state for attributes encountered. + /// If the data is exhausted before content is found. + /// If the data contains an explicit error element. + /// The type of data represented by this reader. 
+ public void MoveNext(RespAttributeReader respAttributeReader, ref T attributes) + { + if (!TryMoveNext(respAttributeReader, ref attributes)) ThrowEof(); + } + + private bool MoveNextStreamingScalar() + { + if (IsStreamingScalar) + { +#pragma warning disable CS0618 // avoid TryReadNext unless you know what you're doing + while (TryReadNext()) +#pragma warning restore CS0618 + { + if (IsAttribute) + { + SkipChildren(); + } + else + { + if (Prefix != RespPrefix.StreamContinuation) + ThrowProtocolFailure("Streaming continuation expected"); + return _length > 0; + } + } + + ThrowEof(); // we should have found something! + } + + return false; + } + + /// + /// Move to the next content element () and assert that it is a scalar (). + /// + /// If the data is exhausted before content is found. + /// If the data contains an explicit error element. + /// If the data is not a scalar type. + public void MoveNextScalar() + { + MoveNext(); + DemandScalar(); + } + + /// + /// Move to the next content element () and assert that it is an aggregate (). + /// + /// If the data is exhausted before content is found. + /// If the data contains an explicit error element. + /// If the data is not an aggregate type. + public void MoveNextAggregate() + { + MoveNext(); + DemandAggregate(); + } + + /// + /// Move to the next content element () and assert that it of type specified + /// in . + /// + /// The expected data type. + /// Parser for attribute data preceding the data. + /// The state for attributes encountered. + /// If the data is exhausted before content is found. + /// If the data contains an explicit error element. + /// If the data is not of the expected type. + /// The type of data represented by this reader. 
+ public void MoveNext(RespPrefix prefix, RespAttributeReader respAttributeReader, ref T attributes) + { + MoveNext(respAttributeReader, ref attributes); + Demand(prefix); + } + + /// + /// Move to the next content element () and assert that it of type specified + /// in . + /// + /// The expected data type. + /// If the data is exhausted before content is found. + /// If the data contains an explicit error element. + /// If the data is not of the expected type. + public void MoveNext(RespPrefix prefix) + { + MoveNext(); + Demand(prefix); + } + + internal void Demand(RespPrefix prefix) + { + if (Prefix != prefix) Throw(prefix, Prefix); + + static void Throw(RespPrefix expected, RespPrefix actual) => + throw new InvalidOperationException($"Expected {expected} element, but found {actual}."); + } + + private readonly void ThrowError() => throw new RespException(ReadString()!); + + /// + /// Skip all sub elements of the current node; this includes both aggregate children and scalar streaming elements. + /// + public void SkipChildren() + { + // if this is a simple non-streaming scalar, then: there's nothing complex to do; otherwise, re-use the + // frame scanner logic to seek past the noise (this way, we avoid recursion etc) + switch (_flags & (RespFlags.IsScalar | RespFlags.IsAggregate | RespFlags.IsStreaming)) + { + case RespFlags.None: + // no current element + break; + case RespFlags.IsScalar: + // simple scalar + MovePastCurrent(); + break; + default: + // something more complex + RespScanState state = new(in this); + if (!state.TryRead(ref this, out _)) ThrowEof(); + break; + } + } + + /// + /// Reads the current element as a string value. + /// + public readonly string? ReadString() => ReadString(out _); + + /// + /// Reads the current element as a string value. + /// + public readonly string? 
ReadString(out string prefix) + { + byte[] pooled = []; + try + { + var span = Buffer(ref pooled, stackalloc byte[256]); + prefix = ""; + if (span.IsEmpty) + { + return IsNull ? null : ""; + } + + if (Prefix == RespPrefix.VerbatimString + && span.Length >= 4 && span[3] == ':') + { + // "the first three bytes provide information about the format of the following string, + // which can be txt for plain text, or mkd for markdown. The fourth byte is always :. + // Then the real string follows." + var prefixValue = RespConstants.UnsafeCpuUInt32(span); + if (prefixValue == PrefixTxt) + { + prefix = "txt"; + } + else if (prefixValue == PrefixMkd) + { + prefix = "mkd"; + } + else + { + prefix = RespConstants.UTF8.GetString(span.Slice(0, 3)); + } + + span = span.Slice(4); + } + + return RespConstants.UTF8.GetString(span); + } + finally + { + ArrayPool.Shared.Return(pooled); + } + } + + private static readonly uint + PrefixTxt = RespConstants.UnsafeCpuUInt32("txt:"u8), + PrefixMkd = RespConstants.UnsafeCpuUInt32("mkd:"u8); + + /// + /// Reads the current element as a string value. + /// + public readonly byte[]? ReadByteArray() + { + byte[] pooled = []; + try + { + var span = Buffer(ref pooled, stackalloc byte[256]); + if (span.IsEmpty) + { + return IsNull ? null : []; + } + + return span.ToArray(); + } + finally + { + ArrayPool.Shared.Return(pooled); + } + } + + /// + /// Reads the current element using a general purpose text parser. + /// + /// The type of data being parsed. + internal readonly T ParseBytes(Parser parser) + { + byte[] pooled = []; + var span = Buffer(ref pooled, stackalloc byte[256]); + try + { + return parser(span); + } + finally + { + ArrayPool.Shared.Return(pooled); + } + } + + /// + /// Reads the current element using a general purpose text parser. + /// + /// The type of data being parsed. + /// State required by the parser. + internal readonly T ParseBytes(Parser parser, TState? 
state) + { + byte[] pooled = []; + var span = Buffer(ref pooled, stackalloc byte[256]); + try + { + return parser(span, default); + } + finally + { + ArrayPool.Shared.Return(pooled); + } + } + + public readonly unsafe bool TryParseScalar( + delegate* managed, out T, bool> parser, out T value) + { + // Fast path: try to get the span directly + return TryGetSpan(out var span) ? parser(span, out value) : TryParseSlow(parser, out value); + } + + [MethodImpl(MethodImplOptions.NoInlining)] + private readonly unsafe bool TryParseSlow( + delegate* managed, out T, bool> parser, + out T value) + { + byte[] pooled = []; + try + { + var span = Buffer(ref pooled, stackalloc byte[256]); + return parser(span, out value); + } + finally + { + ArrayPool.Shared.Return(pooled); + } + } + + /// + /// Tries to read the current scalar element using a parser callback. + /// + /// The type of data being parsed. + /// The parser callback. + /// The parsed value if successful. + /// true if parsing succeeded; otherwise, false. +#pragma warning disable RS0016, RS0027 // public API + [Experimental(Experiments.Respite, UrlFormat = Experiments.UrlFormat)] + [MethodImpl(MethodImplOptions.AggressiveInlining)] +#if DEBUG + [Obsolete("Please prefer the function-pointer API for library-internal use.")] +#endif + public readonly bool TryParseScalar(ScalarParser parser, out T value) +#pragma warning restore RS0016, RS0027 // public API + { + // Fast path: try to get the span directly + return TryGetSpan(out var span) ? parser(span, out value) : TryParseSlow(parser, out value); + } + + [MethodImpl(MethodImplOptions.NoInlining)] + private readonly bool TryParseSlow(ScalarParser parser, out T value) + { + byte[] pooled = []; + try + { + var span = Buffer(ref pooled, stackalloc byte[256]); + return parser(span, out value); + } + finally + { + ArrayPool.Shared.Return(pooled); + } + } + + /// + /// Buffers the current scalar value into the provided target span. 
+ /// + /// The target span to buffer data into. + /// + /// A span containing the buffered data. If the scalar data fits entirely within , + /// returns a slice of containing all the data. If the scalar data is larger than + /// , returns filled with the first target.Length bytes + /// of the scalar data (the remaining data is not buffered). + /// + /// + /// This method first attempts to use to avoid copying. If the data is non-contiguous + /// (e.g., streaming scalars or data spanning multiple buffer segments), it will copy data into . + /// When the source data exceeds 's capacity, only the first target.Length bytes + /// are copied and returned. + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal readonly ReadOnlySpan Buffer(Span target) + { + if (TryGetSpan(out var simple)) + { + return simple; + } + +#if NET + return BufferSlow(ref Unsafe.NullRef(), target, usePool: false); +#else + byte[] pooled = []; + return BufferSlow(ref pooled, target, usePool: false); +#endif + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal readonly ReadOnlySpan Buffer(scoped ref byte[] pooled, Span target = default) + => TryGetSpan(out var simple) ? simple : BufferSlow(ref pooled, target, true); + + [MethodImpl(MethodImplOptions.NoInlining)] + private readonly ReadOnlySpan BufferSlow(scoped ref byte[] pooled, Span target, bool usePool) + { + DemandScalar(); + + if (IsInlineScalar && usePool) + { + // grow to the correct size in advance, if needed + var length = ScalarLength(); + if (length > target.Length) + { + var bigger = ArrayPool.Shared.Rent(length); + ArrayPool.Shared.Return(pooled); + target = pooled = bigger; + } + } + + var iterator = ScalarChunks(); + ReadOnlySpan current; + int offset = 0; + while (iterator.MoveNext()) + { + // will the current chunk fit? 
+ current = iterator.Current; + if (current.TryCopyTo(target.Slice(offset))) + { + // fits into the current buffer + offset += current.Length; + } + else if (!usePool) + { + // rent disallowed; fill what we can + var available = target.Slice(offset); + current.Slice(0, available.Length).CopyTo(available); + return target; // we filled it + } + else + { + // rent a bigger buffer, copy and recycle + var bigger = ArrayPool.Shared.Rent(offset + current.Length); + if (offset != 0) + { + target.Slice(0, offset).CopyTo(bigger); + } + + ArrayPool.Shared.Return(pooled); + target = pooled = bigger; + current.CopyTo(target.Slice(offset)); + } + } + + return target.Slice(0, offset); + } + + /// + /// Reads the current element using a general purpose byte parser. + /// + /// The type of data being parsed. + internal readonly T ParseChars(Parser parser) + { + byte[] bArr = []; + char[] cArr = []; + try + { + var bSpan = Buffer(ref bArr, stackalloc byte[128]); + var maxChars = RespConstants.UTF8.GetMaxCharCount(bSpan.Length); + Span cSpan = maxChars <= 128 ? stackalloc char[128] : (cArr = ArrayPool.Shared.Rent(maxChars)); + int chars = RespConstants.UTF8.GetChars(bSpan, cSpan); + return parser(cSpan.Slice(0, chars)); + } + finally + { + ArrayPool.Shared.Return(bArr); + ArrayPool.Shared.Return(cArr); + } + } + + /// + /// Reads the current element using a general purpose byte parser. + /// + /// The type of data being parsed. + /// State required by the parser. + internal readonly T ParseChars(Parser parser, TState? state) + { + byte[] bArr = []; + char[] cArr = []; + try + { + var bSpan = Buffer(ref bArr, stackalloc byte[128]); + var maxChars = RespConstants.UTF8.GetMaxCharCount(bSpan.Length); + Span cSpan = maxChars <= 128 ? 
stackalloc char[128] : (cArr = ArrayPool.Shared.Rent(maxChars)); + int chars = RespConstants.UTF8.GetChars(bSpan, cSpan); + return parser(cSpan.Slice(0, chars), state); + } + finally + { + ArrayPool.Shared.Return(bArr); + ArrayPool.Shared.Return(cArr); + } + } + +#if NET8_0_OR_GREATER + /// + /// Reads the current element using . + /// + /// The type of data being parsed. +#pragma warning disable RS0016, RS0027 // back-compat overload + public readonly T ParseChars(IFormatProvider? formatProvider = null) where T : ISpanParsable +#pragma warning restore RS0016, RS0027 // back-compat overload + { + byte[] bArr = []; + char[] cArr = []; + try + { + var bSpan = Buffer(ref bArr, stackalloc byte[128]); + var maxChars = RespConstants.UTF8.GetMaxCharCount(bSpan.Length); + Span cSpan = maxChars <= 128 ? stackalloc char[128] : (cArr = ArrayPool.Shared.Rent(maxChars)); + int chars = RespConstants.UTF8.GetChars(bSpan, cSpan); + return T.Parse(cSpan.Slice(0, chars), formatProvider ?? CultureInfo.InvariantCulture); + } + finally + { + ArrayPool.Shared.Return(bArr); + ArrayPool.Shared.Return(cArr); + } + } +#endif + +#if NET8_0_OR_GREATER + /// + /// Reads the current element using . + /// + /// The type of data being parsed. +#pragma warning disable RS0016, RS0027 // back-compat overload + public readonly T ParseBytes(IFormatProvider? formatProvider = null) where T : IUtf8SpanParsable +#pragma warning restore RS0016, RS0027 // back-compat overload + { + byte[] bArr = []; + try + { + var bSpan = Buffer(ref bArr, stackalloc byte[128]); + return T.Parse(bSpan, formatProvider ?? CultureInfo.InvariantCulture); + } + finally + { + ArrayPool.Shared.Return(bArr); + } + } +#endif + + /// + /// General purpose parsing callback. + /// + /// The type of source data being parsed. + /// State required by the parser. + /// The output type of data being parsed. + // is this needed? + internal delegate TValue Parser(scoped ReadOnlySpan value, TState? 
state); + + /// + /// General purpose parsing callback. + /// + /// The type of source data being parsed. + /// The output type of data being parsed. + // is this needed? + internal delegate TValue Parser(scoped ReadOnlySpan value); + + /// + /// Scalar parsing callback that returns a boolean indicating success. + /// + /// The type of source data being parsed. + /// The output type of data being parsed. + public delegate bool ScalarParser(scoped ReadOnlySpan value, out TValue result); + + /// + /// Initializes a new instance of the struct. + /// + /// The raw contents to parse with this instance. + public RespReader(ReadOnlySpan value) + { + _length = 0; + _flags = RespFlags.None; + _prefix = RespPrefix.None; + SetCurrent(value); + + _remainingTailLength = _positionBase = 0; + _tail = null; + } + + private void MovePastCurrent() + { + // skip past the trailing portion of a value, if any + var skip = TrailingLength; + if (_bufferIndex + skip <= CurrentLength) + { + _bufferIndex += skip; // available in the current buffer + } + else + { + AdvanceSlow(skip); + } + + // reset the current state + _length = 0; + _flags = 0; + _prefix = RespPrefix.None; + } + + /// + public RespReader(scoped in ReadOnlySequence value) +#if NET + : this(value.FirstSpan) +#else + : this(value.First.Span) +#endif + { + if (!value.IsSingleSegment) + { + _remainingTailLength = value.Length - CurrentLength; + _tail = (value.Start.GetObject() as ReadOnlySequenceSegment)?.Next ?? MissingNext(); + } + + [MethodImpl(MethodImplOptions.NoInlining), DoesNotReturn] + static ReadOnlySequenceSegment MissingNext() => + throw new ArgumentException("Unable to extract tail segment", nameof(value)); + } + + /// + /// Attempt to move to the next RESP element. + /// + /// Unless you are intentionally handling errors, attributes and streaming data, should be preferred. 
+ [EditorBrowsable(EditorBrowsableState.Never), Browsable(false)] + [Obsolete("Unless you are manually handling errors, attributes and streaming data, TryMoveNext() should be preferred.", false)] + public unsafe bool TryReadNext() + { + MovePastCurrent(); + +#if NET + // check what we have available; don't worry about zero/fetching the next segment; this is only + // for SIMD lookup, and zero would only apply when data ends exactly on segment boundaries, which + // is incredible niche + var available = CurrentAvailable; + + if (Avx2.IsSupported && Bmi1.IsSupported && available >= sizeof(uint)) + { + // read the first 4 bytes + ref byte origin = ref UnsafeCurrent; + var comparand = Unsafe.ReadUnaligned(ref origin); + + // broadcast those 4 bytes into a vector, mask to get just the first and last byte, and apply a SIMD equality test with our known cases + var eqs = + Avx2.CompareEqual(Avx2.And(Avx2.BroadcastScalarToVector256(&comparand), Raw.FirstLastMask), Raw.CommonRespPrefixes); + + // reinterpret that as floats, and pick out the sign bits (which will be 1 for "equal", 0 for "not equal"); since the + // test cases are mutually exclusive, we expect zero or one matches, so: lzcount tells us which matched + var index = + Bmi1.TrailingZeroCount((uint)Avx.MoveMask(Unsafe.As, Vector256>(ref eqs))); + int len; +#if DEBUG + if (VectorizeDisabled) index = uint.MaxValue; // just to break the switch +#endif + switch (index) + { + case Raw.CommonRespIndex_Success when available >= 5 && Unsafe.Add(ref origin, 4) == (byte)'\n': + _prefix = RespPrefix.SimpleString; + _length = 2; + _bufferIndex++; + _flags = RespFlags.IsScalar | RespFlags.IsInlineScalar; + return true; + case Raw.CommonRespIndex_SingleDigitInteger when Unsafe.Add(ref origin, 2) == (byte)'\r': + _prefix = RespPrefix.Integer; + _length = 1; + _bufferIndex++; + _flags = RespFlags.IsScalar | RespFlags.IsInlineScalar; + return true; + case Raw.CommonRespIndex_DoubleDigitInteger when available >= 5 && Unsafe.Add(ref 
origin, 4) == (byte)'\n': + _prefix = RespPrefix.Integer; + _length = 2; + _bufferIndex++; + _flags = RespFlags.IsScalar | RespFlags.IsInlineScalar; + return true; + case Raw.CommonRespIndex_SingleDigitString when Unsafe.Add(ref origin, 2) == (byte)'\r': + if (comparand == RespConstants.BulkStringStreaming) + { + _flags = RespFlags.IsScalar | RespFlags.IsStreaming; + } + else + { + len = ParseSingleDigit(Unsafe.Add(ref origin, 1)); + if (available < len + 6) break; // need more data + + UnsafeAssertClLf(4 + len); + _length = len; + _flags = RespFlags.IsScalar | RespFlags.IsInlineScalar; + } + _prefix = RespPrefix.BulkString; + _bufferIndex += 4; + return true; + case Raw.CommonRespIndex_DoubleDigitString when available >= 5 && Unsafe.Add(ref origin, 4) == (byte)'\n': + if (comparand == RespConstants.BulkStringNull) + { + _length = 0; + _flags = RespFlags.IsScalar | RespFlags.IsNull; + } + else + { + len = ParseDoubleDigitsNonNegative(ref Unsafe.Add(ref origin, 1)); + if (available < len + 7) break; // need more data + + UnsafeAssertClLf(5 + len); + _length = len; + _flags = RespFlags.IsScalar | RespFlags.IsInlineScalar; + } + _prefix = RespPrefix.BulkString; + _bufferIndex += 5; + return true; + case Raw.CommonRespIndex_SingleDigitArray when Unsafe.Add(ref origin, 2) == (byte)'\r': + if (comparand == RespConstants.ArrayStreaming) + { + _flags = RespFlags.IsAggregate | RespFlags.IsStreaming; + } + else + { + _flags = RespFlags.IsAggregate; + _length = ParseSingleDigit(Unsafe.Add(ref origin, 1)); + } + _prefix = RespPrefix.Array; + _bufferIndex += 4; + return true; + case Raw.CommonRespIndex_DoubleDigitArray when available >= 5 && Unsafe.Add(ref origin, 4) == (byte)'\n': + if (comparand == RespConstants.ArrayNull) + { + _flags = RespFlags.IsAggregate | RespFlags.IsNull; + } + else + { + _length = ParseDoubleDigitsNonNegative(ref Unsafe.Add(ref origin, 1)); + _flags = RespFlags.IsAggregate; + } + _prefix = RespPrefix.Array; + _bufferIndex += 5; + return true; + case 
Raw.CommonRespIndex_Error: + len = UnsafePastPrefix().IndexOf(RespConstants.CrlfBytes); + if (len < 0) break; // need more data + + _prefix = RespPrefix.SimpleError; + _flags = RespFlags.IsScalar | RespFlags.IsInlineScalar | RespFlags.IsError; + _length = len; + _bufferIndex++; + return true; + } + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + static int ParseDoubleDigitsNonNegative(ref byte value) => (10 * ParseSingleDigit(value)) + ParseSingleDigit(Unsafe.Add(ref value, 1)); +#endif + + // no fancy vectorization, but: we can still try to find the payload the fast way in a single segment + if (_bufferIndex + 3 <= CurrentLength) // shortest possible RESP fragment is length 3 + { + var remaining = UnsafePastPrefix(); + switch (_prefix = UnsafePeekPrefix()) + { + case RespPrefix.SimpleString: + case RespPrefix.SimpleError: + case RespPrefix.Integer: + case RespPrefix.Boolean: + case RespPrefix.Double: + case RespPrefix.BigInteger: + // CRLF-terminated + _length = remaining.IndexOf(RespConstants.CrlfBytes); + if (_length < 0) break; // can't find, need more data + _bufferIndex++; // payload follows prefix directly + _flags = RespFlags.IsScalar | RespFlags.IsInlineScalar; + if (_prefix == RespPrefix.SimpleError) _flags |= RespFlags.IsError; + return true; + case RespPrefix.BulkError: + case RespPrefix.BulkString: + case RespPrefix.VerbatimString: + // length prefix with value payload; first, the length + switch (TryReadLengthPrefix(remaining, out _length, out int consumed)) + { + case LengthPrefixResult.Length: + // still need to valid terminating CRLF + if (remaining.Length < consumed + _length + 2) break; // need more data + UnsafeAssertClLf(1 + consumed + _length); + + _flags = RespFlags.IsScalar | RespFlags.IsInlineScalar; + break; + case LengthPrefixResult.Null: + _flags = RespFlags.IsScalar | RespFlags.IsNull; + break; + case LengthPrefixResult.Streaming: + _flags = RespFlags.IsScalar | RespFlags.IsStreaming; + break; + } + + if (_flags == 0) break; 
// will need more data to know + if (_prefix == RespPrefix.BulkError) _flags |= RespFlags.IsError; + _bufferIndex += 1 + consumed; + return true; + case RespPrefix.StreamContinuation: + // length prefix, possibly with value payload; first, the length + switch (TryReadLengthPrefix(remaining, out _length, out consumed)) + { + case LengthPrefixResult.Length when _length == 0: + // EOF, no payload + _flags = RespFlags + .IsScalar; // don't claim as streaming, we want this to count towards delta-decrement + break; + case LengthPrefixResult.Length: + // still need to valid terminating CRLF + if (remaining.Length < consumed + _length + 2) break; // need more data + UnsafeAssertClLf(1 + consumed + _length); + + _flags = RespFlags.IsScalar | RespFlags.IsInlineScalar | RespFlags.IsStreaming; + break; + case LengthPrefixResult.Null: + case LengthPrefixResult.Streaming: + ThrowProtocolFailure("Invalid streaming scalar length prefix"); + break; + } + + if (_flags == 0) break; // will need more data to know + _bufferIndex += 1 + consumed; + return true; + case RespPrefix.Array: + case RespPrefix.Set: + case RespPrefix.Map: + case RespPrefix.Push: + case RespPrefix.Attribute: + // length prefix without value payload (child values follow) + switch (TryReadLengthPrefix(remaining, out _length, out consumed)) + { + case LengthPrefixResult.Length: + _flags = RespFlags.IsAggregate; + if (AggregateLengthNeedsDoubling()) _length *= 2; + break; + case LengthPrefixResult.Null: + _flags = RespFlags.IsAggregate | RespFlags.IsNull; + break; + case LengthPrefixResult.Streaming: + _flags = RespFlags.IsAggregate | RespFlags.IsStreaming; + break; + } + + if (_flags == 0) break; // will need more data to know + if (_prefix is RespPrefix.Attribute) _flags |= RespFlags.IsAttribute; + _bufferIndex += consumed + 1; + return true; + case RespPrefix.Null: // null + // note we already checked we had 3 bytes + UnsafeAssertClLf(1); + // treat as both scalar and aggregate; this might seem weird, but makes + 
// sense when considering how .IsScalar and .IsAggregate are typically used, + // and that a pure null can apply to either + _flags = RespFlags.IsScalar | RespFlags.IsAggregate | RespFlags.IsNull; + _bufferIndex += 3; // skip prefix+terminator + return true; + case RespPrefix.StreamTerminator: + // note we already checked we had 3 bytes + UnsafeAssertClLf(1); + _flags = RespFlags.IsAggregate; // don't claim as streaming - this counts towards delta + _bufferIndex += 3; // skip prefix+terminator + return true; + default: + ThrowProtocolFailure("Unexpected protocol prefix: " + _prefix); + return false; + } + } + + return TryReadNextSlow(ref this); + } + + private static bool TryReadNextSlow(ref RespReader live) + { + // in the case of failure, we don't want to apply any changes, + // so we work against an isolated copy until we're happy + live.MovePastCurrent(); + RespReader isolated = live; + + int next = isolated.RawTryReadByte(); + if (next < 0) return false; + + switch (isolated._prefix = (RespPrefix)next) + { + case RespPrefix.SimpleString: + case RespPrefix.SimpleError: + case RespPrefix.Integer: + case RespPrefix.Boolean: + case RespPrefix.Double: + case RespPrefix.BigInteger: + // CRLF-terminated + if (!isolated.RawTryFindCrLf(out isolated._length)) return false; + isolated._flags = RespFlags.IsScalar | RespFlags.IsInlineScalar; + if (isolated._prefix == RespPrefix.SimpleError) isolated._flags |= RespFlags.IsError; + break; + case RespPrefix.BulkError: + case RespPrefix.BulkString: + case RespPrefix.VerbatimString: + // length prefix with value payload + switch (isolated.RawTryReadLengthPrefix()) + { + case LengthPrefixResult.Length: + // still need to valid terminating CRLF + isolated._flags = RespFlags.IsScalar | RespFlags.IsInlineScalar; + if (!isolated.RawTryAssertInlineScalarPayloadCrLf()) return false; + break; + case LengthPrefixResult.Null: + isolated._flags = RespFlags.IsScalar | RespFlags.IsNull; + break; + case LengthPrefixResult.Streaming: + 
isolated._flags = RespFlags.IsScalar | RespFlags.IsStreaming; + break; + case LengthPrefixResult.NeedMoreData: + return false; + default: + live.ThrowProtocolFailure("Unexpected length prefix"); + return false; + } + + if (isolated._prefix == RespPrefix.BulkError) isolated._flags |= RespFlags.IsError; + break; + case RespPrefix.Array: + case RespPrefix.Set: + case RespPrefix.Map: + case RespPrefix.Push: + case RespPrefix.Attribute: + // length prefix without value payload (child values follow) + switch (isolated.RawTryReadLengthPrefix()) + { + case LengthPrefixResult.Length: + isolated._flags = RespFlags.IsAggregate; + if (isolated.AggregateLengthNeedsDoubling()) isolated._length *= 2; + break; + case LengthPrefixResult.Null: + isolated._flags = RespFlags.IsAggregate | RespFlags.IsNull; + break; + case LengthPrefixResult.Streaming: + isolated._flags = RespFlags.IsAggregate | RespFlags.IsStreaming; + break; + case LengthPrefixResult.NeedMoreData: + return false; + default: + isolated.ThrowProtocolFailure("Unexpected length prefix"); + return false; + } + + if (isolated._prefix is RespPrefix.Attribute) isolated._flags |= RespFlags.IsAttribute; + break; + case RespPrefix.Null: // null + if (!isolated.RawAssertCrLf()) return false; + isolated._flags = RespFlags.IsScalar | RespFlags.IsNull; + break; + case RespPrefix.StreamTerminator: + if (!isolated.RawAssertCrLf()) return false; + isolated._flags = RespFlags.IsAggregate; // don't claim as streaming - this counts towards delta + break; + case RespPrefix.StreamContinuation: + // length prefix, possibly with value payload; first, the length + switch (isolated.RawTryReadLengthPrefix()) + { + case LengthPrefixResult.Length when isolated._length == 0: + // EOF, no payload + isolated._flags = + RespFlags + .IsScalar; // don't claim as streaming, we want this to count towards delta-decrement + break; + case LengthPrefixResult.Length: + // still need to valid terminating CRLF + isolated._flags = RespFlags.IsScalar | 
RespFlags.IsInlineScalar | RespFlags.IsStreaming; + if (!isolated.RawTryAssertInlineScalarPayloadCrLf()) return false; // need more data + break; + case LengthPrefixResult.Null: + case LengthPrefixResult.Streaming: + isolated.ThrowProtocolFailure("Invalid streaming scalar length prefix"); + break; + case LengthPrefixResult.NeedMoreData: + default: + return false; + } + + break; + default: + isolated.ThrowProtocolFailure("Unexpected protocol prefix: " + isolated._prefix); + return false; + } + + // commit the speculative changes back, and accept + live = isolated; + return true; + } + + private void AdvanceSlow(long bytes) + { + while (bytes > 0) + { + var available = CurrentLength - _bufferIndex; + if (bytes <= available) + { + _bufferIndex += (int)bytes; + return; + } + + bytes -= available; + + if (!TryMoveToNextSegment()) Throw(); + } + + [DoesNotReturn] + static void Throw() => throw new EndOfStreamException( + "Unexpected end of payload; this is unexpected because we already validated that it was available!"); + } + + private bool AggregateLengthNeedsDoubling() => _prefix is RespPrefix.Map or RespPrefix.Attribute; + + private bool TryMoveToNextSegment() + { + while (_tail is not null && _remainingTailLength > 0) + { + var memory = _tail.Memory; + _tail = _tail.Next; + if (!memory.IsEmpty) + { + var span = memory.Span; // check we can get this before mutating anything + _positionBase += CurrentLength; + if (span.Length > _remainingTailLength) + { + span = span.Slice(0, (int)_remainingTailLength); + _remainingTailLength = 0; + } + else + { + _remainingTailLength -= span.Length; + } + + SetCurrent(span); + _bufferIndex = 0; + return true; + } + } + + return false; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal readonly bool IsOK() // go mad with this, because it is used so often + { + if (TryGetSpan(out var span) && span.Length == 2) + { + var u16 = Unsafe.ReadUnaligned(ref UnsafeCurrent); + return u16 == RespConstants.OKUInt16 | u16 == 
RespConstants.OKUInt16_LC; + } + + return IsSlow(RespConstants.OKBytes, RespConstants.OKBytes_LC); + } + + /// + /// Indicates whether the current element is a scalar with a value that matches the provided . + /// + /// The payload value to verify. + public readonly bool Is(ReadOnlySpan value) + => TryGetSpan(out var span) ? span.SequenceEqual(value) : IsSlow(value); + + /// + /// Indicates whether the current element is a scalar with a value that starts with the provided . + /// + /// The payload value to verify. + public readonly bool StartsWith(ReadOnlySpan value) + => TryGetSpan(out var span) ? span.StartsWith(value) : StartsWithSlow(value); + + /// + /// Indicates whether the current element is a scalar with a value that matches the provided . + /// + /// The payload value to verify. + public readonly bool Is(ReadOnlySpan value) + { + var bytes = RespConstants.UTF8.GetMaxByteCount(value.Length); + byte[]? oversized = null; + Span buffer = bytes <= 128 ? stackalloc byte[128] : (oversized = ArrayPool.Shared.Rent(bytes)); + bytes = RespConstants.UTF8.GetBytes(value, buffer); + bool result = Is(buffer.Slice(0, bytes)); + if (oversized is not null) ArrayPool.Shared.Return(oversized); + return result; + } + + internal readonly bool IsInlneCpuUInt32(uint value) + { + if (IsInlineScalar && _length == sizeof(uint)) + { + return CurrentAvailable >= sizeof(uint) + ? Unsafe.ReadUnaligned(ref UnsafeCurrent) == value + : SlowIsInlneCpuUInt32(value); + } + + return false; + } + + private readonly bool SlowIsInlneCpuUInt32(uint value) + { + Debug.Assert(IsInlineScalar && _length == sizeof(uint), "should be inline scalar of length 4"); + Span buffer = stackalloc byte[sizeof(uint)]; + var copy = this; + copy.RawFillBytes(buffer); + return RespConstants.UnsafeCpuUInt32(buffer) == value; + } + + /// + /// Indicates whether the current element is a scalar with a value that matches the provided . + /// + /// The payload value to verify. 
+ public readonly bool Is(byte value) + { + if (IsInlineScalar && _length == 1 && CurrentAvailable >= 1) + { + return UnsafeCurrent == value; + } + + ReadOnlySpan span = [value]; + return IsSlow(span); + } + + private readonly bool IsSlow(ReadOnlySpan testValue0, ReadOnlySpan testValue2) + => IsSlow(testValue0) || IsSlow(testValue2); + + private readonly bool IsSlow(ReadOnlySpan testValue) + { + DemandScalar(); + if (IsNull) return false; // nothing equals null + if (TotalAvailable < testValue.Length) return false; + + if (!IsStreaming && testValue.Length != ScalarLength()) return false; + + var iterator = ScalarChunks(); + while (true) + { + if (testValue.IsEmpty) + { + // nothing left to test; if also nothing left to read, great! + return !iterator.MoveNext(); + } + + if (!iterator.MoveNext()) + { + return false; // test is longer + } + + var current = iterator.Current; + if (testValue.Length < current.Length) return false; // payload is longer + + if (!current.SequenceEqual(testValue.Slice(0, current.Length))) return false; // payload is different + + testValue = testValue.Slice(current.Length); // validated; continue + } + } + + private readonly bool StartsWithSlow(ReadOnlySpan testValue) + { + DemandScalar(); + if (IsNull) return false; // nothing equals null + if (testValue.IsEmpty) return true; // every non-null scalar starts-with empty + if (TotalAvailable < testValue.Length) return false; + + if (!IsStreaming && testValue.Length < ScalarLength()) return false; + + var iterator = ScalarChunks(); + while (true) + { + if (testValue.IsEmpty) + { + return true; + } + + if (!iterator.MoveNext()) + { + return false; // test is longer + } + + var current = iterator.Current; + if (testValue.Length <= current.Length) + { + // current fragment exhausts the test data; check it with StartsWith + return testValue.StartsWith(current); + } + + // current fragment is longer than the test data; the overlap must match exactly + if (!current.SequenceEqual(testValue.Slice(0, 
current.Length))) return false; // payload is different + + testValue = testValue.Slice(current.Length); // validated; continue + } + } + + /// + /// Copy the current scalar value out into the supplied , or as much as can be copied. + /// + /// The destination for the copy operation. + /// The number of bytes successfully copied. + public readonly int CopyTo(scoped Span target) + { + if (TryGetSpan(out var value)) + { + if (target.Length < value.Length) value = value.Slice(0, target.Length); + + value.CopyTo(target); + return value.Length; + } + + int totalBytes = 0; + var iterator = ScalarChunks(); + while (iterator.MoveNext()) + { + value = iterator.Current; + if (target.Length <= value.Length) + { + value.Slice(0, target.Length).CopyTo(target); + return totalBytes + target.Length; + } + + value.CopyTo(target); + target = target.Slice(value.Length); + totalBytes += value.Length; + } + + return totalBytes; + } + + /// + /// Copy the current scalar value out into the supplied , or as much as can be copied. + /// + /// The destination for the copy operation. + /// The number of bytes successfully copied. + public readonly int CopyTo(IBufferWriter target) + { + if (TryGetSpan(out var value)) + { + target.Write(value); + return value.Length; + } + + int totalBytes = 0; + var iterator = ScalarChunks(); + while (iterator.MoveNext()) + { + value = iterator.Current; + target.Write(value); + totalBytes += value.Length; + } + + return totalBytes; + } + + /// + /// Asserts that the current element is not null. + /// + public void DemandNotNull() + { + if (IsNull) Throw(); + static void Throw() => throw new InvalidOperationException("A non-null element was expected"); + } + + /// + /// Read the current element as a value. 
+ /// + [SuppressMessage("Style", "IDE0018:Inline variable declaration", Justification = "No it can't - conditional")] + public readonly long ReadInt64() + { + var span = Buffer(stackalloc byte[RespConstants.MaxRawBytesInt64 + 1]); + long value; + if (!(span.Length <= RespConstants.MaxRawBytesInt64 + && Utf8Parser.TryParse(span, out value, out int bytes) + && bytes == span.Length)) + { + ThrowFormatException(); + value = 0; + } + + return value; + } + + /// + /// Try to read the current element as a value. + /// + public readonly bool TryReadInt64(out long value) + { + var span = Buffer(stackalloc byte[RespConstants.MaxRawBytesInt64 + 1]); + if (span.Length <= RespConstants.MaxRawBytesInt64) + { + return Utf8Parser.TryParse(span, out value, out int bytes) & bytes == span.Length; + } + + value = 0; + return false; + } + + /// + /// Read the current element as a value. + /// + [SuppressMessage("Style", "IDE0018:Inline variable declaration", Justification = "No it can't - conditional")] + public readonly int ReadInt32() + { + var span = Buffer(stackalloc byte[RespConstants.MaxRawBytesInt32 + 1]); + int value; + if (!(span.Length <= RespConstants.MaxRawBytesInt32 + && Utf8Parser.TryParse(span, out value, out int bytes) + && bytes == span.Length)) + { + ThrowFormatException(); + value = 0; + } + + return value; + } + + /// + /// Try to read the current element as a value. + /// + public readonly bool TryReadInt32(out int value) + { + var span = Buffer(stackalloc byte[RespConstants.MaxRawBytesInt32 + 1]); + if (span.Length <= RespConstants.MaxRawBytesInt32) + { + return Utf8Parser.TryParse(span, out value, out int bytes) & bytes == span.Length; + } + + value = 0; + return false; + } + + /// + /// Read the current element as a value. 
+ /// + public readonly double ReadDouble() + { + var span = Buffer(stackalloc byte[RespConstants.MaxRawBytesNumber + 1]); + + if (span.Length <= RespConstants.MaxRawBytesNumber + && Utf8Parser.TryParse(span, out double value, out int bytes) + && bytes == span.Length) + { + return value; + } + + switch (span.Length) + { + case 3 when "inf"u8.SequenceEqual(span): + return double.PositiveInfinity; + case 3 when "nan"u8.SequenceEqual(span): + return double.NaN; + case 4 when "+inf"u8.SequenceEqual(span): // not actually mentioned in spec, but: we'll allow it + return double.PositiveInfinity; + case 4 when "-inf"u8.SequenceEqual(span): + return double.NegativeInfinity; + } + + ThrowFormatException(); + return 0; + } + + /// + /// Try to read the current element as a value. + /// + public readonly bool TryReadDouble(out double value, bool allowTokens = true) + { + var span = Buffer(stackalloc byte[RespConstants.MaxRawBytesNumber + 1]); + + if (Utf8Parser.TryParse(span, out value, out int bytes) + && bytes == span.Length) + { + return true; + } + + if (allowTokens) + { + switch (span.Length) + { + case 3 when "inf"u8.SequenceEqual(span): + value = double.PositiveInfinity; + return true; + case 3 when "nan"u8.SequenceEqual(span): + value = double.NaN; + return true; + case 4 when "+inf"u8.SequenceEqual(span): // not actually mentioned in spec, but: we'll allow it + value = double.PositiveInfinity; + return true; + case 4 when "-inf"u8.SequenceEqual(span): + value = double.NegativeInfinity; + return true; + } + } + + value = 0; + return false; + } + + /// + /// Note this uses a stackalloc buffer; requesting too much may overflow the stack. 
+ /// + internal readonly bool UnsafeTryReadShortAscii(out string value, int maxLength = 127) + { + var span = Buffer(stackalloc byte[maxLength + 1]); + value = ""; + if (span.IsEmpty) return true; + + if (span.Length <= maxLength) + { + // check for anything that looks binary or unicode + foreach (var b in span) + { + // allow [SPACE]-thru-[DEL], plus CR/LF + if (!(b < 127 & (b >= 32 | (b is 12 or 13)))) + { + return false; + } + } + + value = Encoding.UTF8.GetString(span); + return true; + } + + return false; + } + + /// + /// Read the current element as a value. + /// + [SuppressMessage("Style", "IDE0018:Inline variable declaration", Justification = "No it can't - conditional")] + public readonly decimal ReadDecimal() + { + var span = Buffer(stackalloc byte[RespConstants.MaxRawBytesNumber + 1]); + decimal value; + if (!(span.Length <= RespConstants.MaxRawBytesNumber + && Utf8Parser.TryParse(span, out value, out int bytes) + && bytes == span.Length)) + { + ThrowFormatException(); + value = 0; + } + + return value; + } + + /// + /// Read the current element as a value. + /// + public readonly bool ReadBoolean() + { + var span = Buffer(stackalloc byte[2]); + switch (span.Length) + { + case 1: + switch (span[0]) + { + case (byte)'0' when Prefix == RespPrefix.Integer: return false; + case (byte)'1' when Prefix == RespPrefix.Integer: return true; + case (byte)'f' when Prefix == RespPrefix.Boolean: return false; + case (byte)'t' when Prefix == RespPrefix.Boolean: return true; + } + + break; + case 2 when Prefix == RespPrefix.SimpleString && IsOK(): return true; + } + + ThrowFormatException(); + return false; + } + + /// + /// Parse a scalar value as an enum of type . + /// + /// The value to report if the value is not recognized. + /// The type of enum being parsed. + public readonly T ReadEnum(T unknownValue = default) where T : struct, Enum + { +#if NET + return ParseChars(static (chars, state) => Enum.TryParse(chars, true, out T value) ? 
value : state, unknownValue); +#else + return Enum.TryParse(ReadString(), true, out T value) ? value : unknownValue; +#endif + } + +#pragma warning disable RS0026 // unambiguous due to signature + /// + /// Reads an aggregate as an array of elements without changing the position. + /// + /// The type of data to be projected. + public TResult[]? ReadArray(Projection projection, bool scalar = false) + { + var copy = this; + return copy.ReadPastArray(projection, scalar); + } + + /// + /// Reads an aggregate as an array of elements without changing the position. + /// + /// Additional state required by the projection. + /// The type of data to be projected. + public TResult[]? ReadArray(ref TState state, Projection projection, bool scalar = false) +#if NET10_0_OR_GREATER + where TState : allows ref struct +#endif + { + var copy = this; + return copy.ReadPastArray(ref state, projection, scalar); + } + + /// + /// Reads an aggregate as an array of elements, moving past the data as a side effect. + /// + /// The type of data to be projected. + public TResult[]? ReadPastArray(Projection projection, bool scalar = false) + => ReadPastArray(ref projection, static (ref projection, ref reader) => projection(ref reader), scalar); + + /// + /// Reads an aggregate as an array of elements, moving past the data as a side effect. + /// + /// Additional state required by the projection. + /// The type of data to be projected. + public TResult[]? 
ReadPastArray(ref TState state, Projection projection, bool scalar = false) +#if NET10_0_OR_GREATER + where TState : allows ref struct +#endif +#pragma warning restore RS0026 + { + DemandAggregate(); + if (IsNull) return null; + var len = AggregateLength(); + if (len == 0) return []; + var result = new TResult[len]; + if (scalar) + { + // if the data to be consumed is simple (scalar), we can use + // a simpler path that doesn't need to worry about RESP subtrees + for (int i = 0; i < result.Length; i++) + { + MoveNextScalar(); + result[i] = projection(ref state, ref this); + } + } + else + { + var agg = AggregateChildren(); + agg.FillAll(result, ref state, projection); + agg.MovePast(out this); + } + + return result; + } + + public TResult[]? ReadPairArray( + Projection first, + Projection second, + Func combine, + bool scalar = true) + { + DemandAggregate(); + if (IsNull) return null; + int sourceLength = AggregateLength(); + if (sourceLength is 0 or 1) return []; + var result = new TResult[sourceLength >> 1]; + if (scalar) + { + // if the data to be consumed is simple (scalar), we can use + // a simpler path that doesn't need to worry about RESP subtrees + for (int i = 0; i < result.Length; i++) + { + MoveNextScalar(); + var x = first(ref this); + MoveNextScalar(); + var y = second(ref this); + result[i] = combine(x, y); + } + // if we have an odd number of source elements, skip the last one + if ((sourceLength & 1) != 0) MoveNextScalar(); + } + else + { + var agg = AggregateChildren(); + agg.FillAll(result, first, second, combine); + agg.MovePast(out this); + } + return result; + } + internal TResult[]? 
ReadLeasedPairArray( + Projection first, + Projection second, + Func combine, + out int count, + bool scalar = true) + { + DemandAggregate(); + if (IsNull) + { + count = 0; + return null; + } + int sourceLength = AggregateLength(); + count = sourceLength >> 1; + if (count is 0) return []; + + var oversized = ArrayPool.Shared.Rent(count); + var result = oversized.AsSpan(0, count); + if (scalar) + { + // if the data to be consumed is simple (scalar), we can use + // a simpler path that doesn't need to worry about RESP subtrees + for (int i = 0; i < result.Length; i++) + { + MoveNextScalar(); + var x = first(ref this); + MoveNextScalar(); + var y = second(ref this); + result[i] = combine(x, y); + } + // if we have an odd number of source elements, skip the last one + if ((sourceLength & 1) != 0) MoveNextScalar(); + } + else + { + var agg = AggregateChildren(); + agg.FillAll(result, first, second, combine); + agg.MovePast(out this); + } + return oversized; + } +} diff --git a/src/RESPite/Messages/RespScanState.cs b/src/RESPite/Messages/RespScanState.cs new file mode 100644 index 000000000..37cd3f8b6 --- /dev/null +++ b/src/RESPite/Messages/RespScanState.cs @@ -0,0 +1,163 @@ +using System.Buffers; +using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; + +namespace RESPite.Messages; + +/// +/// Holds state used for RESP frame parsing, i.e. detecting the RESP for an entire top-level message. +/// +[Experimental(Experiments.Respite, UrlFormat = Experiments.UrlFormat)] +public struct RespScanState +{ + /* + The key point of ScanState is to skim over a RESP stream with minimal frame processing, to find the + end of a single top-level RESP message. We start by expecting 1 message, and then just read, with the + rules that the end of a message subtracts one, and aggregates add N. Streaming scalars apply zero offset + until the scalar stream terminator. Attributes also apply zero offset. 
+ Note that streaming aggregates change the rules - when at least one streaming aggregate is in effect, + no offsets are applied until we get back out of the outermost streaming aggregate - we achieve this + by simply counting the streaming aggregate depth, which is usually zero. + Note that in reality streaming (scalar and aggregates) and attributes are non-existent; in addition + to being specific to RESP3, no known server currently implements these parts of the RESP3 specification, + so everything here is theoretical, but: works according to the spec. + */ + private int _delta; // when this becomes -1, we have fully read a top-level message; + private ushort _streamingAggregateDepth; + private RespPrefix _prefix; + + public RespPrefix Prefix => _prefix; + + private long _totalBytes; +#if DEBUG + private int _elementCount; + + /// + public override string ToString() => $"{_prefix}, consumed: {_totalBytes} bytes, {_elementCount} nodes, complete: {IsComplete} ({_delta + 1} outstanding)"; +#else + /// + public override string ToString() => _prefix.ToString(); +#endif + + /// + public override bool Equals([NotNullWhen(true)] object? obj) => throw new NotSupportedException(); + + /// + public override int GetHashCode() => throw new NotSupportedException(); + + /// + /// Gets whether an entire top-level RESP message has been consumed. + /// + public bool IsComplete => _delta == -1; + + /// + /// Gets the total length of the payload read (or read so far, if it is not yet complete); this combines payloads from multiple + /// TryRead operations. + /// + public long TotalBytes => _totalBytes; + + // used when spotting common replies - we entirely bypass the usual reader/delta mechanism + internal void SetComplete(int totalBytes, RespPrefix prefix) + { + _totalBytes = totalBytes; + _delta = -1; + _prefix = prefix; +#if DEBUG + _elementCount = 1; +#endif + } + + /// + /// The amount of data, in bytes, to read before attempting to read the next frame. 
+ /// + public const int MinBytes = 3; // minimum legal RESP frame is: _\r\n + + /// + /// Create a new value that can parse the supplied node (and subtree). + /// + internal RespScanState(in RespReader reader) + { + Debug.Assert(reader.Prefix != RespPrefix.None, "missing RESP prefix"); + _totalBytes = 0; + _delta = reader.GetInitialScanCount(out _streamingAggregateDepth); + } + + /// + /// Scan as far as possible, stopping when an entire top-level RESP message has been consumed or the data is exhausted. + /// + /// True if a top-level RESP message has been consumed. + public bool TryRead(ref RespReader reader, out long bytesRead) + { + bytesRead = ReadCore(ref reader, reader.BytesConsumed); + return IsComplete; + } + + /// + /// Scan as far as possible, stopping when an entire top-level RESP message has been consumed or the data is exhausted. + /// + /// True if a top-level RESP message has been consumed. + public bool TryRead(ReadOnlySpan value, out int bytesRead) + { + var reader = new RespReader(value); + bytesRead = (int)ReadCore(ref reader); + return IsComplete; + } + + /// + /// Scan as far as possible, stopping when an entire top-level RESP message has been consumed or the data is exhausted. + /// + /// True if a top-level RESP message has been consumed. + public bool TryRead(in ReadOnlySequence value, out long bytesRead) + { + var reader = new RespReader(in value); + bytesRead = ReadCore(ref reader); + return IsComplete; + } + + /// + /// Scan as far as possible, stopping when an entire top-level RESP message has been consumed or the data is exhausted. + /// + /// The number of bytes consumed in this operation. 
+ private long ReadCore(ref RespReader reader, long startOffset = 0) + { +#pragma warning disable CS0618 // avoid TryReadNext unless you know what you're doing + while (_delta >= 0 && reader.TryReadNext()) +#pragma warning restore CS0618 + { +#if DEBUG + _elementCount++; +#endif + if (!reader.IsAttribute & _prefix == RespPrefix.None) + { + _prefix = reader.Prefix; + } + + if (reader.IsNonNullAggregate) ApplyAggregateRules(ref reader); + + if (_streamingAggregateDepth == 0) _delta += reader.Delta(); + } + + var bytesRead = reader.BytesConsumed - startOffset; + _totalBytes += bytesRead; + return bytesRead; + } + + private void ApplyAggregateRules(ref RespReader reader) + { + Debug.Assert(reader.IsAggregate, "RESP aggregate expected"); + if (reader.IsStreaming) + { + // entering an aggregate stream + if (_streamingAggregateDepth == ushort.MaxValue) ThrowTooDeep(); + _streamingAggregateDepth++; + } + else if (reader.Prefix == RespPrefix.StreamTerminator) + { + // exiting an aggregate stream + if (_streamingAggregateDepth == 0) ThrowUnexpectedTerminator(); + _streamingAggregateDepth--; + } + static void ThrowTooDeep() => throw new InvalidOperationException("Maximum streaming aggregate depth exceeded."); + static void ThrowUnexpectedTerminator() => throw new InvalidOperationException("Unexpected streaming aggregate terminator."); + } +} diff --git a/src/RESPite/PublicAPI/PublicAPI.Shipped.txt b/src/RESPite/PublicAPI/PublicAPI.Shipped.txt new file mode 100644 index 000000000..ab058de62 --- /dev/null +++ b/src/RESPite/PublicAPI/PublicAPI.Shipped.txt @@ -0,0 +1 @@ +#nullable enable diff --git a/src/RESPite/PublicAPI/PublicAPI.Unshipped.txt b/src/RESPite/PublicAPI/PublicAPI.Unshipped.txt new file mode 100644 index 000000000..9ce6685bc --- /dev/null +++ b/src/RESPite/PublicAPI/PublicAPI.Unshipped.txt @@ -0,0 +1,214 @@ +#nullable enable +[SER004]const RESPite.Buffers.CycleBuffer.GetAnything = 0 -> int +[SER004]const RESPite.Buffers.CycleBuffer.GetFullPagesOnly = -1 -> int 
+[SER004]override RESPite.AsciiHash.Equals(object? other) -> bool +[SER004]override RESPite.AsciiHash.GetHashCode() -> int +[SER004]override RESPite.AsciiHash.ToString() -> string! +[SER004]RESPite.AsciiHash +[SER004]RESPite.AsciiHash.AsciiHash() -> void +[SER004]RESPite.AsciiHash.AsciiHash(byte[]! arr) -> void +[SER004]RESPite.AsciiHash.AsciiHash(byte[]! arr, int index, int length) -> void +[SER004]RESPite.AsciiHash.AsciiHash(System.ReadOnlySpan value) -> void +[SER004]RESPite.AsciiHash.BufferLength.get -> int +[SER004]RESPite.AsciiHash.Equals(in RESPite.AsciiHash other) -> bool +[SER004]RESPite.AsciiHash.IsCI(System.ReadOnlySpan value) -> bool +[SER004]RESPite.AsciiHash.IsCS(System.ReadOnlySpan value) -> bool +[SER004]RESPite.AsciiHash.Length.get -> int +[SER004]RESPite.AsciiHash.Span.get -> System.ReadOnlySpan +[SER004]RESPite.AsciiHashAttribute +[SER004]RESPite.AsciiHashAttribute.AsciiHashAttribute(string! token = "") -> void +[SER004]RESPite.AsciiHashAttribute.CaseSensitive.get -> bool +[SER004]RESPite.AsciiHashAttribute.CaseSensitive.set -> void +[SER004]RESPite.AsciiHashAttribute.Token.get -> string! +[SER004]RESPite.AsciiHash.AsciiHash(string? 
value) -> void +[SER004]RESPite.AsciiHash.IsEmpty.get -> bool +[SER004]RESPite.Buffers.CycleBuffer +[SER004]RESPite.Buffers.CycleBuffer.Commit(int count) -> void +[SER004]RESPite.Buffers.CycleBuffer.CommittedIsEmpty.get -> bool +[SER004]RESPite.Buffers.CycleBuffer.CycleBuffer() -> void +[SER004]RESPite.Buffers.CycleBuffer.DiscardCommitted(int count) -> void +[SER004]RESPite.Buffers.CycleBuffer.DiscardCommitted(long count) -> void +[SER004]RESPite.Buffers.CycleBuffer.GetAllCommitted() -> System.Buffers.ReadOnlySequence +[SER004]RESPite.Buffers.CycleBuffer.GetCommittedLength() -> long +[SER004]RESPite.Buffers.CycleBuffer.GetUncommittedMemory(int hint = 0) -> System.Memory +[SER004]RESPite.Buffers.CycleBuffer.GetUncommittedSpan(int hint = 0) -> System.Span +[SER004]RESPite.Buffers.CycleBuffer.PageSize.get -> int +[SER004]RESPite.Buffers.CycleBuffer.Pool.get -> System.Buffers.MemoryPool! +[SER004]RESPite.Buffers.CycleBuffer.Release() -> void +[SER004]RESPite.Buffers.CycleBuffer.TryGetCommitted(out System.ReadOnlySpan span) -> bool +[SER004]RESPite.Buffers.CycleBuffer.TryGetFirstCommittedMemory(int minBytes, out System.ReadOnlyMemory memory) -> bool +[SER004]RESPite.Buffers.CycleBuffer.TryGetFirstCommittedSpan(int minBytes, out System.ReadOnlySpan span) -> bool +[SER004]RESPite.Buffers.CycleBuffer.UncommittedAvailable.get -> int +[SER004]RESPite.Buffers.CycleBuffer.Write(in System.Buffers.ReadOnlySequence value) -> void +[SER004]RESPite.Buffers.CycleBuffer.Write(System.ReadOnlySpan value) -> void +[SER004]RESPite.Buffers.ICycleBufferCallback +[SER004]RESPite.Buffers.ICycleBufferCallback.PageComplete() -> void +[SER004]RESPite.Messages.RespReader.AggregateEnumerator.FillAll(scoped System.Span target, RESPite.Messages.RespReader.Projection! projection) -> void +[SER004]RESPite.Messages.RespReader.AggregateEnumerator.FillAll(scoped System.Span target, ref TState state, RESPite.Messages.RespReader.Projection! first, RESPite.Messages.RespReader.Projection! 
second, System.Func! combine) -> void +[SER004]RESPite.Messages.RespReader.AggregateEnumerator.FillAll(scoped System.Span target, ref TState state, RESPite.Messages.RespReader.Projection! projection) -> void +[SER004]RESPite.Messages.RespReader.AggregateEnumerator.MoveNextRaw() -> bool +[SER004]RESPite.Messages.RespReader.AggregateEnumerator.MoveNextRaw(RESPite.Messages.RespAttributeReader! respAttributeReader, ref T attributes) -> bool +[SER004]RESPite.Messages.RespReader.AggregateIsEmpty() -> bool +[SER004]RESPite.Messages.RespReader.AggregateLengthIs(int count) -> bool +[SER004]RESPite.Messages.RespReader.Clone() -> RESPite.Messages.RespReader +[SER004]RESPite.Messages.RespReader.FillAll(scoped System.Span target, ref TState state, RESPite.Messages.RespReader.Projection! projection) -> void +[SER004]RESPite.Messages.RespReader.Projection +[SER004]RESPite.Messages.RespReader.ReadArray(ref TState state, RESPite.Messages.RespReader.Projection! projection, bool scalar = false) -> TResult[]? +[SER004]RESPite.Messages.RespReader.ReadPastArray(ref TState state, RESPite.Messages.RespReader.Projection! projection, bool scalar = false) -> TResult[]? +[SER004]RESPite.Messages.RespReader.ScalarParser +[SER004]RESPite.Messages.RespReader.TryParseScalar(delegate*, out T, bool> parser, out T value) -> bool +[SER004]RESPite.Messages.RespReader.TryParseScalar(RESPite.Messages.RespReader.ScalarParser! parser, out T value) -> bool +[SER004]static RESPite.AsciiHash.CaseInsensitiveEqualityComparer.get -> System.Collections.Generic.IEqualityComparer! +[SER004]static RESPite.AsciiHash.CaseSensitiveEqualityComparer.get -> System.Collections.Generic.IEqualityComparer! 
+[SER004]static RESPite.AsciiHash.EqualsCI(System.ReadOnlySpan first, System.ReadOnlySpan second) -> bool +[SER004]static RESPite.AsciiHash.EqualsCI(System.ReadOnlySpan first, System.ReadOnlySpan second) -> bool +[SER004]static RESPite.AsciiHash.EqualsCS(System.ReadOnlySpan first, System.ReadOnlySpan second) -> bool +[SER004]static RESPite.AsciiHash.EqualsCS(System.ReadOnlySpan first, System.ReadOnlySpan second) -> bool +[SER004]static RESPite.AsciiHash.Hash(scoped System.ReadOnlySpan value, out long cs, out long uc) -> void +[SER004]static RESPite.AsciiHash.Hash(scoped System.ReadOnlySpan value, out long cs0, out long uc0, out long cs1, out long uc1) -> void +[SER004]static RESPite.AsciiHash.Hash(scoped System.ReadOnlySpan value, out long cs, out long uc) -> void +[SER004]static RESPite.AsciiHash.Hash(scoped System.ReadOnlySpan value, out long cs0, out long uc0, out long cs1, out long uc1) -> void +[SER004]static RESPite.AsciiHash.HashCS(scoped System.ReadOnlySpan value) -> long +[SER004]static RESPite.AsciiHash.HashCS(scoped System.ReadOnlySpan value, out long cs0, out long cs1) -> void +[SER004]static RESPite.AsciiHash.HashCS(scoped System.ReadOnlySpan value) -> long +[SER004]static RESPite.AsciiHash.HashCS(scoped System.ReadOnlySpan value, out long cs0, out long cs1) -> void +[SER004]static RESPite.AsciiHash.HashUC(scoped System.ReadOnlySpan value) -> long +[SER004]static RESPite.AsciiHash.HashUC(scoped System.ReadOnlySpan value, out long cs0, out long cs1) -> void +[SER004]static RESPite.AsciiHash.HashUC(scoped System.ReadOnlySpan value) -> long +[SER004]static RESPite.AsciiHash.HashUC(scoped System.ReadOnlySpan value, out long cs0, out long cs1) -> void +[SER004]static RESPite.AsciiHash.SequenceEqualsCI(System.ReadOnlySpan first, System.ReadOnlySpan second) -> bool +[SER004]static RESPite.AsciiHash.SequenceEqualsCI(System.ReadOnlySpan first, System.ReadOnlySpan second) -> bool +[SER004]static RESPite.AsciiHash.SequenceEqualsCS(System.ReadOnlySpan first, 
System.ReadOnlySpan second) -> bool +[SER004]static RESPite.AsciiHash.SequenceEqualsCS(System.ReadOnlySpan first, System.ReadOnlySpan second) -> bool +[SER004]static RESPite.AsciiHash.ToLower(System.Span span) -> void +[SER004]static RESPite.AsciiHash.ToUpper(System.Span span) -> void +[SER004]const RESPite.Messages.RespScanState.MinBytes = 3 -> int +[SER004]override RESPite.Messages.RespScanState.Equals(object? obj) -> bool +[SER004]override RESPite.Messages.RespScanState.GetHashCode() -> int +[SER004]override RESPite.Messages.RespScanState.ToString() -> string! +[SER004]RESPite.Messages.RespAttributeReader +[SER004]RESPite.Messages.RespAttributeReader.RespAttributeReader() -> void +[SER004]RESPite.Messages.RespFrameScanner +[SER004]RESPite.Messages.RespFrameScanner.TryRead(ref RESPite.Messages.RespScanState state, in System.Buffers.ReadOnlySequence data) -> System.Buffers.OperationStatus +[SER004]RESPite.Messages.RespFrameScanner.TryRead(ref RESPite.Messages.RespScanState state, System.ReadOnlySpan data) -> System.Buffers.OperationStatus +[SER004]RESPite.Messages.RespFrameScanner.ValidateRequest(in System.Buffers.ReadOnlySequence message) -> void +[SER004]RESPite.Messages.RespPrefix +[SER004]RESPite.Messages.RespPrefix.Array = 42 -> RESPite.Messages.RespPrefix +[SER004]RESPite.Messages.RespPrefix.Attribute = 124 -> RESPite.Messages.RespPrefix +[SER004]RESPite.Messages.RespPrefix.BigInteger = 40 -> RESPite.Messages.RespPrefix +[SER004]RESPite.Messages.RespPrefix.Boolean = 35 -> RESPite.Messages.RespPrefix +[SER004]RESPite.Messages.RespPrefix.BulkError = 33 -> RESPite.Messages.RespPrefix +[SER004]RESPite.Messages.RespPrefix.BulkString = 36 -> RESPite.Messages.RespPrefix +[SER004]RESPite.Messages.RespPrefix.Double = 44 -> RESPite.Messages.RespPrefix +[SER004]RESPite.Messages.RespPrefix.Integer = 58 -> RESPite.Messages.RespPrefix +[SER004]RESPite.Messages.RespPrefix.Map = 37 -> RESPite.Messages.RespPrefix +[SER004]RESPite.Messages.RespPrefix.None = 0 -> 
RESPite.Messages.RespPrefix +[SER004]RESPite.Messages.RespPrefix.Null = 95 -> RESPite.Messages.RespPrefix +[SER004]RESPite.Messages.RespPrefix.Push = 62 -> RESPite.Messages.RespPrefix +[SER004]RESPite.Messages.RespPrefix.Set = 126 -> RESPite.Messages.RespPrefix +[SER004]RESPite.Messages.RespPrefix.SimpleError = 45 -> RESPite.Messages.RespPrefix +[SER004]RESPite.Messages.RespPrefix.SimpleString = 43 -> RESPite.Messages.RespPrefix +[SER004]RESPite.Messages.RespPrefix.StreamContinuation = 59 -> RESPite.Messages.RespPrefix +[SER004]RESPite.Messages.RespPrefix.StreamTerminator = 46 -> RESPite.Messages.RespPrefix +[SER004]RESPite.Messages.RespPrefix.VerbatimString = 61 -> RESPite.Messages.RespPrefix +[SER004]RESPite.Messages.RespReader +[SER004]RESPite.Messages.RespReader.AggregateChildren() -> RESPite.Messages.RespReader.AggregateEnumerator +[SER004]RESPite.Messages.RespReader.AggregateEnumerator +[SER004]RESPite.Messages.RespReader.AggregateEnumerator.AggregateEnumerator() -> void +[SER004]RESPite.Messages.RespReader.AggregateEnumerator.AggregateEnumerator(scoped in RESPite.Messages.RespReader reader) -> void +[SER004]RESPite.Messages.RespReader.AggregateEnumerator.Current.get -> RESPite.Messages.RespReader +[SER004]RESPite.Messages.RespReader.AggregateEnumerator.DemandNext() -> void +[SER004]RESPite.Messages.RespReader.AggregateEnumerator.FillAll(scoped System.Span target, RESPite.Messages.RespReader.Projection! first, RESPite.Messages.RespReader.Projection! second, System.Func! combine) -> void +[SER004]RESPite.Messages.RespReader.AggregateEnumerator.GetEnumerator() -> RESPite.Messages.RespReader.AggregateEnumerator +[SER004]RESPite.Messages.RespReader.AggregateEnumerator.MoveNext() -> bool +[SER004]RESPite.Messages.RespReader.AggregateEnumerator.MoveNext(RESPite.Messages.RespPrefix prefix) -> bool +[SER004]RESPite.Messages.RespReader.AggregateEnumerator.MoveNext(RESPite.Messages.RespPrefix prefix, RESPite.Messages.RespAttributeReader! 
respAttributeReader, ref T attributes) -> bool +[SER004]RESPite.Messages.RespReader.AggregateEnumerator.MovePast(out RESPite.Messages.RespReader reader) -> void +[SER004]RESPite.Messages.RespReader.AggregateEnumerator.ReadOne(RESPite.Messages.RespReader.Projection! projection) -> T +[SER004]RESPite.Messages.RespReader.AggregateEnumerator.Value -> RESPite.Messages.RespReader +[SER004]RESPite.Messages.RespReader.AggregateLength() -> int +[SER004]RESPite.Messages.RespReader.BytesConsumed.get -> long +[SER004]RESPite.Messages.RespReader.CopyTo(scoped System.Span target) -> int +[SER004]RESPite.Messages.RespReader.CopyTo(System.Buffers.IBufferWriter! target) -> int +[SER004]RESPite.Messages.RespReader.DemandAggregate() -> void +[SER004]RESPite.Messages.RespReader.DemandEnd() -> void +[SER004]RESPite.Messages.RespReader.DemandNotNull() -> void +[SER004]RESPite.Messages.RespReader.DemandScalar() -> void +[SER004]RESPite.Messages.RespReader.FillAll(scoped System.Span target, RESPite.Messages.RespReader.Projection! projection) -> void +[SER004]RESPite.Messages.RespReader.Is(byte value) -> bool +[SER004]RESPite.Messages.RespReader.Is(System.ReadOnlySpan value) -> bool +[SER004]RESPite.Messages.RespReader.Is(System.ReadOnlySpan value) -> bool +[SER004]RESPite.Messages.RespReader.IsAggregate.get -> bool +[SER004]RESPite.Messages.RespReader.IsAttribute.get -> bool +[SER004]RESPite.Messages.RespReader.IsError.get -> bool +[SER004]RESPite.Messages.RespReader.IsNull.get -> bool +[SER004]RESPite.Messages.RespReader.IsScalar.get -> bool +[SER004]RESPite.Messages.RespReader.IsStreaming.get -> bool +[SER004]RESPite.Messages.RespReader.MoveNext() -> void +[SER004]RESPite.Messages.RespReader.MoveNext(RESPite.Messages.RespPrefix prefix) -> void +[SER004]RESPite.Messages.RespReader.MoveNext(RESPite.Messages.RespAttributeReader! 
respAttributeReader, ref T attributes) -> void +[SER004]RESPite.Messages.RespReader.MoveNext(RESPite.Messages.RespPrefix prefix, RESPite.Messages.RespAttributeReader! respAttributeReader, ref T attributes) -> void +[SER004]RESPite.Messages.RespReader.MoveNextAggregate() -> void +[SER004]RESPite.Messages.RespReader.MoveNextScalar() -> void +[SER004]RESPite.Messages.RespReader.Prefix.get -> RESPite.Messages.RespPrefix +[SER004]RESPite.Messages.RespReader.Projection +[SER004]RESPite.Messages.RespReader.ProtocolBytesRemaining.get -> long +[SER004]RESPite.Messages.RespReader.ReadArray(RESPite.Messages.RespReader.Projection! projection, bool scalar = false) -> TResult[]? +[SER004]RESPite.Messages.RespReader.ReadBoolean() -> bool +[SER004]RESPite.Messages.RespReader.ReadByteArray() -> byte[]? +[SER004]RESPite.Messages.RespReader.ReadDecimal() -> decimal +[SER004]RESPite.Messages.RespReader.ReadDouble() -> double +[SER004]RESPite.Messages.RespReader.ReadEnum(T unknownValue = default(T)) -> T +[SER004]RESPite.Messages.RespReader.ReadInt32() -> int +[SER004]RESPite.Messages.RespReader.ReadInt64() -> long +[SER004]RESPite.Messages.RespReader.ReadPairArray(RESPite.Messages.RespReader.Projection! first, RESPite.Messages.RespReader.Projection! second, System.Func! combine, bool scalar = true) -> TResult[]? +[SER004]RESPite.Messages.RespReader.ReadPastArray(RESPite.Messages.RespReader.Projection! projection, bool scalar = false) -> TResult[]? +[SER004]RESPite.Messages.RespReader.ReadString() -> string? +[SER004]RESPite.Messages.RespReader.ReadString(out string! prefix) -> string? 
+[SER004]RESPite.Messages.RespReader.RespReader() -> void +[SER004]RESPite.Messages.RespReader.RespReader(scoped in System.Buffers.ReadOnlySequence value) -> void +[SER004]RESPite.Messages.RespReader.RespReader(System.ReadOnlySpan value) -> void +[SER004]RESPite.Messages.RespReader.ScalarChunks() -> RESPite.Messages.RespReader.ScalarEnumerator +[SER004]RESPite.Messages.RespReader.ScalarEnumerator +[SER004]RESPite.Messages.RespReader.ScalarEnumerator.Current.get -> System.ReadOnlySpan +[SER004]RESPite.Messages.RespReader.ScalarEnumerator.CurrentLength.get -> int +[SER004]RESPite.Messages.RespReader.ScalarEnumerator.GetEnumerator() -> RESPite.Messages.RespReader.ScalarEnumerator +[SER004]RESPite.Messages.RespReader.ScalarEnumerator.MoveNext() -> bool +[SER004]RESPite.Messages.RespReader.ScalarEnumerator.MovePast(out RESPite.Messages.RespReader reader) -> void +[SER004]RESPite.Messages.RespReader.ScalarEnumerator.ScalarEnumerator() -> void +[SER004]RESPite.Messages.RespReader.ScalarEnumerator.ScalarEnumerator(scoped in RESPite.Messages.RespReader reader) -> void +[SER004]RESPite.Messages.RespReader.ScalarIsEmpty() -> bool +[SER004]RESPite.Messages.RespReader.ScalarLength() -> int +[SER004]RESPite.Messages.RespReader.ScalarLengthIs(int count) -> bool +[SER004]RESPite.Messages.RespReader.ScalarLongLength() -> long +[SER004]RESPite.Messages.RespReader.SkipChildren() -> void +[SER004]RESPite.Messages.RespReader.StartsWith(System.ReadOnlySpan value) -> bool +[SER004]RESPite.Messages.RespReader.TryGetSpan(out System.ReadOnlySpan value) -> bool +[SER004]RESPite.Messages.RespReader.TryMoveNext() -> bool +[SER004]RESPite.Messages.RespReader.TryMoveNext(bool checkError) -> bool +[SER004]RESPite.Messages.RespReader.TryMoveNext(RESPite.Messages.RespPrefix prefix) -> bool +[SER004]RESPite.Messages.RespReader.TryMoveNext(RESPite.Messages.RespAttributeReader! 
respAttributeReader, ref T attributes) -> bool +[SER004]RESPite.Messages.RespReader.TryReadDouble(out double value, bool allowTokens = true) -> bool +[SER004]RESPite.Messages.RespReader.TryReadInt32(out int value) -> bool +[SER004]RESPite.Messages.RespReader.TryReadInt64(out long value) -> bool +[SER004]RESPite.Messages.RespReader.TryReadNext() -> bool +[SER004]RESPite.Messages.RespScanState +[SER004]RESPite.Messages.RespScanState.IsComplete.get -> bool +[SER004]RESPite.Messages.RespScanState.Prefix.get -> RESPite.Messages.RespPrefix +[SER004]RESPite.Messages.RespScanState.RespScanState() -> void +[SER004]RESPite.Messages.RespScanState.TotalBytes.get -> long +[SER004]RESPite.Messages.RespScanState.TryRead(in System.Buffers.ReadOnlySequence value, out long bytesRead) -> bool +[SER004]RESPite.Messages.RespScanState.TryRead(ref RESPite.Messages.RespReader reader, out long bytesRead) -> bool +[SER004]RESPite.Messages.RespScanState.TryRead(System.ReadOnlySpan value, out int bytesRead) -> bool +[SER004]RESPite.RespException +[SER004]RESPite.RespException.RespException(string! message) -> void +[SER004]static RESPite.Buffers.CycleBuffer.Create(System.Buffers.MemoryPool? pool = null, int pageSize = 8192, RESPite.Buffers.ICycleBufferCallback? callback = null) -> RESPite.Buffers.CycleBuffer +[SER004]static RESPite.Messages.RespFrameScanner.Default.get -> RESPite.Messages.RespFrameScanner! +[SER004]static RESPite.Messages.RespFrameScanner.Subscription.get -> RESPite.Messages.RespFrameScanner! 
+[SER004]virtual RESPite.Messages.RespAttributeReader.Read(ref RESPite.Messages.RespReader reader, ref T value) -> void +[SER004]virtual RESPite.Messages.RespAttributeReader.ReadKeyValuePair(scoped System.ReadOnlySpan key, ref RESPite.Messages.RespReader reader, ref T value) -> bool +[SER004]virtual RESPite.Messages.RespAttributeReader.ReadKeyValuePairs(ref RESPite.Messages.RespReader reader, ref T value) -> int +[SER004]virtual RESPite.Messages.RespReader.Projection.Invoke(ref RESPite.Messages.RespReader value) -> T +[SER004]virtual RESPite.Messages.RespReader.Projection.Invoke(ref TState state, ref RESPite.Messages.RespReader value) -> TResult +[SER004]virtual RESPite.Messages.RespReader.ScalarParser.Invoke(scoped System.ReadOnlySpan value, out TValue result) -> bool +[SER004]RESPite.Messages.RespReader.Serialize() -> byte[]! \ No newline at end of file diff --git a/src/RESPite/PublicAPI/net8.0/PublicAPI.Shipped.txt b/src/RESPite/PublicAPI/net8.0/PublicAPI.Shipped.txt new file mode 100644 index 000000000..ab058de62 --- /dev/null +++ b/src/RESPite/PublicAPI/net8.0/PublicAPI.Shipped.txt @@ -0,0 +1 @@ +#nullable enable diff --git a/src/RESPite/PublicAPI/net8.0/PublicAPI.Unshipped.txt b/src/RESPite/PublicAPI/net8.0/PublicAPI.Unshipped.txt new file mode 100644 index 000000000..c43af2e5e --- /dev/null +++ b/src/RESPite/PublicAPI/net8.0/PublicAPI.Unshipped.txt @@ -0,0 +1,3 @@ +#nullable enable +[SER004]RESPite.Messages.RespReader.ParseBytes(System.IFormatProvider? formatProvider = null) -> T +[SER004]RESPite.Messages.RespReader.ParseChars(System.IFormatProvider? 
formatProvider = null) -> T \ No newline at end of file diff --git a/src/RESPite/RESPite.csproj b/src/RESPite/RESPite.csproj new file mode 100644 index 000000000..fef03625b --- /dev/null +++ b/src/RESPite/RESPite.csproj @@ -0,0 +1,51 @@ + + + + true + net461;netstandard2.0;net472;net6.0;net8.0;net10.0 + enable + enable + false + 2025 - $([System.DateTime]::Now.Year) Marc Gravell + readme.md + + + + + + + + + + + + + + + + RespReader.cs + + + BlockBufferSerializer.cs + + + BlockBufferSerializer.cs + + + BlockBufferSerializer.cs + + + + + + + + + + + + + + + + diff --git a/src/RESPite/RespException.cs b/src/RESPite/RespException.cs new file mode 100644 index 000000000..6b5fd7c72 --- /dev/null +++ b/src/RESPite/RespException.cs @@ -0,0 +1,11 @@ +using System.Diagnostics.CodeAnalysis; + +namespace RESPite; + +/// +/// Represents a RESP error message. +/// +[Experimental(Experiments.Respite, UrlFormat = Experiments.UrlFormat)] +public sealed class RespException(string message) : Exception(message) +{ +} diff --git a/src/RESPite/Shared/AsciiHash.Comparers.cs b/src/RESPite/Shared/AsciiHash.Comparers.cs new file mode 100644 index 000000000..7b69a15a4 --- /dev/null +++ b/src/RESPite/Shared/AsciiHash.Comparers.cs @@ -0,0 +1,37 @@ +namespace RESPite; + +public readonly partial struct AsciiHash +{ + public static IEqualityComparer CaseSensitiveEqualityComparer => CaseSensitiveComparer.Instance; + public static IEqualityComparer CaseInsensitiveEqualityComparer => CaseInsensitiveComparer.Instance; + + private sealed class CaseSensitiveComparer : IEqualityComparer + { + private CaseSensitiveComparer() { } + public static readonly CaseSensitiveComparer Instance = new(); + + public bool Equals(AsciiHash x, AsciiHash y) + { + var len = x.Length; + return (len == y.Length & x._hashCS == y._hashCS) + && (len <= MaxBytesHashed || x.Span.SequenceEqual(y.Span)); + } + + public int GetHashCode(AsciiHash obj) => obj._hashCS.GetHashCode(); + } + + private sealed class 
CaseInsensitiveComparer : IEqualityComparer + { + private CaseInsensitiveComparer() { } + public static readonly CaseInsensitiveComparer Instance = new(); + + public bool Equals(AsciiHash x, AsciiHash y) + { + var len = x.Length; + return (len == y.Length & x._hashUC == y._hashUC) + && (len <= MaxBytesHashed || SequenceEqualsCI(x.Span, y.Span)); + } + + public int GetHashCode(AsciiHash obj) => obj._hashUC.GetHashCode(); + } +} diff --git a/src/RESPite/Shared/AsciiHash.Instance.cs b/src/RESPite/Shared/AsciiHash.Instance.cs new file mode 100644 index 000000000..53db4ff27 --- /dev/null +++ b/src/RESPite/Shared/AsciiHash.Instance.cs @@ -0,0 +1,73 @@ +using System.Buffers.Binary; +using System.Diagnostics.CodeAnalysis; +using System.Text; + +namespace RESPite; + +public readonly partial struct AsciiHash : IEquatable +{ + // ReSharper disable InconsistentNaming + private readonly long _hashCS, _hashUC; + // ReSharper restore InconsistentNaming + private readonly int _index, _length; + private readonly byte[] _arr; + + public int Length => _length; + + /// + /// The optimal buffer length (with padding) to use for this value. + /// + public int BufferLength => (Length + 1 + 7) & ~7; // an extra byte, then round up to word-size + + public ReadOnlySpan Span => new(_arr ?? [], _index, _length); + public bool IsEmpty => Length == 0; + + public AsciiHash(ReadOnlySpan value) : this(value.ToArray(), 0, value.Length) { } + public AsciiHash(string? value) : this(value is null ? [] : Encoding.ASCII.GetBytes(value)) { } + + /// + public override int GetHashCode() => _hashCS.GetHashCode(); + + /// + public override string ToString() => _length == 0 ? "" : Encoding.ASCII.GetString(_arr, _index, _length); + + /// + public override bool Equals(object? 
other) => other is AsciiHash hash && Equals(hash); + + /// + public bool Equals(in AsciiHash other) + { + return (_length == other.Length & _hashCS == other._hashCS) + && (_length <= MaxBytesHashed || Span.SequenceEqual(other.Span)); + } + + bool IEquatable.Equals(AsciiHash other) => Equals(other); + + public AsciiHash(byte[] arr) : this(arr, 0, -1) { } + + public AsciiHash(byte[] arr, int index, int length) + { + _arr = arr ?? []; + _index = index; + _length = length < 0 ? (_arr.Length - index) : length; + + var span = new ReadOnlySpan(_arr, _index, _length); + Hash(span, out _hashCS, out _hashUC); + } + + public bool IsCS(ReadOnlySpan value) + { + var cs = HashCS(value); + var len = _length; + if (cs != _hashCS | value.Length != len) return false; + return len <= MaxBytesHashed || Span.SequenceEqual(value); + } + + public bool IsCI(ReadOnlySpan value) + { + var uc = HashUC(value); + var len = _length; + if (uc != _hashUC | value.Length != len) return false; + return len <= MaxBytesHashed || SequenceEqualsCI(Span, value); + } +} diff --git a/src/RESPite/Shared/AsciiHash.Public.cs b/src/RESPite/Shared/AsciiHash.Public.cs new file mode 100644 index 000000000..dd31cb415 --- /dev/null +++ b/src/RESPite/Shared/AsciiHash.Public.cs @@ -0,0 +1,10 @@ +namespace RESPite; + +// in the shared file, these are declared without accessibility modifiers +public sealed partial class AsciiHashAttribute +{ +} + +public readonly partial struct AsciiHash +{ +} diff --git a/src/RESPite/Shared/AsciiHash.cs b/src/RESPite/Shared/AsciiHash.cs new file mode 100644 index 000000000..37b3c5734 --- /dev/null +++ b/src/RESPite/Shared/AsciiHash.cs @@ -0,0 +1,294 @@ +using System; +using System.Buffers.Binary; +using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; + +namespace RESPite; + +#pragma warning disable SA1205 // deliberately omit accessibility - see AsciiHash.Public.cs + +/// +/// This type is 
intended to provide fast hashing functions for small ASCII strings, for example well-known +/// RESP literals that are usually identifiable by their length and initial bytes; it is not intended +/// for general purpose hashing, and the behavior is undefined for non-ASCII literals. +/// All matches must also perform a sequence equality check. +/// +/// See HastHashGenerator.md for more information and intended usage. +[AttributeUsage( + AttributeTargets.Class | AttributeTargets.Method | AttributeTargets.Field | AttributeTargets.Enum, + AllowMultiple = false, + Inherited = false)] +[Conditional("DEBUG")] // evaporate in release +[Experimental(Experiments.Respite, UrlFormat = Experiments.UrlFormat)] +sealed partial class AsciiHashAttribute(string token = "") : Attribute +{ + /// + /// The token expected when parsing data, if different from the implied value. The implied + /// value is the name, replacing underscores for hyphens, so: 'a_b' becomes 'a-b'. + /// + public string Token => token; + + /// + /// Indicates whether a parse operation is case-sensitive. Not used in other contexts. + /// + public bool CaseSensitive { get; set; } = true; +} + +// note: instance members are in AsciiHash.Instance.cs. +[Experimental(Experiments.Respite, UrlFormat = Experiments.UrlFormat)] +readonly partial struct AsciiHash +{ + /// + /// In-place ASCII upper-case conversion. + /// + public static void ToUpper(Span span) + { + foreach (ref var b in span) + { + if (b >= 'a' && b <= 'z') + b = (byte)(b & ~0x20); + } + } + + /// + /// In-place ASCII lower-case conversion. 
+ /// + public static void ToLower(Span span) + { + foreach (ref var b in span) + { + if (b >= 'a' && b <= 'z') + b |= (byte)(b & ~0x20); + } + } + + internal const int MaxBytesHashed = sizeof(long); + + public static bool EqualsCS(ReadOnlySpan first, ReadOnlySpan second) + { + var len = first.Length; + if (len != second.Length) return false; + // for very short values, the CS hash performs CS equality + return len <= MaxBytesHashed ? HashCS(first) == HashCS(second) : first.SequenceEqual(second); + } + + public static bool SequenceEqualsCS(ReadOnlySpan first, ReadOnlySpan second) + => first.SequenceEqual(second); + + public static bool EqualsCI(ReadOnlySpan first, ReadOnlySpan second) + { + var len = first.Length; + if (len != second.Length) return false; + // for very short values, the UC hash performs CI equality + return len <= MaxBytesHashed ? HashUC(first) == HashUC(second) : SequenceEqualsCI(first, second); + } + + public static unsafe bool SequenceEqualsCI(ReadOnlySpan first, ReadOnlySpan second) + { + var len = first.Length; + if (len != second.Length) return false; + + // OK, don't be clever (SIMD, etc); the purpose of FashHash is to compare RESP key tokens, which are + // typically relatively short, think 3-20 bytes. That wouldn't even touch a SIMD vector, so: + // just loop (the exact thing we'd need to do *anyway* in a SIMD implementation, to mop up the non-SIMD + // trailing bytes). 
+ fixed (byte* firstPtr = &MemoryMarshal.GetReference(first)) + { + fixed (byte* secondPtr = &MemoryMarshal.GetReference(second)) + { + const int CS_MASK = 0b0101_1111; + for (int i = 0; i < len; i++) + { + byte x = firstPtr[i]; + var xCI = x & CS_MASK; + if (xCI >= 'A' & xCI <= 'Z') + { + // alpha mismatch + if (xCI != (secondPtr[i] & CS_MASK)) return false; + } + else if (x != secondPtr[i]) + { + // non-alpha mismatch + return false; + } + } + + return true; + } + } + } + + public static bool EqualsCS(ReadOnlySpan first, ReadOnlySpan second) + { + var len = first.Length; + if (len != second.Length) return false; + // for very short values, the CS hash performs CS equality + return len <= MaxBytesHashed ? HashCS(first) == HashCS(second) : first.SequenceEqual(second); + } + + public static bool SequenceEqualsCS(ReadOnlySpan first, ReadOnlySpan second) + => first.SequenceEqual(second); + + public static bool EqualsCI(ReadOnlySpan first, ReadOnlySpan second) + { + var len = first.Length; + if (len != second.Length) return false; + // for very short values, the CS hash performs CS equality; check that first + return len <= MaxBytesHashed ? HashUC(first) == HashUC(second) : SequenceEqualsCI(first, second); + } + + public static unsafe bool SequenceEqualsCI(ReadOnlySpan first, ReadOnlySpan second) + { + var len = first.Length; + if (len != second.Length) return false; + + // OK, don't be clever (SIMD, etc); the purpose of FashHash is to compare RESP key tokens, which are + // typically relatively short, think 3-20 bytes. That wouldn't even touch a SIMD vector, so: + // just loop (the exact thing we'd need to do *anyway* in a SIMD implementation, to mop up the non-SIMD + // trailing bytes). 
+ fixed (char* firstPtr = &MemoryMarshal.GetReference(first)) + { + fixed (char* secondPtr = &MemoryMarshal.GetReference(second)) + { + const int CS_MASK = 0b0101_1111; + for (int i = 0; i < len; i++) + { + int x = (byte)firstPtr[i]; + var xCI = x & CS_MASK; + if (xCI >= 'A' & xCI <= 'Z') + { + // alpha mismatch + if (xCI != (secondPtr[i] & CS_MASK)) return false; + } + else if (x != (byte)secondPtr[i]) + { + // non-alpha mismatch + return false; + } + } + + return true; + } + } + } + + public static void Hash(scoped ReadOnlySpan value, out long cs, out long uc) + { + cs = HashCS(value); + uc = ToUC(cs); + } + + public static void Hash(scoped ReadOnlySpan value, out long cs, out long uc) + { + cs = HashCS(value); + uc = ToUC(cs); + } + + public static long HashUC(scoped ReadOnlySpan value) => ToUC(HashCS(value)); + + public static long HashUC(scoped ReadOnlySpan value) => ToUC(HashCS(value)); + + internal static long ToUC(long hashCS) + { + const long LC_MASK = 0x2020_2020_2020_2020; + // check whether there are any possible lower-case letters; + // this would be anything with the 0x20 bit set + if ((hashCS & LC_MASK) == 0) return hashCS; + + // Something looks possibly lower-case; we can't just mask it off, + // because there are other non-alpha characters in that range. +#if NET + ToUpper(MemoryMarshal.CreateSpan(ref Unsafe.As(ref hashCS), sizeof(long))); + return hashCS; +#else + Span buffer = stackalloc byte[8]; + BinaryPrimitives.WriteInt64LittleEndian(buffer, hashCS); + ToUpper(buffer); + return BinaryPrimitives.ReadInt64LittleEndian(buffer); +#endif + } + + public static long HashCS(scoped ReadOnlySpan value) + { + // at least 8? 
we can blit + if ((value.Length >> 3) != 0) return BinaryPrimitives.ReadInt64LittleEndian(value); + + // small (<7); manual loop + // note: profiling with unsafe code to pick out elements: much slower + // note: profiling with overstamping a local: 3x slower + ulong tally = 0; + for (int i = 0; i < value.Length; i++) + { + tally |= ((ulong)value[i]) << (i << 3); + } + return (long)tally; + } + + public static long HashCS(scoped ReadOnlySpan value) + { + // note: BDN profiling with Vector64.Narrow showed no benefit + if ((value.Length >> 3) != 0) + { + // slice if necessary, so we can use bounds-elided foreach + if (value.Length != 8) value = value.Slice(0, 8); + } + ulong tally = 0; + for (int i = 0; i < value.Length; i++) + { + tally |= ((ulong)value[i]) << (i << 3); + } + return (long)tally; + } + + public static void HashCS(scoped ReadOnlySpan value, out long cs0, out long cs1) + { + cs0 = HashCS(value); + cs1 = value.Length > MaxBytesHashed ? HashCS(value.Slice(start: MaxBytesHashed)) : 0; + } + + public static void HashCS(scoped ReadOnlySpan value, out long cs0, out long cs1) + { + cs0 = HashCS(value); + cs1 = value.Length > MaxBytesHashed ? HashCS(value.Slice(start: MaxBytesHashed)) : 0; + } + + public static void HashUC(scoped ReadOnlySpan value, out long cs0, out long cs1) + { + cs0 = HashUC(value); + cs1 = value.Length > MaxBytesHashed ? HashUC(value.Slice(start: MaxBytesHashed)) : 0; + } + + public static void HashUC(scoped ReadOnlySpan value, out long cs0, out long cs1) + { + cs0 = HashUC(value); + cs1 = value.Length > MaxBytesHashed ? 
HashUC(value.Slice(start: MaxBytesHashed)) : 0; + } + + public static void Hash(scoped ReadOnlySpan value, out long cs0, out long uc0, out long cs1, out long uc1) + { + Hash(value, out cs0, out uc0); + if (value.Length > MaxBytesHashed) + { + Hash(value.Slice(start: MaxBytesHashed), out cs1, out uc1); + } + else + { + cs1 = uc1 = 0; + } + } + + public static void Hash(scoped ReadOnlySpan value, out long cs0, out long uc0, out long cs1, out long uc1) + { + Hash(value, out cs0, out uc0); + if (value.Length > MaxBytesHashed) + { + Hash(value.Slice(start: MaxBytesHashed), out cs1, out uc1); + } + else + { + cs1 = uc1 = 0; + } + } +} diff --git a/src/RESPite/Shared/Experiments.cs b/src/RESPite/Shared/Experiments.cs new file mode 100644 index 000000000..b4b9fcee1 --- /dev/null +++ b/src/RESPite/Shared/Experiments.cs @@ -0,0 +1,46 @@ +namespace RESPite +{ + // example usage: + // [Experimental(Experiments.SomeFeature, UrlFormat = Experiments.UrlFormat)] + // where SomeFeature has the next label, for example "SER042", and /docs/exp/SER042.md exists + internal static class Experiments + { + public const string UrlFormat = "https://stackexchange.github.io/StackExchange.Redis/exp/"; + + // ReSharper disable InconsistentNaming + public const string VectorSets = "SER001"; + public const string Server_8_4 = "SER002"; + public const string Server_8_6 = "SER003"; + public const string Respite = "SER004"; + public const string UnitTesting = "SER005"; + // ReSharper restore InconsistentNaming + } +} + +#if !NET8_0_OR_GREATER +#pragma warning disable SA1403 +namespace System.Diagnostics.CodeAnalysis +#pragma warning restore SA1403 +{ + [AttributeUsage( + AttributeTargets.Assembly | + AttributeTargets.Module | + AttributeTargets.Class | + AttributeTargets.Struct | + AttributeTargets.Enum | + AttributeTargets.Constructor | + AttributeTargets.Method | + AttributeTargets.Property | + AttributeTargets.Field | + AttributeTargets.Event | + AttributeTargets.Interface | + 
AttributeTargets.Delegate, + Inherited = false)] + internal sealed class ExperimentalAttribute(string diagnosticId) : Attribute + { + public string DiagnosticId { get; } = diagnosticId; + public string? UrlFormat { get; set; } + public string? Message { get; set; } + } +} +#endif diff --git a/src/RESPite/Shared/FrameworkShims.Encoding.cs b/src/RESPite/Shared/FrameworkShims.Encoding.cs new file mode 100644 index 000000000..2f2c2e89d --- /dev/null +++ b/src/RESPite/Shared/FrameworkShims.Encoding.cs @@ -0,0 +1,50 @@ +#if !NET +// ReSharper disable once CheckNamespace +namespace System.Text +{ + internal static class EncodingExtensions + { + public static unsafe int GetBytes(this Encoding encoding, ReadOnlySpan source, Span destination) + { + if (source.IsEmpty) return 0; + fixed (byte* bPtr = destination) + { + fixed (char* cPtr = source) + { + return encoding.GetBytes(cPtr, source.Length, bPtr, destination.Length); + } + } + } + + public static unsafe int GetChars(this Encoding encoding, ReadOnlySpan source, Span destination) + { + if (source.IsEmpty) return 0; + fixed (byte* bPtr = source) + { + fixed (char* cPtr = destination) + { + return encoding.GetChars(bPtr, source.Length, cPtr, destination.Length); + } + } + } + + public static unsafe int GetCharCount(this Encoding encoding, ReadOnlySpan source) + { + if (source.IsEmpty) return 0; + fixed (byte* bPtr = source) + { + return encoding.GetCharCount(bPtr, source.Length); + } + } + + public static unsafe string GetString(this Encoding encoding, ReadOnlySpan source) + { + if (source.IsEmpty) return ""; + fixed (byte* bPtr = source) + { + return encoding.GetString(bPtr, source.Length); + } + } + } +} +#endif diff --git a/src/RESPite/Shared/FrameworkShims.Stream.cs b/src/RESPite/Shared/FrameworkShims.Stream.cs new file mode 100644 index 000000000..3a42e5990 --- /dev/null +++ b/src/RESPite/Shared/FrameworkShims.Stream.cs @@ -0,0 +1,107 @@ +using System.Buffers; +using System.Runtime.InteropServices; + +#if !NET +// 
ReSharper disable once CheckNamespace +namespace System.IO +{ + internal static class StreamExtensions + { + public static void Write(this Stream stream, ReadOnlyMemory value) + { + if (MemoryMarshal.TryGetArray(value, out var segment)) + { + stream.Write(segment.Array!, segment.Offset, segment.Count); + } + else + { + var leased = ArrayPool.Shared.Rent(value.Length); + value.CopyTo(leased); + stream.Write(leased, 0, value.Length); + ArrayPool.Shared.Return(leased); // on success only + } + } + + public static int Read(this Stream stream, Memory value) + { + if (MemoryMarshal.TryGetArray(value, out var segment)) + { + return stream.Read(segment.Array!, segment.Offset, segment.Count); + } + else + { + var leased = ArrayPool.Shared.Rent(value.Length); + int bytes = stream.Read(leased, 0, value.Length); + if (bytes > 0) + { + leased.AsSpan(0, bytes).CopyTo(value.Span); + } + ArrayPool.Shared.Return(leased); // on success only + return bytes; + } + } + + public static ValueTask ReadAsync(this Stream stream, Memory value, CancellationToken cancellationToken) + { + if (MemoryMarshal.TryGetArray(value, out var segment)) + { + return new(stream.ReadAsync(segment.Array!, segment.Offset, segment.Count, cancellationToken)); + } + else + { + var leased = ArrayPool.Shared.Rent(value.Length); + var pending = stream.ReadAsync(leased, 0, value.Length, cancellationToken); + if (!pending.IsCompleted) + { + return Awaited(pending, value, leased); + } + + var bytes = pending.GetAwaiter().GetResult(); + if (bytes > 0) + { + leased.AsSpan(0, bytes).CopyTo(value.Span); + } + ArrayPool.Shared.Return(leased); // on success only + return new(bytes); + + static async ValueTask Awaited(Task pending, Memory value, byte[] leased) + { + var bytes = await pending.ConfigureAwait(false); + if (bytes > 0) + { + leased.AsSpan(0, bytes).CopyTo(value.Span); + } + ArrayPool.Shared.Return(leased); // on success only + return bytes; + } + } + } + + public static ValueTask WriteAsync(this Stream stream, 
ReadOnlyMemory value, CancellationToken cancellationToken) + { + if (MemoryMarshal.TryGetArray(value, out var segment)) + { + return new(stream.WriteAsync(segment.Array!, segment.Offset, segment.Count, cancellationToken)); + } + else + { + var leased = ArrayPool.Shared.Rent(value.Length); + value.CopyTo(leased); + var pending = stream.WriteAsync(leased, 0, value.Length, cancellationToken); + if (!pending.IsCompleted) + { + return Awaited(pending, leased); + } + pending.GetAwaiter().GetResult(); + ArrayPool.Shared.Return(leased); // on success only + return default; + } + static async ValueTask Awaited(Task pending, byte[] leased) + { + await pending.ConfigureAwait(false); + ArrayPool.Shared.Return(leased); // on success only + } + } + } +} +#endif diff --git a/src/RESPite/Shared/FrameworkShims.cs b/src/RESPite/Shared/FrameworkShims.cs new file mode 100644 index 000000000..ceb344b9e --- /dev/null +++ b/src/RESPite/Shared/FrameworkShims.cs @@ -0,0 +1,15 @@ +#pragma warning disable SA1403 // single namespace + +#if !NET10_0_OR_GREATER +namespace System.Runtime.CompilerServices +{ + // see https://learn.microsoft.com/dotnet/api/system.runtime.compilerservices.overloadresolutionpriorityattribute + [AttributeUsage(AttributeTargets.Constructor | AttributeTargets.Method | AttributeTargets.Property, Inherited = false)] + internal sealed class OverloadResolutionPriorityAttribute(int priority) : Attribute + { + public int Priority => priority; + } +} +#endif + +#pragma warning restore SA1403 diff --git a/src/RESPite/Shared/NullableHacks.cs b/src/RESPite/Shared/NullableHacks.cs new file mode 100644 index 000000000..704437442 --- /dev/null +++ b/src/RESPite/Shared/NullableHacks.cs @@ -0,0 +1,146 @@ +// https://github.com/dotnet/runtime/blob/527f9ae88a0ee216b44d556f9bdc84037fe0ebda/src/libraries/System.Private.CoreLib/src/System/Diagnostics/CodeAnalysis/NullableAttributes.cs + +#pragma warning disable +#define INTERNAL_NULLABLE_ATTRIBUTES + +// Licensed to the .NET Foundation 
under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +namespace System.Diagnostics.CodeAnalysis +{ +#if !NET + /// Specifies that null is allowed as an input even if the corresponding type disallows it. + [AttributeUsage(AttributeTargets.Field | AttributeTargets.Parameter | AttributeTargets.Property, Inherited = false)] + internal sealed class AllowNullAttribute : Attribute { } + + /// Specifies that null is disallowed as an input even if the corresponding type allows it. + [AttributeUsage(AttributeTargets.Field | AttributeTargets.Parameter | AttributeTargets.Property, Inherited = false)] + internal sealed class DisallowNullAttribute : Attribute { } + + /// Specifies that an output may be null even if the corresponding type disallows it. + [AttributeUsage(AttributeTargets.Field | AttributeTargets.Parameter | AttributeTargets.Property | AttributeTargets.ReturnValue, Inherited = false)] + internal sealed class MaybeNullAttribute : Attribute { } + + /// Specifies that an output will not be null even if the corresponding type allows it. Specifies that an input argument was not null when the call returns. + [AttributeUsage(AttributeTargets.Field | AttributeTargets.Parameter | AttributeTargets.Property | AttributeTargets.ReturnValue, Inherited = false)] + internal sealed class NotNullAttribute : Attribute { } + + /// Specifies that when a method returns , the parameter may be null even if the corresponding type disallows it. + [AttributeUsage(AttributeTargets.Parameter, Inherited = false)] + internal sealed class MaybeNullWhenAttribute : Attribute + { + /// Initializes the attribute with the specified return value condition. + /// + /// The return value condition. If the method returns this value, the associated parameter may be null. + /// + public MaybeNullWhenAttribute(bool returnValue) => ReturnValue = returnValue; + + /// Gets the return value condition. 
+ public bool ReturnValue { get; } + } + + /// Specifies that when a method returns , the parameter will not be null even if the corresponding type allows it. + [AttributeUsage(AttributeTargets.Parameter, Inherited = false)] + internal sealed class NotNullWhenAttribute : Attribute + { + /// Initializes the attribute with the specified return value condition. + /// + /// The return value condition. If the method returns this value, the associated parameter will not be null. + /// + public NotNullWhenAttribute(bool returnValue) => ReturnValue = returnValue; + + /// Gets the return value condition. + public bool ReturnValue { get; } + } + + /// Specifies that the output will be non-null if the named parameter is non-null. + [AttributeUsage(AttributeTargets.Parameter | AttributeTargets.Property | AttributeTargets.ReturnValue, AllowMultiple = true, Inherited = false)] + internal sealed class NotNullIfNotNullAttribute : Attribute + { + /// Initializes the attribute with the associated parameter name. + /// + /// The associated parameter name. The output will be non-null if the argument to the parameter specified is non-null. + /// + public NotNullIfNotNullAttribute(string parameterName) => ParameterName = parameterName; + + /// Gets the associated parameter name. + public string ParameterName { get; } + } + + /// Applied to a method that will never return under any circumstance. + [AttributeUsage(AttributeTargets.Method, Inherited = false)] + internal sealed class DoesNotReturnAttribute : Attribute { } + + /// Specifies that the method will not return if the associated Boolean parameter is passed the specified value. + [AttributeUsage(AttributeTargets.Parameter, Inherited = false)] + internal sealed class DoesNotReturnIfAttribute : Attribute + { + /// Initializes the attribute with the specified parameter value. + /// + /// The condition parameter value. 
Code after the method will be considered unreachable by diagnostics if the argument to + /// the associated parameter matches this value. + /// + public DoesNotReturnIfAttribute(bool parameterValue) => ParameterValue = parameterValue; + + /// Gets the condition parameter value. + public bool ParameterValue { get; } + } + + /// Specifies that the method or property will ensure that the listed field and property members have not-null values. + [AttributeUsage(AttributeTargets.Method | AttributeTargets.Property, Inherited = false, AllowMultiple = true)] + internal sealed class MemberNotNullAttribute : Attribute + { + /// Initializes the attribute with a field or property member. + /// + /// The field or property member that is promised to be not-null. + /// + public MemberNotNullAttribute(string member) => Members = new[] { member }; + + /// Initializes the attribute with the list of field and property members. + /// + /// The list of field and property members that are promised to be not-null. + /// + public MemberNotNullAttribute(params string[] members) => Members = members; + + /// Gets field or property member names. + public string[] Members { get; } + } + + /// Specifies that the method or property will ensure that the listed field and property members have not-null values when returning with the specified return value condition. + [AttributeUsage(AttributeTargets.Method | AttributeTargets.Property, Inherited = false, AllowMultiple = true)] + internal sealed class MemberNotNullWhenAttribute : Attribute + { + /// Initializes the attribute with the specified return value condition and a field or property member. + /// + /// The return value condition. If the method returns this value, the associated parameter will not be null. + /// + /// + /// The field or property member that is promised to be not-null. 
+ /// + public MemberNotNullWhenAttribute(bool returnValue, string member) + { + ReturnValue = returnValue; + Members = new[] { member }; + } + + /// Initializes the attribute with the specified return value condition and list of field and property members. + /// + /// The return value condition. If the method returns this value, the associated parameter will not be null. + /// + /// + /// The list of field and property members that are promised to be not-null. + /// + public MemberNotNullWhenAttribute(bool returnValue, params string[] members) + { + ReturnValue = returnValue; + Members = members; + } + + /// Gets the return value condition. + public bool ReturnValue { get; } + + /// Gets field or property member names. + public string[] Members { get; } + } +#endif +} diff --git a/src/RESPite/readme.md b/src/RESPite/readme.md new file mode 100644 index 000000000..034cae8d3 --- /dev/null +++ b/src/RESPite/readme.md @@ -0,0 +1,6 @@ +# RESPite + +RESPite is a high-performance low-level RESP (Redis, etc) library, used as the IO core for +StackExchange.Redis v3+. It is also available for direct use from other places! + +For now: you probably shouldn't be using this. \ No newline at end of file diff --git a/src/StackExchange.Redis/APITypes/ClientKillFilter.cs b/src/StackExchange.Redis/APITypes/ClientKillFilter.cs new file mode 100644 index 000000000..3d1883549 --- /dev/null +++ b/src/StackExchange.Redis/APITypes/ClientKillFilter.cs @@ -0,0 +1,179 @@ +using System; +using System.Collections.Generic; +using System.Net; + +namespace StackExchange.Redis; + +/// +/// Filter determining which Redis clients to kill. +/// +/// +public class ClientKillFilter +{ + /// + /// Filter arguments builder for `CLIENT KILL`. + /// + public ClientKillFilter() { } + + /// + /// The ID of the client to kill. + /// + public long? Id { get; private set; } + + /// + /// The type of client. + /// + public ClientType? ClientType { get; private set; } + + /// + /// The authenticated ACL username. 
+ /// + public string? Username { get; private set; } + + /// + /// The endpoint to kill. + /// + public EndPoint? Endpoint { get; private set; } + + /// + /// The server endpoint to kill. + /// + public EndPoint? ServerEndpoint { get; private set; } + + /// + /// Whether to skip the current connection. + /// + public bool? SkipMe { get; private set; } + + /// + /// Age of connection in seconds. + /// + public long? MaxAgeInSeconds { get; private set; } + + /// + /// Sets client id filter. + /// + /// Id of the client to kill. + public ClientKillFilter WithId(long? id) + { + Id = id; + return this; + } + + /// + /// Sets client type filter. + /// + /// The type of the client. + public ClientKillFilter WithClientType(ClientType? clientType) + { + ClientType = clientType; + return this; + } + + /// + /// Sets the username filter. + /// + /// Authenticated ACL username. + public ClientKillFilter WithUsername(string? username) + { + Username = username; + return this; + } + + /// + /// Set the endpoint filter. + /// + /// The endpoint to kill. + public ClientKillFilter WithEndpoint(EndPoint? endpoint) + { + Endpoint = endpoint; + return this; + } + + /// + /// Set the server endpoint filter. + /// + /// The server endpoint to kill. + public ClientKillFilter WithServerEndpoint(EndPoint? serverEndpoint) + { + ServerEndpoint = serverEndpoint; + return this; + } + + /// + /// Set the skipMe filter (whether to skip the current connection). + /// + /// Whether to skip the current connection. + public ClientKillFilter WithSkipMe(bool? skipMe) + { + SkipMe = skipMe; + return this; + } + + /// + /// Set the MaxAgeInSeconds filter. + /// + /// Age of connection in seconds. + public ClientKillFilter WithMaxAgeInSeconds(long? 
maxAgeInSeconds) + { + MaxAgeInSeconds = maxAgeInSeconds; + return this; + } + + internal List ToList(bool withReplicaCommands) + { + var parts = new List(15) + { + RedisLiterals.KILL, + }; + if (Id != null) + { + parts.Add(RedisLiterals.ID); + parts.Add(Id.Value); + } + if (ClientType != null) + { + parts.Add(RedisLiterals.TYPE); + switch (ClientType.Value) + { + case Redis.ClientType.Normal: + parts.Add(RedisLiterals.normal); + break; + case Redis.ClientType.Replica: + parts.Add(withReplicaCommands ? RedisLiterals.replica : RedisLiterals.slave); + break; + case Redis.ClientType.PubSub: + parts.Add(RedisLiterals.pubsub); + break; + default: + throw new ArgumentOutOfRangeException(nameof(ClientType)); + } + } + if (Username != null) + { + parts.Add(RedisLiterals.USERNAME); + parts.Add(Username); + } + if (Endpoint != null) + { + parts.Add(RedisLiterals.ADDR); + parts.Add((RedisValue)Format.ToString(Endpoint)); + } + if (ServerEndpoint != null) + { + parts.Add(RedisLiterals.LADDR); + parts.Add((RedisValue)Format.ToString(ServerEndpoint)); + } + if (SkipMe != null) + { + parts.Add(RedisLiterals.SKIPME); + parts.Add(SkipMe.Value ? RedisLiterals.yes : RedisLiterals.no); + } + if (MaxAgeInSeconds != null) + { + parts.Add(RedisLiterals.MAXAGE); + parts.Add(MaxAgeInSeconds); + } + return parts; + } +} diff --git a/src/StackExchange.Redis/APITypes/GeoEntry.cs b/src/StackExchange.Redis/APITypes/GeoEntry.cs new file mode 100644 index 000000000..b9ecb8d5b --- /dev/null +++ b/src/StackExchange.Redis/APITypes/GeoEntry.cs @@ -0,0 +1,76 @@ +using System; + +namespace StackExchange.Redis; + +/// +/// Describes a GeoEntry element with the corresponding value. +/// GeoEntries are stored in redis as SortedSetEntries. +/// +public readonly struct GeoEntry : IEquatable +{ + /// + /// The name of the GeoEntry. + /// + public RedisValue Member { get; } + + /// + /// Describes the longitude and latitude of a GeoEntry. 
+ /// + public GeoPosition Position { get; } + + /// + /// Initializes a GeoEntry value. + /// + /// The longitude position to use. + /// The latitude position to use. + /// The value to store for this position. + public GeoEntry(double longitude, double latitude, RedisValue member) + { + Member = member; + Position = new GeoPosition(longitude, latitude); + } + + /// + /// The longitude of the GeoEntry. + /// + public double Longitude => Position.Longitude; + + /// + /// The latitude of the GeoEntry. + /// + public double Latitude => Position.Latitude; + + /// + /// A "({Longitude},{Latitude})={Member}" string representation of this entry. + /// + public override string ToString() => $"({Longitude},{Latitude})={Member}"; + + /// + public override int GetHashCode() => Position.GetHashCode() ^ Member.GetHashCode(); + + /// + /// Compares two values for equality. + /// + /// The to compare to. + public override bool Equals(object? obj) => obj is GeoEntry geObj && Equals(geObj); + + /// + /// Compares two values for equality. + /// + /// The to compare to. + public bool Equals(GeoEntry other) => this == other; + + /// + /// Compares two values for equality. + /// + /// The first entry to compare. + /// The second entry to compare. + public static bool operator ==(GeoEntry x, GeoEntry y) => x.Position == y.Position && x.Member == y.Member; + + /// + /// Compares two values for non-equality. + /// + /// The first entry to compare. + /// The second entry to compare. + public static bool operator !=(GeoEntry x, GeoEntry y) => x.Position != y.Position || x.Member != y.Member; +} diff --git a/src/StackExchange.Redis/APITypes/GeoPosition.cs b/src/StackExchange.Redis/APITypes/GeoPosition.cs new file mode 100644 index 000000000..6e53b3e32 --- /dev/null +++ b/src/StackExchange.Redis/APITypes/GeoPosition.cs @@ -0,0 +1,77 @@ +using System; + +namespace StackExchange.Redis; + +/// +/// Describes the longitude and latitude of a GeoEntry. 
+/// +public readonly struct GeoPosition : IEquatable +{ + internal static string GetRedisUnit(GeoUnit unit) => unit switch + { + GeoUnit.Meters => "m", + GeoUnit.Kilometers => "km", + GeoUnit.Miles => "mi", + GeoUnit.Feet => "ft", + _ => throw new ArgumentOutOfRangeException(nameof(unit)), + }; + + /// + /// The Latitude of the GeoPosition. + /// + public double Latitude { get; } + + /// + /// The Longitude of the GeoPosition. + /// + public double Longitude { get; } + + /// + /// Creates a new GeoPosition. + /// + public GeoPosition(double longitude, double latitude) + { + Longitude = longitude; + Latitude = latitude; + } + + /// + /// A "{long} {lat}" string representation of this position. + /// + public override string ToString() => string.Format("{0} {1}", Longitude, Latitude); + + /// + /// See . + /// Diagonals not an issue in the case of lat/long. + /// + /// + /// Diagonals are not an issue in the case of lat/long. + /// + public override int GetHashCode() => Longitude.GetHashCode() ^ Latitude.GetHashCode(); + + /// + /// Compares two values for equality. + /// + /// The to compare to. + public override bool Equals(object? obj) => obj is GeoPosition gpObj && Equals(gpObj); + + /// + /// Compares two values for equality. + /// + /// The to compare to. + public bool Equals(GeoPosition other) => this == other; + + /// + /// Compares two values for equality. + /// + /// The first position to compare. + /// The second position to compare. + public static bool operator ==(GeoPosition x, GeoPosition y) => x.Longitude == y.Longitude && x.Latitude == y.Latitude; + + /// + /// Compares two values for non-equality. + /// + /// The first position to compare. + /// The second position to compare. 
+ public static bool operator !=(GeoPosition x, GeoPosition y) => x.Longitude != y.Longitude || x.Latitude != y.Latitude; +} diff --git a/src/StackExchange.Redis/APITypes/GeoRadiusOptions.cs b/src/StackExchange.Redis/APITypes/GeoRadiusOptions.cs new file mode 100644 index 000000000..d21254fcd --- /dev/null +++ b/src/StackExchange.Redis/APITypes/GeoRadiusOptions.cs @@ -0,0 +1,55 @@ +using System; +using System.Collections.Generic; + +namespace StackExchange.Redis; + +/// +/// GeoRadius command options. +/// +[Flags] +public enum GeoRadiusOptions +{ + /// + /// No Options. + /// + None = 0, + + /// + /// Redis will return the coordinates of any results. + /// + WithCoordinates = 1, + + /// + /// Redis will return the distance from center for all results. + /// + WithDistance = 2, + + /// + /// Redis will return the geo hash value as an integer. (This is the score in the sorted set). + /// + WithGeoHash = 4, + + /// + /// Populates the commonly used values from the entry (the integer hash is not returned as it is not commonly useful). + /// + Default = WithCoordinates | WithDistance, +} + +internal static class GeoRadiusOptionsExtensions +{ + internal static void AddArgs(this GeoRadiusOptions options, List values) + { + if ((options & GeoRadiusOptions.WithCoordinates) != 0) + { + values.Add(RedisLiterals.WITHCOORD); + } + if ((options & GeoRadiusOptions.WithDistance) != 0) + { + values.Add(RedisLiterals.WITHDIST); + } + if ((options & GeoRadiusOptions.WithGeoHash) != 0) + { + values.Add(RedisLiterals.WITHHASH); + } + } +} diff --git a/src/StackExchange.Redis/APITypes/GeoRadiusResult.cs b/src/StackExchange.Redis/APITypes/GeoRadiusResult.cs new file mode 100644 index 000000000..952ca1625 --- /dev/null +++ b/src/StackExchange.Redis/APITypes/GeoRadiusResult.cs @@ -0,0 +1,48 @@ +namespace StackExchange.Redis; + +/// +/// The result of a GeoRadius command. +/// +public readonly struct GeoRadiusResult +{ + /// + /// Indicate the member being represented. 
+ /// + public override string ToString() => Member.ToString(); + + /// + /// The matched member. + /// + public RedisValue Member { get; } + + /// + /// The distance of the matched member from the center of the geo radius command. + /// + public double? Distance { get; } + + /// + /// The hash value of the matched member as an integer. (The key in the sorted set). + /// + /// Note that this is not the same as the hash returned from GeoHash. + public long? Hash { get; } + + /// + /// The coordinates of the matched member. + /// + public GeoPosition? Position { get; } + + /// + /// Returns a new GeoRadiusResult. + /// + /// The value from the result. + /// The distance from the result. + /// The hash of the result. + /// The GeoPosition of the result. + public GeoRadiusResult(in RedisValue member, double? distance, long? hash, GeoPosition? position) + { + Member = member; + Distance = distance; + Hash = hash; + Position = position; + } +} diff --git a/src/StackExchange.Redis/APITypes/GeoSearchShape.cs b/src/StackExchange.Redis/APITypes/GeoSearchShape.cs new file mode 100644 index 000000000..f2879ccc1 --- /dev/null +++ b/src/StackExchange.Redis/APITypes/GeoSearchShape.cs @@ -0,0 +1,92 @@ +using System.Collections.Generic; + +namespace StackExchange.Redis; + +/// +/// A Shape that you can use for a GeoSearch. +/// +public abstract class GeoSearchShape +{ + /// + /// The unit to use for creating the shape. + /// + protected GeoUnit Unit { get; } + + /// + /// The number of shape arguments. + /// + internal abstract int ArgCount { get; } + + /// + /// constructs a . + /// + /// The geography unit to use. + public GeoSearchShape(GeoUnit unit) + { + Unit = unit; + } + + internal abstract void AddArgs(List args); +} + +/// +/// A circle drawn on a map bounding. +/// +public class GeoSearchCircle : GeoSearchShape +{ + private readonly double _radius; + + /// + /// Creates a Shape. + /// + /// The radius of the circle. 
+ /// The distance unit the circle will use, defaults to Meters. + public GeoSearchCircle(double radius, GeoUnit unit = GeoUnit.Meters) : base(unit) + { + _radius = radius; + } + + internal sealed override int ArgCount => 3; + + /// + /// Gets the s for this shape. + /// + internal sealed override void AddArgs(List args) + { + args.Add(RedisLiterals.BYRADIUS); + args.Add(_radius); + args.Add(Unit.ToLiteral()); + } +} + +/// +/// A box drawn on a map. +/// +public class GeoSearchBox : GeoSearchShape +{ + private readonly double _height; + + private readonly double _width; + + /// + /// Initializes a GeoBox. + /// + /// The height of the box. + /// The width of the box. + /// The distance unit the box will use, defaults to Meters. + public GeoSearchBox(double height, double width, GeoUnit unit = GeoUnit.Meters) : base(unit) + { + _height = height; + _width = width; + } + + internal sealed override int ArgCount => 4; + + internal sealed override void AddArgs(List args) + { + args.Add(RedisLiterals.BYBOX); + args.Add(_width); + args.Add(_height); + args.Add(Unit.ToLiteral()); + } +} diff --git a/src/StackExchange.Redis/APITypes/HashEntry.cs b/src/StackExchange.Redis/APITypes/HashEntry.cs new file mode 100644 index 000000000..c985aeb68 --- /dev/null +++ b/src/StackExchange.Redis/APITypes/HashEntry.cs @@ -0,0 +1,89 @@ +using System; +using System.Collections.Generic; +using System.ComponentModel; + +namespace StackExchange.Redis; + +/// +/// Describes a hash-field (a name/value pair). +/// +public readonly struct HashEntry : IEquatable +{ + internal readonly RedisValue name, value; + + /// + /// Initializes a value. + /// + /// The name for this hash entry. + /// The value for this hash entry. + public HashEntry(RedisValue name, RedisValue value) + { + this.name = name; + this.value = value; + } + + /// + /// The name of the hash field. + /// + public RedisValue Name => name; + + /// + /// The value of the hash field. 
+ /// + public RedisValue Value => value; + + /// + /// The name of the hash field. + /// + [Browsable(false)] + [EditorBrowsable(EditorBrowsableState.Never), Obsolete("Please use Name", false)] + public RedisValue Key => name; + + /// + /// Converts to a key/value pair. + /// + /// The to create a from. + public static implicit operator KeyValuePair(HashEntry value) => + new KeyValuePair(value.name, value.value); + + /// + /// Converts from a key/value pair. + /// + /// The to get a from. + public static implicit operator HashEntry(KeyValuePair value) => + new HashEntry(value.Key, value.Value); + + /// + /// A "{name}: {value}" string representation of this entry. + /// + public override string ToString() => name + ": " + value; + + /// + public override int GetHashCode() => name.GetHashCode() ^ value.GetHashCode(); + + /// + /// Compares two values for equality. + /// + /// The to compare to. + public override bool Equals(object? obj) => obj is HashEntry heObj && Equals(heObj); + + /// + /// Compares two values for equality. + /// + /// The to compare to. + public bool Equals(HashEntry other) => name == other.name && value == other.value; + + /// + /// Compares two values for equality. + /// + /// The first to compare. + /// The second to compare. + public static bool operator ==(HashEntry x, HashEntry y) => x.name == y.name && x.value == y.value; + + /// + /// Compares two values for non-equality. + /// + /// The first to compare. + /// The second to compare. 
+ public static bool operator !=(HashEntry x, HashEntry y) => x.name != y.name || x.value != y.value; +} diff --git a/src/StackExchange.Redis/APITypes/LCSMatchResult.cs b/src/StackExchange.Redis/APITypes/LCSMatchResult.cs new file mode 100644 index 000000000..3aca6357b --- /dev/null +++ b/src/StackExchange.Redis/APITypes/LCSMatchResult.cs @@ -0,0 +1,177 @@ +using System; +using System.ComponentModel; + +// ReSharper disable once CheckNamespace +namespace StackExchange.Redis; + +/// +/// The result of a LongestCommonSubsequence command with IDX feature. +/// Returns a list of the positions of each sub-match. +/// +// ReSharper disable once InconsistentNaming +public readonly struct LCSMatchResult +{ + internal static LCSMatchResult Null { get; } = new LCSMatchResult(Array.Empty(), 0); + + /// + /// Whether this match result contains any matches. + /// + public bool IsEmpty => LongestMatchLength == 0 && (Matches is null || Matches.Length == 0); + + /// + /// The matched positions of all the sub-matched strings. + /// + public LCSMatch[] Matches { get; } + + /// + /// The length of the longest match. + /// + public long LongestMatchLength { get; } + + /// + /// Returns a new . + /// + /// The matched positions in each string. + /// The length of the match. + internal LCSMatchResult(LCSMatch[] matches, long matchLength) + { + Matches = matches; + LongestMatchLength = matchLength; + } + + /// + /// Represents a position range in a string. + /// + // ReSharper disable once InconsistentNaming + public readonly struct LCSPosition : IEquatable + { + /// + /// The start index of the position. + /// + public long Start { get; } + + /// + /// The end index of the position. + /// + public long End { get; } + + /// + /// Returns a new Position. + /// + /// The start index. + /// The end index. 
+ public LCSPosition(long start, long end) + { + Start = start; + End = end; + } + + /// + public override string ToString() => $"[{Start}..{End}]"; + + /// + public override int GetHashCode() + { + unchecked + { + return ((int)Start * 31) + (int)End; + } + } + + /// + public override bool Equals(object? obj) => obj is LCSPosition other && Equals(in other); + + /// + /// Compares this position to another for equality. + /// + [CLSCompliant(false)] + public bool Equals(in LCSPosition other) => Start == other.Start && End == other.End; + + /// + /// Compares this position to another for equality. + /// + bool IEquatable.Equals(LCSPosition other) => Equals(in other); + } + + /// + /// Represents a sub-match of the longest match. i.e first indexes the matched substring in each string. + /// + // ReSharper disable once InconsistentNaming + public readonly struct LCSMatch : IEquatable + { + private readonly LCSPosition _first; + private readonly LCSPosition _second; + + /// + /// The position of the matched substring in the first string. + /// + public LCSPosition First => _first; + + /// + /// The position of the matched substring in the second string. + /// + public LCSPosition Second => _second; + + /// + /// The first index of the matched substring in the first string. + /// + [EditorBrowsable(EditorBrowsableState.Never)] + [Browsable(false)] + public long FirstStringIndex => _first.Start; + + /// + /// The first index of the matched substring in the second string. + /// + [EditorBrowsable(EditorBrowsableState.Never)] + [Browsable(false)] + public long SecondStringIndex => _second.Start; + + /// + /// The length of the match. + /// + public long Length { get; } + + /// + /// Returns a new Match. + /// + /// The position of the matched substring in the first string. + /// The position of the matched substring in the second string. + /// The length of the match. 
+ internal LCSMatch(in LCSPosition first, in LCSPosition second, long length) + { + _first = first; + _second = second; + Length = length; + } + + /// + public override string ToString() => $"First: {_first}, Second: {_second}, Length: {Length}"; + + /// + public override int GetHashCode() + { + unchecked + { + int hash = 17; + hash = (hash * 31) + _first.GetHashCode(); + hash = (hash * 31) + _second.GetHashCode(); + hash = (hash * 31) + Length.GetHashCode(); + return hash; + } + } + + /// + public override bool Equals(object? obj) => obj is LCSMatch other && Equals(in other); + + /// + /// Compares this match to another for equality. + /// + [CLSCompliant(false)] + public bool Equals(in LCSMatch other) => _first.Equals(in other._first) && _second.Equals(in other._second) && Length == other.Length; + + /// + /// Compares this match to another for equality. + /// + bool IEquatable.Equals(LCSMatch other) => Equals(in other); + } +} diff --git a/src/StackExchange.Redis/APITypes/LatencyHistoryEntry.cs b/src/StackExchange.Redis/APITypes/LatencyHistoryEntry.cs new file mode 100644 index 000000000..003708e6a --- /dev/null +++ b/src/StackExchange.Redis/APITypes/LatencyHistoryEntry.cs @@ -0,0 +1,47 @@ +using System; + +namespace StackExchange.Redis; + +/// +/// A latency entry as reported by the built-in LATENCY HISTORY command. +/// +public readonly struct LatencyHistoryEntry +{ + internal static readonly ResultProcessor ToArray = new Processor(); + + private sealed class Processor : ArrayResultProcessor + { + protected override bool TryParse(in RawResult raw, out LatencyHistoryEntry parsed) + { + if (raw.Resp2TypeArray == ResultType.Array) + { + var items = raw.GetItems(); + if (items.Length >= 2 + && items[0].TryGetInt64(out var timestamp) + && items[1].TryGetInt64(out var duration)) + { + parsed = new LatencyHistoryEntry(timestamp, duration); + return true; + } + } + parsed = default; + return false; + } + } + + /// + /// The time at which this entry was recorded. 
+ /// + public DateTime Timestamp { get; } + + /// + /// The latency recorded for this event. + /// + public int DurationMilliseconds { get; } + + internal LatencyHistoryEntry(long timestamp, long duration) + { + Timestamp = RedisBase.UnixEpoch.AddSeconds(timestamp); + DurationMilliseconds = checked((int)duration); + } +} diff --git a/src/StackExchange.Redis/APITypes/LatencyLatestEntry.cs b/src/StackExchange.Redis/APITypes/LatencyLatestEntry.cs new file mode 100644 index 000000000..d1bc70e42 --- /dev/null +++ b/src/StackExchange.Redis/APITypes/LatencyLatestEntry.cs @@ -0,0 +1,60 @@ +using System; + +namespace StackExchange.Redis; + +/// +/// A latency entry as reported by the built-in LATENCY LATEST command. +/// +public readonly struct LatencyLatestEntry +{ + internal static readonly ResultProcessor ToArray = new Processor(); + + private sealed class Processor : ArrayResultProcessor + { + protected override bool TryParse(in RawResult raw, out LatencyLatestEntry parsed) + { + if (raw.Resp2TypeArray == ResultType.Array) + { + var items = raw.GetItems(); + if (items.Length >= 4 + && items[1].TryGetInt64(out var timestamp) + && items[2].TryGetInt64(out var duration) + && items[3].TryGetInt64(out var maxDuration)) + { + parsed = new LatencyLatestEntry(items[0].GetString()!, timestamp, duration, maxDuration); + return true; + } + } + parsed = default; + return false; + } + } + + /// + /// The name of this event. + /// + public string EventName { get; } + + /// + /// The time at which this entry was recorded. + /// + public DateTime Timestamp { get; } + + /// + /// The latency recorded for this event. + /// + public int DurationMilliseconds { get; } + + /// + /// The max latency recorded for all events. 
+ /// + public int MaxDurationMilliseconds { get; } + + internal LatencyLatestEntry(string eventName, long timestamp, long duration, long maxDuration) + { + EventName = eventName; + Timestamp = RedisBase.UnixEpoch.AddSeconds(timestamp); + DurationMilliseconds = checked((int)duration); + MaxDurationMilliseconds = checked((int)maxDuration); + } +} diff --git a/src/StackExchange.Redis/APITypes/ListPopResult.cs b/src/StackExchange.Redis/APITypes/ListPopResult.cs new file mode 100644 index 000000000..149bed68a --- /dev/null +++ b/src/StackExchange.Redis/APITypes/ListPopResult.cs @@ -0,0 +1,35 @@ +using System; + +namespace StackExchange.Redis; + +/// +/// A contiguous portion of a redis list. +/// +public readonly struct ListPopResult +{ + /// + /// A null ListPopResult, indicating no results. + /// + public static ListPopResult Null { get; } = new ListPopResult(RedisKey.Null, Array.Empty()); + + /// + /// Whether this object is null/empty. + /// + public bool IsNull => Key.IsNull && Values == Array.Empty(); + + /// + /// The key of the list that this set of entries came form. + /// + public RedisKey Key { get; } + + /// + /// The values from the list. + /// + public RedisValue[] Values { get; } + + internal ListPopResult(RedisKey key, RedisValue[] values) + { + Key = key; + Values = values; + } +} diff --git a/src/StackExchange.Redis/APITypes/NameValueEntry.cs b/src/StackExchange.Redis/APITypes/NameValueEntry.cs new file mode 100644 index 000000000..3fbafa86e --- /dev/null +++ b/src/StackExchange.Redis/APITypes/NameValueEntry.cs @@ -0,0 +1,81 @@ +using System; +using System.Collections.Generic; + +namespace StackExchange.Redis; + +/// +/// Describes a value contained in a stream (a name/value pair). +/// +public readonly struct NameValueEntry : IEquatable +{ + internal readonly RedisValue name, value; + + /// + /// Initializes a value. + /// + /// The name for this entry. + /// The value for this entry. 
+ public NameValueEntry(RedisValue name, RedisValue value) + { + this.name = name; + this.value = value; + } + + /// + /// The name of the field. + /// + public RedisValue Name => name; + + /// + /// The value of the field. + /// + public RedisValue Value => value; + + /// + /// Converts to a key/value pair. + /// + /// The to create a from. + public static implicit operator KeyValuePair(NameValueEntry value) => + new KeyValuePair(value.name, value.value); + + /// + /// Converts from a key/value pair. + /// + /// The to get a from. + public static implicit operator NameValueEntry(KeyValuePair value) => + new NameValueEntry(value.Key, value.Value); + + /// + /// The "{name}: {value}" string representation. + /// + public override string ToString() => name + ": " + value; + + /// + public override int GetHashCode() => name.GetHashCode() ^ value.GetHashCode(); + + /// + /// Compares two values for equality. + /// + /// The to compare to. + public override bool Equals(object? obj) => obj is NameValueEntry heObj && Equals(heObj); + + /// + /// Compares two values for equality. + /// + /// The to compare to. + public bool Equals(NameValueEntry other) => name == other.name && value == other.value; + + /// + /// Compares two values for equality. + /// + /// The first to compare. + /// The second to compare. + public static bool operator ==(NameValueEntry x, NameValueEntry y) => x.name == y.name && x.value == y.value; + + /// + /// Compares two values for non-equality. + /// + /// The first to compare. + /// The second to compare. 
+ public static bool operator !=(NameValueEntry x, NameValueEntry y) => x.name != y.name || x.value != y.value; +} diff --git a/src/StackExchange.Redis/APITypes/RedisStream.cs b/src/StackExchange.Redis/APITypes/RedisStream.cs new file mode 100644 index 000000000..fbefa1240 --- /dev/null +++ b/src/StackExchange.Redis/APITypes/RedisStream.cs @@ -0,0 +1,23 @@ +namespace StackExchange.Redis; + +/// +/// Describes a Redis Stream with an associated array of entries. +/// +public readonly struct RedisStream +{ + internal RedisStream(RedisKey key, StreamEntry[] entries) + { + Key = key; + Entries = entries; + } + + /// + /// The key for the stream. + /// + public RedisKey Key { get; } + + /// + /// An array of entries contained within the stream. + /// + public StreamEntry[] Entries { get; } +} diff --git a/src/StackExchange.Redis/APITypes/RedisValueWithExpiry.cs b/src/StackExchange.Redis/APITypes/RedisValueWithExpiry.cs new file mode 100644 index 000000000..c64e9aca9 --- /dev/null +++ b/src/StackExchange.Redis/APITypes/RedisValueWithExpiry.cs @@ -0,0 +1,28 @@ +using System; + +namespace StackExchange.Redis; + +/// +/// Describes a value/expiry pair. +/// +public readonly struct RedisValueWithExpiry +{ + /// + /// Creates a from a and a . + /// + public RedisValueWithExpiry(RedisValue value, TimeSpan? expiry) + { + Value = value; + Expiry = expiry; + } + + /// + /// The expiry of this record. + /// + public TimeSpan? Expiry { get; } + + /// + /// The value of this record. + /// + public RedisValue Value { get; } +} diff --git a/src/StackExchange.Redis/APITypes/SortedSetEntry.cs b/src/StackExchange.Redis/APITypes/SortedSetEntry.cs new file mode 100644 index 000000000..e61dc05ed --- /dev/null +++ b/src/StackExchange.Redis/APITypes/SortedSetEntry.cs @@ -0,0 +1,107 @@ +using System; +using System.Collections.Generic; +using System.ComponentModel; + +namespace StackExchange.Redis; + +/// +/// Describes a sorted-set element with the corresponding value. 
+/// +public readonly struct SortedSetEntry : IEquatable, IComparable, IComparable +{ + internal readonly RedisValue element; + internal readonly double score; + + /// + /// Initializes a value. + /// + /// The to get an entry for. + /// The redis score for . + public SortedSetEntry(RedisValue element, double score) + { + this.element = element; + this.score = score; + } + + /// + /// The unique element stored in the sorted set. + /// + public RedisValue Element => element; + + /// + /// The score against the element. + /// + public double Score => score; + + /// + /// The score against the element. + /// + [Browsable(false)] + [EditorBrowsable(EditorBrowsableState.Never), Obsolete("Please use Score", false)] + public double Value => score; + + /// + /// The unique element stored in the sorted set. + /// + [Browsable(false)] + [EditorBrowsable(EditorBrowsableState.Never), Obsolete("Please use Element", false)] + public RedisValue Key => element; + + /// + /// Converts to a key/value pair. + /// + /// The to get a for. + public static implicit operator KeyValuePair(SortedSetEntry value) => new KeyValuePair(value.element, value.score); + + /// + /// Converts from a key/value pair. + /// + /// The to get a for. + public static implicit operator SortedSetEntry(KeyValuePair value) => new SortedSetEntry(value.Key, value.Value); + + /// + /// A "{element}: {score}" string representation of the entry. + /// + public override string ToString() => element + ": " + score; + + /// + public override int GetHashCode() => element.GetHashCode() ^ score.GetHashCode(); + + /// + /// Compares two values for equality. + /// + /// The to compare to. + public override bool Equals(object? obj) => obj is SortedSetEntry ssObj && Equals(ssObj); + + /// + /// Compares two values for equality. + /// + /// The to compare to. + public bool Equals(SortedSetEntry other) => score == other.score && element == other.element; + + /// + /// Compares two values by score. + /// + /// The to compare to. 
+ public int CompareTo(SortedSetEntry other) => score.CompareTo(other.score); + + /// + /// Compares two values by score. + /// + /// The to compare to. + public int CompareTo(object? obj) => obj is SortedSetEntry ssObj ? CompareTo(ssObj) : -1; + + /// + /// Compares two values for equality. + /// + /// The first to compare. + /// The second to compare. + public static bool operator ==(SortedSetEntry x, SortedSetEntry y) => x.score == y.score && x.element == y.element; + + /// + /// Compares two values for non-equality. + /// + /// The first to compare. + /// The second to compare. + public static bool operator !=(SortedSetEntry x, SortedSetEntry y) => x.score != y.score || x.element != y.element; +} diff --git a/src/StackExchange.Redis/APITypes/SortedSetPopResult.cs b/src/StackExchange.Redis/APITypes/SortedSetPopResult.cs new file mode 100644 index 000000000..dcdc4c01e --- /dev/null +++ b/src/StackExchange.Redis/APITypes/SortedSetPopResult.cs @@ -0,0 +1,35 @@ +using System; + +namespace StackExchange.Redis; + +/// +/// A contiguous portion of a redis sorted set. +/// +public readonly struct SortedSetPopResult +{ + /// + /// A null SortedSetPopResult, indicating no results. + /// + public static SortedSetPopResult Null { get; } = new SortedSetPopResult(RedisKey.Null, Array.Empty()); + + /// + /// Whether this object is null/empty. + /// + public bool IsNull => Key.IsNull && Entries == Array.Empty(); + + /// + /// The key of the sorted set these entries came form. + /// + public RedisKey Key { get; } + + /// + /// The provided entries of the sorted set. 
+ /// + public SortedSetEntry[] Entries { get; } + + internal SortedSetPopResult(RedisKey key, SortedSetEntry[] entries) + { + Key = key; + Entries = entries; + } +} diff --git a/src/StackExchange.Redis/APITypes/StreamAutoClaimIdsOnlyResult.cs b/src/StackExchange.Redis/APITypes/StreamAutoClaimIdsOnlyResult.cs new file mode 100644 index 000000000..114763129 --- /dev/null +++ b/src/StackExchange.Redis/APITypes/StreamAutoClaimIdsOnlyResult.cs @@ -0,0 +1,41 @@ +using System; + +namespace StackExchange.Redis; + +/// +/// Result of the XAUTOCLAIM command with the JUSTID option. +/// +public readonly struct StreamAutoClaimIdsOnlyResult +{ + internal StreamAutoClaimIdsOnlyResult(RedisValue nextStartId, RedisValue[] claimedIds, RedisValue[] deletedIds) + { + NextStartId = nextStartId; + ClaimedIds = claimedIds; + DeletedIds = deletedIds; + } + + /// + /// A null , indicating no results. + /// + public static StreamAutoClaimIdsOnlyResult Null { get; } = new StreamAutoClaimIdsOnlyResult(RedisValue.Null, Array.Empty(), Array.Empty()); + + /// + /// Whether this object is null/empty. + /// + public bool IsNull => NextStartId.IsNull && ClaimedIds == Array.Empty() && DeletedIds == Array.Empty(); + + /// + /// The stream ID to be used in the next call to StreamAutoClaim. + /// + public RedisValue NextStartId { get; } + + /// + /// Array of IDs claimed by the command. + /// + public RedisValue[] ClaimedIds { get; } + + /// + /// Array of message IDs deleted from the stream. + /// + public RedisValue[] DeletedIds { get; } +} diff --git a/src/StackExchange.Redis/APITypes/StreamAutoClaimResult.cs b/src/StackExchange.Redis/APITypes/StreamAutoClaimResult.cs new file mode 100644 index 000000000..09f607f3d --- /dev/null +++ b/src/StackExchange.Redis/APITypes/StreamAutoClaimResult.cs @@ -0,0 +1,41 @@ +using System; + +namespace StackExchange.Redis; + +/// +/// Result of the XAUTOCLAIM command. 
+/// +public readonly struct StreamAutoClaimResult +{ + internal StreamAutoClaimResult(RedisValue nextStartId, StreamEntry[] claimedEntries, RedisValue[] deletedIds) + { + NextStartId = nextStartId; + ClaimedEntries = claimedEntries; + DeletedIds = deletedIds; + } + + /// + /// A null , indicating no results. + /// + public static StreamAutoClaimResult Null { get; } = new StreamAutoClaimResult(RedisValue.Null, Array.Empty(), Array.Empty()); + + /// + /// Whether this object is null/empty. + /// + public bool IsNull => NextStartId.IsNull && ClaimedEntries == Array.Empty() && DeletedIds == Array.Empty(); + + /// + /// The stream ID to be used in the next call to StreamAutoClaim. + /// + public RedisValue NextStartId { get; } + + /// + /// An array of for the successfully claimed entries. + /// + public StreamEntry[] ClaimedEntries { get; } + + /// + /// An array of message IDs deleted from the stream. + /// + public RedisValue[] DeletedIds { get; } +} diff --git a/src/StackExchange.Redis/APITypes/StreamConsumer.cs b/src/StackExchange.Redis/APITypes/StreamConsumer.cs new file mode 100644 index 000000000..a99778707 --- /dev/null +++ b/src/StackExchange.Redis/APITypes/StreamConsumer.cs @@ -0,0 +1,23 @@ +namespace StackExchange.Redis; + +/// +/// Describes a consumer off a Redis Stream. +/// +public readonly struct StreamConsumer +{ + internal StreamConsumer(RedisValue name, int pendingMessageCount) + { + Name = name; + PendingMessageCount = pendingMessageCount; + } + + /// + /// The name of the consumer. + /// + public RedisValue Name { get; } + + /// + /// The number of messages that have been delivered by not yet acknowledged by the consumer. 
+ /// + public int PendingMessageCount { get; } +} diff --git a/src/StackExchange.Redis/APITypes/StreamConsumerInfo.cs b/src/StackExchange.Redis/APITypes/StreamConsumerInfo.cs new file mode 100644 index 000000000..ab7cd9af1 --- /dev/null +++ b/src/StackExchange.Redis/APITypes/StreamConsumerInfo.cs @@ -0,0 +1,30 @@ +namespace StackExchange.Redis; + +/// +/// Describes a consumer within a consumer group, retrieved using the XINFO CONSUMERS command. . +/// +public readonly struct StreamConsumerInfo +{ + internal StreamConsumerInfo(string name, int pendingMessageCount, long idleTimeInMilliseconds) + { + Name = name; + PendingMessageCount = pendingMessageCount; + IdleTimeInMilliseconds = idleTimeInMilliseconds; + } + + /// + /// The name of the consumer. + /// + public string Name { get; } + + /// + /// The number of pending messages for the consumer. A pending message is one that has been + /// received by the consumer but not yet acknowledged. + /// + public int PendingMessageCount { get; } + + /// + /// The idle time, if any, for the consumer. + /// + public long IdleTimeInMilliseconds { get; } +} diff --git a/src/StackExchange.Redis/APITypes/StreamEntry.cs b/src/StackExchange.Redis/APITypes/StreamEntry.cs new file mode 100644 index 000000000..25f4f690c --- /dev/null +++ b/src/StackExchange.Redis/APITypes/StreamEntry.cs @@ -0,0 +1,83 @@ +using System; + +namespace StackExchange.Redis; + +/// +/// Describes an entry contained in a Redis Stream. +/// +public readonly struct StreamEntry +{ + /// + /// Creates an stream entry. + /// + public StreamEntry(RedisValue id, NameValueEntry[] values) + { + Id = id; + Values = values; + IdleTime = null; + DeliveryCount = 0; + } + + /// + /// Creates a stream entry. + /// + public StreamEntry(RedisValue id, NameValueEntry[] values, TimeSpan? idleTime, int deliveryCount) + { + Id = id; + Values = values; + IdleTime = idleTime; + DeliveryCount = deliveryCount; + } + + /// + /// A null stream entry. 
+ /// + public static StreamEntry Null { get; } = new StreamEntry(RedisValue.Null, Array.Empty()); + + /// + /// The ID assigned to the message. + /// + public RedisValue Id { get; } + + /// + /// The values contained within the message. + /// + public NameValueEntry[] Values { get; } + + /// + /// Search for a specific field by name, returning the value. + /// + public RedisValue this[RedisValue fieldName] + { + get + { + var values = Values; + if (values != null) + { + for (int i = 0; i < values.Length; i++) + { + if (values[i].name == fieldName) + return values[i].value; + } + } + return RedisValue.Null; + } + } + + /// + /// Delivery count - the number of times this entry has been delivered: 0 for new messages that haven't been delivered before, + /// 1+ for claimed messages (previously unacknowledged entries). + /// + public int DeliveryCount { get; } + + /// + /// Idle time in milliseconds - the number of milliseconds elapsed since this entry was last delivered to a consumer. + /// + /// This member is populated when using XREADGROUP with CLAIM. + public TimeSpan? IdleTime { get; } + + /// + /// Indicates that the Redis Stream Entry is null. + /// + public bool IsNull => Id == RedisValue.Null && Values == Array.Empty(); +} diff --git a/src/StackExchange.Redis/APITypes/StreamGroupInfo.cs b/src/StackExchange.Redis/APITypes/StreamGroupInfo.cs new file mode 100644 index 000000000..a357c400e --- /dev/null +++ b/src/StackExchange.Redis/APITypes/StreamGroupInfo.cs @@ -0,0 +1,48 @@ +namespace StackExchange.Redis; + +/// +/// Describes a consumer group retrieved using the XINFO GROUPS command. . +/// +public readonly struct StreamGroupInfo +{ + internal StreamGroupInfo(string name, int consumerCount, int pendingMessageCount, string? lastDeliveredId, long? entriesRead, long? 
lag) + { + Name = name; + ConsumerCount = consumerCount; + PendingMessageCount = pendingMessageCount; + LastDeliveredId = lastDeliveredId; + EntriesRead = entriesRead; + Lag = lag; + } + + /// + /// The name of the consumer group. + /// + public string Name { get; } + + /// + /// The number of consumers within the consumer group. + /// + public int ConsumerCount { get; } + + /// + /// The total number of pending messages for the consumer group. A pending message is one that has been + /// received by a consumer but not yet acknowledged. + /// + public int PendingMessageCount { get; } + + /// + /// The Id of the last message delivered to the group. + /// + public string? LastDeliveredId { get; } + + /// + /// Total number of entries the group had read. + /// + public long? EntriesRead { get; } + + /// + /// The number of entries in the range between the group's read entries and the stream's entries. + /// + public long? Lag { get; } +} diff --git a/src/StackExchange.Redis/APITypes/StreamInfo.cs b/src/StackExchange.Redis/APITypes/StreamInfo.cs new file mode 100644 index 000000000..1de0526ec --- /dev/null +++ b/src/StackExchange.Redis/APITypes/StreamInfo.cs @@ -0,0 +1,159 @@ +using System.Diagnostics.CodeAnalysis; +using RESPite; + +namespace StackExchange.Redis; + +/// +/// Describes stream information retrieved using the XINFO STREAM command. . +/// +public readonly struct StreamInfo +{ + // OK, I accept that this parameter list / size is getting silly, but: it is too late + // to refactor this as a class. 
+ internal StreamInfo( + int length, + int radixTreeKeys, + int radixTreeNodes, + int groups, + StreamEntry firstEntry, + StreamEntry lastEntry, + RedisValue lastGeneratedId, + RedisValue maxDeletedEntryId, + long entriesAdded, + RedisValue recordedFirstEntryId, + long idmpDuration, + long idmpMaxSize, + long pidsTracked, + long iidsTracked, + long iidsAdded, + long iidsDuplicates) + { + Length = length; + RadixTreeKeys = radixTreeKeys; + RadixTreeNodes = radixTreeNodes; + ConsumerGroupCount = groups; + FirstEntry = firstEntry; + LastEntry = lastEntry; + LastGeneratedId = lastGeneratedId; + + // 7.0 + MaxDeletedEntryId = maxDeletedEntryId; + EntriesAdded = entriesAdded; + RecordedFirstEntryId = recordedFirstEntryId; + + // 8.6 + IdmpDuration = idmpDuration; + IdmpMaxSize = idmpMaxSize; + PidsTracked = pidsTracked; + IidsTracked = iidsTracked; + IidsAdded = iidsAdded; + IidsDuplicates = iidsDuplicates; + } + + /// + /// The number of entries in the stream. + /// + public int Length { get; } + + /// + /// The number of radix tree keys in the stream. + /// + public int RadixTreeKeys { get; } + + /// + /// The number of radix tree nodes in the stream. + /// + public int RadixTreeNodes { get; } + + /// + /// The number of consumers groups in the stream. + /// + public int ConsumerGroupCount { get; } + + /// + /// The first entry in the stream. + /// + public StreamEntry FirstEntry { get; } + + /// + /// The last entry in the stream. + /// + public StreamEntry LastEntry { get; } + + /// + /// The last generated id. + /// + public RedisValue LastGeneratedId { get; } + + /// + /// The first id recorded for the stream. + /// + public RedisValue RecordedFirstEntryId { get; } + + /// + /// The count of all entries added to the stream during its lifetime. + /// + public long EntriesAdded { get; } + + /// + /// The maximal entry ID that was deleted from the stream. 
+ /// + public RedisValue MaxDeletedEntryId { get; } + + /// + /// The duration value configured for the stream’s IDMP map (seconds), or -1 if unavailable. + /// + public long IdmpDuration + { + [Experimental(Experiments.Server_8_6, UrlFormat = Experiments.UrlFormat)] + get; + } + + /// + /// The maxsize value configured for the stream’s IDMP map, or -1 if unavailable. + /// + public long IdmpMaxSize + { + [Experimental(Experiments.Server_8_6, UrlFormat = Experiments.UrlFormat)] + get; + } + + /// + /// The number of idempotent pids currently tracked in the stream, or -1 if unavailable. + /// + public long PidsTracked + { + [Experimental(Experiments.Server_8_6, UrlFormat = Experiments.UrlFormat)] + get; + } + + /// + /// The number of idempotent ids currently tracked in the stream, or -1 if unavailable. + /// This count reflects active iids that haven't expired or been evicted yet. + /// + public long IidsTracked + { + [Experimental(Experiments.Server_8_6, UrlFormat = Experiments.UrlFormat)] + get; + } + + /// + /// The count of all entries with an idempotent iid added to the stream during its lifetime, or -1 if unavailable. + /// This is a cumulative counter that increases with each idempotent entry added. + /// + public long IidsAdded + { + [Experimental(Experiments.Server_8_6, UrlFormat = Experiments.UrlFormat)] + get; + } + + /// + /// The count of all duplicate iids (for all pids) detected during the stream's lifetime, or -1 if unavailable. + /// This is a cumulative counter that increases with each duplicate iid. 
+ /// + public long IidsDuplicates + { + [Experimental(Experiments.Server_8_6, UrlFormat = Experiments.UrlFormat)] + get; + } +} diff --git a/src/StackExchange.Redis/APITypes/StreamPendingInfo.cs b/src/StackExchange.Redis/APITypes/StreamPendingInfo.cs new file mode 100644 index 000000000..b55696f8c --- /dev/null +++ b/src/StackExchange.Redis/APITypes/StreamPendingInfo.cs @@ -0,0 +1,35 @@ +namespace StackExchange.Redis; + +/// +/// Describes basic information about pending messages for a consumer group. +/// +public readonly struct StreamPendingInfo +{ + internal StreamPendingInfo(int pendingMessageCount, RedisValue lowestId, RedisValue highestId, StreamConsumer[] consumers) + { + PendingMessageCount = pendingMessageCount; + LowestPendingMessageId = lowestId; + HighestPendingMessageId = highestId; + Consumers = consumers; + } + + /// + /// The number of pending messages. A pending message is a message that has been consumed but not yet acknowledged. + /// + public int PendingMessageCount { get; } + + /// + /// The lowest message ID in the set of pending messages. + /// + public RedisValue LowestPendingMessageId { get; } + + /// + /// The highest message ID in the set of pending messages. + /// + public RedisValue HighestPendingMessageId { get; } + + /// + /// An array of consumers within the consumer group that have pending messages. + /// + public StreamConsumer[] Consumers { get; } +} diff --git a/src/StackExchange.Redis/APITypes/StreamPendingMessageInfo.cs b/src/StackExchange.Redis/APITypes/StreamPendingMessageInfo.cs new file mode 100644 index 000000000..32cbbcc8d --- /dev/null +++ b/src/StackExchange.Redis/APITypes/StreamPendingMessageInfo.cs @@ -0,0 +1,36 @@ +namespace StackExchange.Redis; + +/// +/// Describes properties of a pending message. +/// A pending message is one that has been received by a consumer but has not yet been acknowledged. 
+/// +public readonly struct StreamPendingMessageInfo +{ + internal StreamPendingMessageInfo(RedisValue messageId, RedisValue consumerName, long idleTimeInMs, int deliveryCount) + { + MessageId = messageId; + ConsumerName = consumerName; + IdleTimeInMilliseconds = idleTimeInMs; + DeliveryCount = deliveryCount; + } + + /// + /// The ID of the pending message. + /// + public RedisValue MessageId { get; } + + /// + /// The consumer that received the pending message. + /// + public RedisValue ConsumerName { get; } + + /// + /// The time that has passed since the message was last delivered to a consumer. + /// + public long IdleTimeInMilliseconds { get; } + + /// + /// The number of times the message has been delivered to a consumer. + /// + public int DeliveryCount { get; } +} diff --git a/src/StackExchange.Redis/APITypes/StreamPosition.cs b/src/StackExchange.Redis/APITypes/StreamPosition.cs new file mode 100644 index 000000000..b58dfd599 --- /dev/null +++ b/src/StackExchange.Redis/APITypes/StreamPosition.cs @@ -0,0 +1,66 @@ +using System; + +namespace StackExchange.Redis; + +/// +/// Describes a pair consisting of the Stream Key and the from which to begin reading a stream. +/// +public struct StreamPosition +{ + /// + /// Read from the beginning of a stream. + /// + public static RedisValue Beginning => StreamConstants.ReadMinValue; + + /// + /// Read new messages. + /// + public static RedisValue NewMessages => StreamConstants.NewMessages; + + /// + /// Initializes a value. + /// + /// The key for the stream. + /// The position from which to begin reading the stream. + public StreamPosition(RedisKey key, RedisValue position) + { + Key = key; + Position = position; + } + + /// + /// The stream key. + /// + public RedisKey Key { get; } + + /// + /// The offset at which to begin reading the stream. 
+ /// + public RedisValue Position { get; } + + internal static RedisValue Resolve(RedisValue value, RedisCommand command) + { + if (value == NewMessages) + { + return command switch + { + RedisCommand.XREAD => throw new InvalidOperationException("StreamPosition.NewMessages cannot be used with StreamRead."), + RedisCommand.XREADGROUP => StreamConstants.UndeliveredMessages, + RedisCommand.XGROUP => StreamConstants.NewMessages, + // new is only valid for the above + _ => throw new ArgumentException($"Unsupported command in StreamPosition.Resolve: {command}.", nameof(command)), + }; + } + else if (value == StreamPosition.Beginning) + { + switch (command) + { + case RedisCommand.XREAD: + case RedisCommand.XREADGROUP: + case RedisCommand.XGROUP: + return StreamConstants.AllMessages; + } + } + return value; + } +} diff --git a/src/StackExchange.Redis/AssemblyInfoHack.cs b/src/StackExchange.Redis/AssemblyInfoHack.cs index 2936ed56c..50cdc2c1c 100644 --- a/src/StackExchange.Redis/AssemblyInfoHack.cs +++ b/src/StackExchange.Redis/AssemblyInfoHack.cs @@ -1,10 +1,6 @@ -// Yes, this is embarassing. However, in .NET Core the including AssemblyInfo (ifdef'd or not) will screw with +// Yes, this is embarrassing. However, in .NET Core the including AssemblyInfo (ifdef'd or not) will screw with // your version numbers. Therefore, we need to move the attribute out into another file...this file. 
// When .csproj merges in, this should be able to return to Properties/AssemblyInfo.cs using System; -using System.Runtime.CompilerServices; -[assembly: InternalsVisibleTo("StackExchange.Redis.Server, PublicKey=00240000048000009400000006020000002400005253413100040000010001007791a689e9d8950b44a9a8886baad2ea180e7a8a854f158c9b98345ca5009cdd2362c84f368f1c3658c132b3c0f74e44ff16aeb2e5b353b6e0fe02f923a050470caeac2bde47a2238a9c7125ed7dab14f486a5a64558df96640933b9f2b6db188fc4a820f96dce963b662fa8864adbff38e5b4542343f162ecdc6dad16912fff")] -[assembly: InternalsVisibleTo("StackExchange.Redis.Tests, PublicKey=00240000048000009400000006020000002400005253413100040000010001007791a689e9d8950b44a9a8886baad2ea180e7a8a854f158c9b98345ca5009cdd2362c84f368f1c3658c132b3c0f74e44ff16aeb2e5b353b6e0fe02f923a050470caeac2bde47a2238a9c7125ed7dab14f486a5a64558df96640933b9f2b6db188fc4a820f96dce963b662fa8864adbff38e5b4542343f162ecdc6dad16912fff")] -[assembly: InternalsVisibleTo("NRediSearch.Test, PublicKey=00240000048000009400000006020000002400005253413100040000010001007791a689e9d8950b44a9a8886baad2ea180e7a8a854f158c9b98345ca5009cdd2362c84f368f1c3658c132b3c0f74e44ff16aeb2e5b353b6e0fe02f923a050470caeac2bde47a2238a9c7125ed7dab14f486a5a64558df96640933b9f2b6db188fc4a820f96dce963b662fa8864adbff38e5b4542343f162ecdc6dad16912fff")] [assembly: CLSCompliant(true)] diff --git a/src/StackExchange.Redis/BacklogPolicy.cs b/src/StackExchange.Redis/BacklogPolicy.cs new file mode 100644 index 000000000..baa13ae20 --- /dev/null +++ b/src/StackExchange.Redis/BacklogPolicy.cs @@ -0,0 +1,43 @@ +namespace StackExchange.Redis +{ + /// + /// The backlog policy to use for commands. This policy comes into effect when a connection is unhealthy or unavailable. + /// The policy can choose to backlog commands and wait to try them (within their timeout) against a connection when it comes up, + /// or it could choose to fail fast and throw ASAP. 
Different apps desire different behaviors with backpressure and how to handle + /// large amounts of load, so this is configurable to optimize the happy path but avoid spiral-of-death queue scenarios for others. + /// + public sealed class BacklogPolicy + { + /// + /// Backlog behavior matching StackExchange.Redis's 2.x line, failing fast and not attempting to queue + /// and retry when a connection is available again. + /// + public static BacklogPolicy FailFast { get; } = new() + { + QueueWhileDisconnected = false, + AbortPendingOnConnectionFailure = true, + }; + + /// + /// Default backlog policy which will allow commands to be issues against an endpoint and queue up. + /// Commands are still subject to their async timeout (which serves as a queue size check). + /// + public static BacklogPolicy Default { get; } = new() + { + QueueWhileDisconnected = true, + AbortPendingOnConnectionFailure = false, + }; + + /// + /// Whether to queue commands while disconnected. + /// True means queue for attempts up until their timeout. + /// means to fail ASAP and queue nothing. + /// + public bool QueueWhileDisconnected { get; init; } + + /// + /// Whether to immediately abandon (with an exception) all pending commands when a connection goes unhealthy. 
+ /// + public bool AbortPendingOnConnectionFailure { get; init; } + } +} diff --git a/src/StackExchange.Redis/BufferReader.cs b/src/StackExchange.Redis/BufferReader.cs index 952a70d0e..22b36ccb6 100644 --- a/src/StackExchange.Redis/BufferReader.cs +++ b/src/StackExchange.Redis/BufferReader.cs @@ -10,17 +10,21 @@ internal enum ConsumeResult Success, NeedMoreData, } + internal ref struct BufferReader { + private long _totalConsumed; + public int OffsetThisSpan { get; private set; } + public int RemainingThisSpan { get; private set; } + + public long TotalConsumed => _totalConsumed; + private ReadOnlySequence.Enumerator _iterator; private ReadOnlySpan _current; public ReadOnlySpan OversizedSpan => _current; public ReadOnlySpan SlicedSpan => _current.Slice(OffsetThisSpan, RemainingThisSpan); - public int OffsetThisSpan { get; private set; } - private int TotalConsumed { get; set; } // hide this; callers should use the snapshot-aware methods instead - public int RemainingThisSpan { get; private set; } public bool IsEmpty => RemainingThisSpan == 0; @@ -37,25 +41,26 @@ private bool FetchNextSegment() _current = _iterator.Current.Span; OffsetThisSpan = 0; RemainingThisSpan = _current.Length; - } while (IsEmpty); // skip empty segments, they don't help us! + } + while (IsEmpty); // skip empty segments, they don't help us! return true; } - public BufferReader(ReadOnlySequence buffer) + public BufferReader(scoped in ReadOnlySequence buffer) { _buffer = buffer; _lastSnapshotPosition = buffer.Start; _lastSnapshotBytes = 0; _iterator = buffer.GetEnumerator(); _current = default; - OffsetThisSpan = RemainingThisSpan = TotalConsumed = 0; + _totalConsumed = OffsetThisSpan = RemainingThisSpan = 0; FetchNextSegment(); } /// - /// Note that in results other than success, no guarantees are made about final state; if you care: snapshot + /// Note that in results other than success, no guarantees are made about final state; if you care: snapshot. 
/// public ConsumeResult TryConsumeCRLF() { @@ -78,6 +83,7 @@ public ConsumeResult TryConsumeCRLF() return result; } } + public bool TryConsume(int count) { if (count < 0) throw new ArgumentOutOfRangeException(nameof(count)); @@ -87,7 +93,7 @@ public bool TryConsume(int count) if (count <= available) { // consume part of this span - TotalConsumed += count; + _totalConsumed += count; RemainingThisSpan -= count; OffsetThisSpan += count; @@ -96,9 +102,10 @@ public bool TryConsume(int count) } // consume all of this span - TotalConsumed += available; + _totalConsumed += available; count -= available; - } while (FetchNextSegment()); + } + while (FetchNextSegment()); return false; } @@ -110,26 +117,31 @@ public bool TryConsume(int count) // to avoid having to use buffer.Slice on huge ranges private SequencePosition SnapshotPosition() { - var consumed = TotalConsumed; - var delta = consumed - _lastSnapshotBytes; + var delta = _totalConsumed - _lastSnapshotBytes; if (delta == 0) return _lastSnapshotPosition; var pos = _buffer.GetPosition(delta, _lastSnapshotPosition); - _lastSnapshotBytes = consumed; + _lastSnapshotBytes = _totalConsumed; return _lastSnapshotPosition = pos; } + public ReadOnlySequence ConsumeAsBuffer(int count) { if (!TryConsumeAsBuffer(count, out var buffer)) throw new EndOfStreamException(); return buffer; } + public ReadOnlySequence ConsumeToEnd() { var from = SnapshotPosition(); var result = _buffer.Slice(from); - while (FetchNextSegment()) { } // consume all + while (FetchNextSegment()) + { + // consume all + } return result; } + public bool TryConsumeAsBuffer(int count, out ReadOnlySequence buffer) { var from = SnapshotPosition(); @@ -142,6 +154,7 @@ public bool TryConsumeAsBuffer(int count, out ReadOnlySequence buffer) buffer = _buffer.Slice(from, to); return true; } + public void Consume(int count) { if (!TryConsume(count)) throw new EndOfStreamException(); @@ -159,13 +172,14 @@ public void Consume(int count) if (found >= 0) return totalSkipped + 
found; totalSkipped += span.Length; - } while (reader.FetchNextSegment()); + } + while (reader.FetchNextSegment()); return -1; } + internal static int FindNextCrLf(BufferReader reader) // very deliberately not ref; want snapshot { // is it in the current span? (we need to handle the offsets differently if so) - int totalSkipped = 0; bool haveTrailingCR = false; do @@ -176,7 +190,6 @@ public void Consume(int count) if (haveTrailingCR) { if (span[0] == '\n') return totalSkipped - 1; - haveTrailingCR = false; } int found = span.VectorSafeIndexOfCRLF(); @@ -189,18 +202,6 @@ public void Consume(int count) return -1; } - //internal static bool HasBytes(BufferReader reader, int count) // very deliberately not ref; want snapshot - //{ - // if (count < 0) throw new ArgumentOutOfRangeException(nameof(count)); - // do - // { - // var available = reader.RemainingThisSpan; - // if (count <= available) return true; - // count -= available; - // } while (reader.FetchNextSegment()); - // return false; - //} - public int ConsumeByte() { if (IsEmpty) return -1; @@ -208,6 +209,7 @@ public int ConsumeByte() Consume(1); return value; } + public int PeekByte() => IsEmpty ? -1 : _current[OffsetThisSpan]; public ReadOnlySequence SliceFromCurrent() diff --git a/src/StackExchange.Redis/ChannelMessage.cs b/src/StackExchange.Redis/ChannelMessage.cs new file mode 100644 index 000000000..a29454f0c --- /dev/null +++ b/src/StackExchange.Redis/ChannelMessage.cs @@ -0,0 +1,73 @@ +using System; + +namespace StackExchange.Redis; + +/// +/// Represents a message that is broadcast via publish/subscribe. +/// +public readonly struct ChannelMessage +{ + // this is *smaller* than storing a RedisChannel for the subscribed channel + private readonly ChannelMessageQueue _queue; + + /// + /// The Channel:Message string representation. 
+ /// + public override string ToString() => ((string?)Channel) + ":" + ((string?)Message); + + /// + public override int GetHashCode() => Channel.GetHashCode() ^ Message.GetHashCode(); + + /// + public override bool Equals(object? obj) => obj is ChannelMessage cm + && cm.Channel == Channel && cm.Message == Message; + + internal ChannelMessage(ChannelMessageQueue queue, in RedisChannel channel, in RedisValue value) + { + _queue = queue; + _channel = channel; + _message = value; + } + + /// + /// The channel that the subscription was created from. + /// + public RedisChannel SubscriptionChannel => _queue.Channel; + + private readonly RedisChannel _channel; + + /// + /// The channel that the message was broadcast to. + /// + public RedisChannel Channel => _channel; + + private readonly RedisValue _message; + + /// + /// The value that was broadcast. + /// + public RedisValue Message => _message; + + /// + /// Checks if 2 messages are .Equal(). + /// + public static bool operator ==(ChannelMessage left, ChannelMessage right) => left.Equals(right); + + /// + /// Checks if 2 messages are not .Equal(). + /// + public static bool operator !=(ChannelMessage left, ChannelMessage right) => !left.Equals(right); + + /// + /// If the channel is either a keyspace or keyevent notification, resolve the key and event type. + /// + public bool TryParseKeyNotification(out KeyNotification notification) + => KeyNotification.TryParse(in _channel, in _message, out notification); + + /// + /// If the channel is either a keyspace or keyevent notification *with the requested prefix*, resolve the key and event type, + /// and remove the prefix when reading the key. 
+ /// + public bool TryParseKeyNotification(ReadOnlySpan keyPrefix, out KeyNotification notification) + => KeyNotification.TryParse(keyPrefix, in _channel, in _message, out notification); +} diff --git a/src/StackExchange.Redis/ChannelMessageQueue.cs b/src/StackExchange.Redis/ChannelMessageQueue.cs index 280e4fc57..f7bd9a4a2 100644 --- a/src/StackExchange.Redis/ChannelMessageQueue.cs +++ b/src/StackExchange.Redis/ChannelMessageQueue.cs @@ -1,339 +1,330 @@ using System; -using System.Reflection; +using System.Buffers.Text; +using System.Collections.Generic; using System.Threading; using System.Threading.Channels; using System.Threading.Tasks; -namespace StackExchange.Redis +namespace StackExchange.Redis; + +/// +/// Represents a message queue of ordered pub/sub notifications. +/// +/// +/// To create a ChannelMessageQueue, use +/// or . +/// +public sealed class ChannelMessageQueue : IAsyncEnumerable { + private readonly Channel _queue; + + /// + /// The Channel that was subscribed for this queue. + /// + public RedisChannel Channel { get; } + + private RedisSubscriber? _parent; + + /// + /// The string representation of this channel. + /// + public override string? ToString() => (string?)Channel; + /// - /// Represents a message that is broadcast via pub/sub + /// An awaitable task the indicates completion of the queue (including drain of data). /// - public readonly struct ChannelMessage + public Task Completion => _queue.Reader.Completion; + + internal ChannelMessageQueue(in RedisChannel redisChannel, RedisSubscriber parent) { - private readonly ChannelMessageQueue _queue; // this is *smaller* than storing a RedisChannel for the subsribed channel - /// - /// See Object.ToString - /// - public override string ToString() => ((string)Channel) + ":" + ((string)Message); - - /// - /// See Object.GetHashCode - /// - public override int GetHashCode() => Channel.GetHashCode() ^ Message.GetHashCode(); - - /// - /// See Object.Equals - /// - /// The to compare. 
- public override bool Equals(object obj) => obj is ChannelMessage cm - && cm.Channel == Channel && cm.Message == Message; - internal ChannelMessage(ChannelMessageQueue queue, in RedisChannel channel, in RedisValue value) - { - _queue = queue; - Channel = channel; - Message = value; - } + Channel = redisChannel; + _parent = parent; + _queue = System.Threading.Channels.Channel.CreateUnbounded(s_ChannelOptions); + } + + private static readonly UnboundedChannelOptions s_ChannelOptions = new UnboundedChannelOptions + { + SingleWriter = true, SingleReader = false, AllowSynchronousContinuations = false, + }; - /// - /// The channel that the subscription was created from - /// - public RedisChannel SubscriptionChannel => _queue.Channel; - - /// - /// The channel that the message was broadcast to - /// - public RedisChannel Channel { get; } - /// - /// The value that was broadcast - /// - public RedisValue Message { get; } + private void Write(in RedisChannel channel, in RedisValue value) + { + var writer = _queue.Writer; + writer.TryWrite(new ChannelMessage(this, channel, value)); } /// - /// Represents a message queue of ordered pub/sub notifications + /// Consume a message from the channel. /// - /// To create a ChannelMessageQueue, use ISubscriber.Subscribe[Async](RedisKey) - public sealed class ChannelMessageQueue + /// The to use. + public ValueTask ReadAsync(CancellationToken cancellationToken = default) + => _queue.Reader.ReadAsync(cancellationToken); + + /// + /// Attempt to synchronously consume a message from the channel. + /// + /// The read from the Channel. + public bool TryRead(out ChannelMessage item) => _queue.Reader.TryRead(out item); + + /// + /// Attempt to query the backlog length of the queue. + /// + /// The (approximate) count of items in the Channel. 
+ public bool TryGetCount(out int count) { - private readonly Channel _queue; - /// - /// The Channel that was subscribed for this queue - /// - public RedisChannel Channel { get; } - private RedisSubscriber _parent; - - /// - /// See Object.ToString - /// - public override string ToString() => (string)Channel; - - /// - /// An awaitable task the indicates completion of the queue (including drain of data) - /// - public Task Completion => _queue.Reader.Completion; - - internal ChannelMessageQueue(in RedisChannel redisChannel, RedisSubscriber parent) + var reader = _queue.Reader; + if (reader.CanCount) { - Channel = redisChannel; - _parent = parent; - _queue = System.Threading.Channels.Channel.CreateUnbounded(s_ChannelOptions); + count = reader.Count; + return true; } - private static readonly UnboundedChannelOptions s_ChannelOptions = new UnboundedChannelOptions - { - SingleWriter = true, - SingleReader = false, - AllowSynchronousContinuations = false, - }; - -#pragma warning disable RCS1231 // Make parameter ref read-only. - uses as a delegate for Action - private void Write(in RedisChannel channel, in RedisValue value) -#pragma warning restore RCS1231 // Make parameter ref read-only. - { - var writer = _queue.Writer; - writer.TryWrite(new ChannelMessage(this, channel, value)); - } + count = 0; + return false; + } - /// - /// Consume a message from the channel. - /// - /// The to use. - public ValueTask ReadAsync(CancellationToken cancellationToken = default) - => _queue.Reader.ReadAsync(cancellationToken); - - /// - /// Attempt to synchronously consume a message from the channel. - /// - /// The read from the Channel. - public bool TryRead(out ChannelMessage item) => _queue.Reader.TryRead(out item); - - /// - /// Attempt to query the backlog length of the queue. - /// - /// The (approximate) count of items in the Channel. 
- public bool TryGetCount(out int count) - { - // get this using the reflection - try - { - var prop = _queue.GetType().GetProperty("ItemsCountForDebugger", BindingFlags.Instance | BindingFlags.NonPublic); - if (prop != null) - { - count = (int)prop.GetValue(_queue); - return true; - } - } - catch { } - count = default; - return false; - } + private Delegate? _onMessageHandler; - private Delegate _onMessageHandler; - private void AssertOnMessage(Delegate handler) - { - if (handler == null) throw new ArgumentNullException(nameof(handler)); - if (Interlocked.CompareExchange(ref _onMessageHandler, handler, null) != null) - throw new InvalidOperationException("Only a single " + nameof(OnMessage) + " is allowed"); - } + private void AssertOnMessage(Delegate handler) + { + if (handler == null) throw new ArgumentNullException(nameof(handler)); + if (Interlocked.CompareExchange(ref _onMessageHandler, handler, null) != null) + throw new InvalidOperationException("Only a single " + nameof(OnMessage) + " is allowed"); + } - /// - /// Create a message loop that processes messages sequentially. - /// - /// The handler to run when receiving a message. - public void OnMessage(Action handler) - { - AssertOnMessage(handler); + /// + /// Create a message loop that processes messages sequentially. + /// + /// The handler to run when receiving a message. 
+ public void OnMessage(Action handler) + { + AssertOnMessage(handler); - ThreadPool.QueueUserWorkItem( - state => ((ChannelMessageQueue)state).OnMessageSyncImpl().RedisFireAndForget(), this); - } + ThreadPool.QueueUserWorkItem( + state => ((ChannelMessageQueue)state!).OnMessageSyncImpl().RedisFireAndForget(), this); + } - private async Task OnMessageSyncImpl() + private async Task OnMessageSyncImpl() + { + var handler = (Action?)_onMessageHandler; + while (!Completion.IsCompleted) { - var handler = (Action)_onMessageHandler; - while (!Completion.IsCompleted) + ChannelMessage next; + try { - ChannelMessage next; - try { if (!TryRead(out next)) next = await ReadAsync().ForAwait(); } - catch (ChannelClosedException) { break; } // expected - catch (Exception ex) - { - _parent.multiplexer?.OnInternalError(ex); - break; - } - - try { handler(next); } - catch { } // matches MessageCompletable + if (!TryRead(out next)) next = await ReadAsync().ForAwait(); + } + catch (ChannelClosedException) { break; } // expected + catch (Exception ex) + { + _parent?.multiplexer?.OnInternalError(ex); + break; } + + try { handler?.Invoke(next); } + catch { } // matches MessageCompletable } + } - internal static void Combine(ref ChannelMessageQueue head, ChannelMessageQueue queue) + internal static void Combine(ref ChannelMessageQueue? head, ChannelMessageQueue queue) + { + if (queue != null) { - if (queue != null) + // insert at the start of the linked-list + ChannelMessageQueue? old; + do { - // insert at the start of the linked-list - ChannelMessageQueue old; - do - { - old = Volatile.Read(ref head); - queue._next = old; - } while (Interlocked.CompareExchange(ref head, queue, old) != old); + old = Volatile.Read(ref head); + queue._next = old; } + // format and validator disagree on newline... + while (Interlocked.CompareExchange(ref head, queue, old) != old); } + } - /// - /// Create a message loop that processes messages sequentially. 
- /// - /// The handler to execute when receiving a message. - public void OnMessage(Func handler) - { - AssertOnMessage(handler); + /// + /// Create a message loop that processes messages sequentially. + /// + /// The handler to execute when receiving a message. + public void OnMessage(Func handler) + { + AssertOnMessage(handler); - ThreadPool.QueueUserWorkItem( - state => ((ChannelMessageQueue)state).OnMessageAsyncImpl().RedisFireAndForget(), this); - } + ThreadPool.QueueUserWorkItem( + state => ((ChannelMessageQueue)state!).OnMessageAsyncImpl().RedisFireAndForget(), this); + } - internal static void Remove(ref ChannelMessageQueue head, ChannelMessageQueue queue) + internal static void Remove(ref ChannelMessageQueue? head, ChannelMessageQueue queue) + { + if (queue is null) { - if (queue == null) return; + return; + } - bool found; - do // if we fail due to a conflict, re-do from start + bool found; + // if we fail due to a conflict, re-do from start + do + { + var current = Volatile.Read(ref head); + if (current == null) return; // no queue? nothing to do + if (current == queue) { - var current = Volatile.Read(ref head); - if (current == null) return; // no queue? nothing to do - if (current == queue) + found = true; + // found at the head - then we need to change the head + if (Interlocked.CompareExchange(ref head, Volatile.Read(ref current._next), current) == current) { - found = true; - // found at the head - then we need to change the head - if (Interlocked.CompareExchange(ref head, Volatile.Read(ref current._next), current) == current) - { - return; // success - } + return; // success } - else + } + else + { + ChannelMessageQueue? 
previous = current; + current = Volatile.Read(ref previous._next); + found = false; + do { - ChannelMessageQueue previous = current; - current = Volatile.Read(ref previous._next); - found = false; - do + if (current == queue) { - if (current == queue) + found = true; + // found it, not at the head; remove the node + if (Interlocked.CompareExchange( + ref previous._next, + Volatile.Read(ref current._next), + current) == current) + { + return; // success + } + else { - found = true; - // found it, not at the head; remove the node - if (Interlocked.CompareExchange(ref previous._next, Volatile.Read(ref current._next), current) == current) - { - return; // success - } - else - { - break; // exit the inner loop, and repeat the outer loop - } + break; // exit the inner loop, and repeat the outer loop } - previous = current; - current = Volatile.Read(ref previous._next); - } while (current != null); + } + + previous = current; + current = Volatile.Read(ref previous!._next); } - } while (found); + // format and validator disagree on newline... + while (current != null); + } } + // format and validator disagree on newline... + while (found); + } - internal static int Count(ref ChannelMessageQueue head) + internal static int Count(ref ChannelMessageQueue? 
head) + { + var current = Volatile.Read(ref head); + int count = 0; + while (current != null) { - var current = Volatile.Read(ref head); - int count = 0; - while (current != null) - { - count++; - current = Volatile.Read(ref current._next); - } - return count; + count++; + current = Volatile.Read(ref current._next); } - internal static void WriteAll(ref ChannelMessageQueue head, in RedisChannel channel, in RedisValue message) + return count; + } + + internal static void WriteAll(ref ChannelMessageQueue head, in RedisChannel channel, in RedisValue message) + { + var current = Volatile.Read(ref head); + while (current != null) { - var current = Volatile.Read(ref head); - while (current != null) - { - current.Write(channel, message); - current = Volatile.Read(ref current._next); - } + current.Write(channel, message); + current = Volatile.Read(ref current._next); } + } - private ChannelMessageQueue _next; + private ChannelMessageQueue? _next; - private async Task OnMessageAsyncImpl() + private async Task OnMessageAsyncImpl() + { + var handler = (Func?)_onMessageHandler; + while (!Completion.IsCompleted) { - var handler = (Func)_onMessageHandler; - while (!Completion.IsCompleted) + ChannelMessage next; + try { - ChannelMessage next; - try { if (!TryRead(out next)) next = await ReadAsync().ForAwait(); } - catch (ChannelClosedException) { break; } // expected - catch (Exception ex) - { - _parent.multiplexer?.OnInternalError(ex); - break; - } - - try - { - var task = handler(next); - if (task != null && task.Status != TaskStatus.RanToCompletion) await task.ForAwait(); - } - catch { } // matches MessageCompletable + if (!TryRead(out next)) next = await ReadAsync().ForAwait(); + } + catch (ChannelClosedException) { break; } // expected + catch (Exception ex) + { + _parent?.multiplexer?.OnInternalError(ex); + break; } - } - internal static void MarkAllCompleted(ref ChannelMessageQueue head) - { - var current = Interlocked.Exchange(ref head, null); - while (current != null) + 
try { - current.MarkCompleted(); - current = Volatile.Read(ref current._next); + var task = handler?.Invoke(next); + if (task != null && task.Status != TaskStatus.RanToCompletion) await task.ForAwait(); } + catch { } // matches MessageCompletable } + } - private void MarkCompleted(Exception error = null) + internal static void MarkAllCompleted(ref ChannelMessageQueue? head) + { + var current = Interlocked.Exchange(ref head, null); + while (current != null) { - _parent = null; - _queue.Writer.TryComplete(error); + current.MarkCompleted(); + current = Volatile.Read(ref current._next); } + } + + private void MarkCompleted(Exception? error = null) + { + _parent = null; + _queue.Writer.TryComplete(error); + } - internal void UnsubscribeImpl(Exception error = null, CommandFlags flags = CommandFlags.None) + internal void UnsubscribeImpl(Exception? error = null, CommandFlags flags = CommandFlags.None) + { + var parent = _parent; + _parent = null; + parent?.UnsubscribeAsync(Channel, null, this, flags); + _queue.Writer.TryComplete(error); + } + + internal async Task UnsubscribeAsyncImpl(Exception? error = null, CommandFlags flags = CommandFlags.None) + { + var parent = _parent; + _parent = null; + if (parent != null) { - var parent = _parent; - _parent = null; - if (parent != null) - { - parent.UnsubscribeAsync(Channel, null, this, flags); - } - _queue.Writer.TryComplete(error); + await parent.UnsubscribeAsync(Channel, null, this, flags).ForAwait(); } - internal async Task UnsubscribeAsyncImpl(Exception error = null, CommandFlags flags = CommandFlags.None) + _queue.Writer.TryComplete(error); + } + + /// + /// Stop receiving messages on this channel. + /// + /// The flags to use when unsubscribing. + public void Unsubscribe(CommandFlags flags = CommandFlags.None) => UnsubscribeImpl(null, flags); + + /// + /// Stop receiving messages on this channel. + /// + /// The flags to use when unsubscribing. 
+ public Task UnsubscribeAsync(CommandFlags flags = CommandFlags.None) => UnsubscribeAsyncImpl(null, flags); + + /// +#if NET + public IAsyncEnumerator GetAsyncEnumerator(CancellationToken cancellationToken = default) + // ReSharper disable once MethodSupportsCancellation - provided in GetAsyncEnumerator + => _queue.Reader.ReadAllAsync().GetAsyncEnumerator(cancellationToken); +#else + public async IAsyncEnumerator GetAsyncEnumerator(CancellationToken cancellationToken = default) + { + while (await _queue.Reader.WaitToReadAsync(cancellationToken).ConfigureAwait(false)) { - var parent = _parent; - _parent = null; - if (parent != null) + while (_queue.Reader.TryRead(out var item)) { - await parent.UnsubscribeAsync(Channel, null, this, flags).ForAwait(); + yield return item; } - _queue.Writer.TryComplete(error); } - - /// - /// Stop receiving messages on this channel. - /// - /// The flags to use when unsubscribing. - public void Unsubscribe(CommandFlags flags = CommandFlags.None) => UnsubscribeImpl(null, flags); - - /// - /// Stop receiving messages on this channel. - /// - /// The flags to use when unsubscribing. - public Task UnsubscribeAsync(CommandFlags flags = CommandFlags.None) => UnsubscribeAsyncImpl(null, flags); } +#endif } diff --git a/src/StackExchange.Redis/ClientInfo.cs b/src/StackExchange.Redis/ClientInfo.cs index fdee896fa..d743affff 100644 --- a/src/StackExchange.Redis/ClientInfo.cs +++ b/src/StackExchange.Redis/ClientInfo.cs @@ -1,107 +1,170 @@ using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; using System.IO; using System.Net; namespace StackExchange.Redis { /// - /// Represents the state of an individual client connection to redis + /// Represents the state of an individual client connection to redis. /// public sealed class ClientInfo { internal static readonly ResultProcessor Processor = new ClientInfoProcessor(); /// - /// Address (host and port) of the client + /// Address (host and port) of the client. 
/// - public EndPoint Address { get; private set; } + public EndPoint? Address { get; private set; } /// - /// total duration of the connection in seconds + /// Total duration of the connection in seconds. /// public int AgeSeconds { get; private set; } /// - /// current database ID + /// Current database ID. /// public int Database { get; private set; } /// - /// The flags associated with this connection + /// The flags associated with this connection. /// public ClientFlags Flags { get; private set; } /// /// The client flags can be a combination of: - /// - /// A: connection to be closed ASAP - /// b: the client is waiting in a blocking operation - /// c: connection to be closed after writing entire reply - /// d: a watched keys has been modified - EXEC will fail - /// i: the client is waiting for a VM I/O (deprecated) - /// M: the client is a master - /// N: no specific flag set - /// O: the client is a replica in MONITOR mode - /// P: the client is a Pub/Sub subscriber - /// r: the client is in readonly mode against a cluster node - /// S: the client is a normal replica server - /// U: the client is connected via a Unix domain socket - /// x: the client is in a MULTI/EXEC context + /// + /// + /// A + /// Connection to be closed ASAP. + /// + /// + /// b + /// The client is waiting in a blocking operation. + /// + /// + /// c + /// Connection to be closed after writing entire reply. + /// + /// + /// d + /// A watched keys has been modified - EXEC will fail. + /// + /// + /// i + /// The client is waiting for a VM I/O (deprecated). + /// + /// + /// M + /// The client is a primary. + /// + /// + /// N + /// No specific flag set. + /// + /// + /// O + /// The client is a replica in MONITOR mode. + /// + /// + /// P + /// The client is a Pub/Sub subscriber. + /// + /// + /// r + /// The client is in readonly mode against a cluster node. + /// + /// + /// S + /// The client is a normal replica server. + /// + /// + /// u + /// The client is unblocked. 
+ /// + /// + /// U + /// The client is unblocked. + /// + /// + /// x + /// The client is in a MULTI/EXEC context. + /// + /// + /// t + /// The client enabled keys tracking in order to perform client side caching. + /// + /// + /// R + /// The client tracking target client is invalid. + /// + /// + /// B + /// The client enabled broadcast tracking mode. + /// + /// /// - public string FlagsRaw { get; private set; } + /// + public string? FlagsRaw { get; private set; } /// - /// The host of the client (typically an IP address) + /// The host of the client (typically an IP address). /// - public string Host => Format.TryGetHostPort(Address, out string host, out _) ? host : null; + public string? Host => Format.TryGetHostPort(Address, out string? host, out _) ? host : null; /// - /// idle time of the connection in seconds + /// Idle time of the connection in seconds. /// public int IdleSeconds { get; private set; } /// - /// last command played + /// Last command played. /// - public string LastCommand { get; private set; } + public string? LastCommand { get; private set; } /// - /// The name allocated to this connection, if any + /// The name allocated to this connection, if any. /// - public string Name { get; private set; } + public string? Name { get; private set; } /// - /// number of pattern matching subscriptions + /// Number of pattern-matching subscriptions. /// public int PatternSubscriptionCount { get; private set; } /// - /// The port of the client + /// Number of sharded subscriptions. /// - public int Port => Format.TryGetHostPort(Address, out _, out int port) ? port : 0; + public int ShardedSubscriptionCount { get; private set; } /// - /// The raw content from redis + /// The port of the client. /// - public string Raw { get; private set; } + public int Port => Format.TryGetHostPort(Address, out _, out int? port) ? port.Value : 0; /// - /// number of channel subscriptions + /// The raw content from redis. + /// + public string? 
Raw { get; private set; } + + /// + /// Number of channel subscriptions. /// public int SubscriptionCount { get; private set; } /// - /// number of commands in a MULTI/EXEC context + /// Number of commands in a MULTI/EXEC context. /// public int TransactionCommandLength { get; private set; } /// - /// an unique 64-bit client ID (introduced in Redis 2.8.12). + /// A unique 64-bit client ID (introduced in Redis 2.8.12). /// - public long Id { get;private set; } + public long Id { get; private set; } /// - /// Format the object as a string + /// Format the object as a string. /// public override string ToString() { @@ -110,7 +173,7 @@ public override string ToString() } /// - /// The class of the connection + /// The class of the connection. /// public ClientType ClientType { @@ -122,19 +185,44 @@ public ClientType ClientType } } - internal static ClientInfo[] Parse(string input) + /// + /// Client RESP protocol version. Added in Redis 7.0. + /// + public string? ProtocolVersion { get; private set; } + + /// + /// Client RESP protocol version. Added in Redis 7.0. + /// + public RedisProtocol? Protocol => ConfigurationOptions.TryParseRedisProtocol(ProtocolVersion, out var value) ? value : null; + + /// + /// Client library name. Added in Redis 7.2. + /// + /// + public string? LibraryName { get; private set; } + + /// + /// Client library version. Added in Redis 7.2. + /// + /// + public string? LibraryVersion { get; private set; } + + internal static bool TryParse(string? input, [NotNullWhen(true)] out ClientInfo[]? 
clientList) { - if (input == null) return null; + if (input == null) + { + clientList = null; + return false; + } var clients = new List(); using (var reader = new StringReader(input)) { - string line; - while ((line = reader.ReadLine()) != null) + while (reader.ReadLine() is string line) { var client = new ClientInfo { - Raw = line + Raw = line, }; string[] tokens = line.Split(StringSplits.Space); for (int i = 0; i < tokens.Length; i++) @@ -146,13 +234,14 @@ internal static ClientInfo[] Parse(string input) switch (key) { - case "addr": client.Address = Format.TryParseEndPoint(value); break; + case "addr" when Format.TryParseEndPoint(value, out var addr): client.Address = addr; break; case "age": client.AgeSeconds = Format.ParseInt32(value); break; case "idle": client.IdleSeconds = Format.ParseInt32(value); break; case "db": client.Database = Format.ParseInt32(value); break; case "name": client.Name = value; break; case "sub": client.SubscriptionCount = Format.ParseInt32(value); break; case "psub": client.PatternSubscriptionCount = Format.ParseInt32(value); break; + case "ssub": client.ShardedSubscriptionCount = Format.ParseInt32(value); break; case "multi": client.TransactionCommandLength = Format.ParseInt32(value); break; case "cmd": client.LastCommand = value; break; case "flags": @@ -172,17 +261,25 @@ internal static ClientInfo[] Parse(string input) AddFlag(ref flags, value, ClientFlags.Unblocked, 'u'); AddFlag(ref flags, value, ClientFlags.UnixDomainSocket, 'U'); AddFlag(ref flags, value, ClientFlags.Transaction, 'x'); - + + AddFlag(ref flags, value, ClientFlags.KeysTracking, 't'); + AddFlag(ref flags, value, ClientFlags.TrackingTargetInvalid, 'R'); + AddFlag(ref flags, value, ClientFlags.BroadcastTracking, 'B'); + client.Flags = flags; break; case "id": client.Id = Format.ParseInt64(value); break; + case "resp": client.ProtocolVersion = value; break; + case "lib-name": client.LibraryName = value; break; + case "lib-ver": client.LibraryVersion = value; break; 
} } clients.Add(client); } } - return clients.ToArray(); + clientList = clients.ToArray(); + return true; } private static void AddFlag(ref ClientFlags value, string raw, ClientFlags toAdd, char token) @@ -190,18 +287,20 @@ private static void AddFlag(ref ClientFlags value, string raw, ClientFlags toAdd if (raw.IndexOf(token) >= 0) value |= toAdd; } - private class ClientInfoProcessor : ResultProcessor + private sealed class ClientInfoProcessor : ResultProcessor { protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) { - switch(result.Type) + switch (result.Resp2TypeBulkString) { case ResultType.BulkString: - var raw = result.GetString(); - var clients = Parse(raw); - SetResult(message, clients); - return true; + if (TryParse(raw, out var clients)) + { + SetResult(message, clients); + return true; + } + break; } return false; } diff --git a/src/StackExchange.Redis/ClusterConfiguration.cs b/src/StackExchange.Redis/ClusterConfiguration.cs index 819697f25..084f7c639 100644 --- a/src/StackExchange.Redis/ClusterConfiguration.cs +++ b/src/StackExchange.Redis/ClusterConfiguration.cs @@ -9,14 +9,14 @@ namespace StackExchange.Redis { /// - /// Indicates a range of slots served by a cluster node + /// Indicates a range of slots served by a cluster node. /// public readonly struct SlotRange : IEquatable, IComparable, IComparable { private readonly short from, to; /// - /// Create a new SlotRange value + /// Create a new SlotRange value. /// /// The slot ID to start at. /// The slot ID to end at. @@ -34,18 +34,26 @@ private SlotRange(short from, short to) this.from = from; this.to = to; } + /// - /// The start of the range (inclusive) + /// The start of the range (inclusive). /// public int From => from; /// - /// The end of the range (inclusive) + /// The end of the range (inclusive). 
/// public int To => to; + internal bool IsSingleSlot => From == To; + + internal const int MinSlot = 0, MaxSlot = 16383; + + private static SlotRange[]? s_SharedAllSlots; + internal static SlotRange[] SharedAllSlots => s_SharedAllSlots ??= [new(MinSlot, MaxSlot)]; + /// - /// Indicates whether two ranges are not equal + /// Indicates whether two ranges are not equal. /// /// The first slot range. /// The second slot range. @@ -67,7 +75,7 @@ public static bool TryParse(string range, out SlotRange value) { if (string.IsNullOrWhiteSpace(range)) { - value = default(SlotRange); + value = default; return false; } int i = range.IndexOf('-'); @@ -88,12 +96,13 @@ public static bool TryParse(string range, out SlotRange value) return true; } } - value = default(SlotRange); + value = default; return false; } /// - /// Compares the current instance with another object of the same type and returns an integer that indicates whether the current instance precedes, follows, or occurs in the same position in the sort order as the other object. + /// Compares the current instance with another object of the same type and returns an integer that indicates + /// whether the current instance precedes, follows, or occurs in the same position in the sort order as the other object. /// /// The other slot range to compare to. public int CompareTo(SlotRange other) @@ -103,20 +112,18 @@ public int CompareTo(SlotRange other) } /// - /// See Object.Equals + /// See . /// /// The other slot range to compare to. - public override bool Equals(object obj) => obj is SlotRange sRange && Equals(sRange); + public override bool Equals(object? obj) => obj is SlotRange sRange && Equals(sRange); /// - /// Indicates whether two ranges are equal + /// Indicates whether two ranges are equal. /// /// The other slot range to compare to. 
public bool Equals(SlotRange other) => other.from == from && other.to == to; - /// - /// See Object.GetHashCode() - /// + /// public override int GetHashCode() { int x = from, y = to; // makes CS0675 a little happier @@ -124,7 +131,7 @@ public override int GetHashCode() } /// - /// See Object.ToString() + /// String representation ("{from}-{to}") of the range. /// public override string ToString() => from == to ? from.ToString() : (from + "-" + to); @@ -147,15 +154,15 @@ private static bool TryParseInt16(string s, int offset, int count, out short val } } - int IComparable.CompareTo(object obj) => obj is SlotRange sRange ? CompareTo(sRange) : -1; + int IComparable.CompareTo(object? obj) => obj is SlotRange sRange ? CompareTo(sRange) : -1; } /// - /// Describes the state of the cluster as reported by a single node + /// Describes the state of the cluster as reported by a single node. /// public sealed class ClusterConfiguration { - private readonly Dictionary nodeLookup = new Dictionary(); + private readonly Dictionary nodeLookup = new(); private readonly ServerSelectionStrategy serverSelectionStrategy; internal ClusterConfiguration(ServerSelectionStrategy serverSelectionStrategy, string nodes, EndPoint origin) @@ -165,14 +172,13 @@ internal ClusterConfiguration(ServerSelectionStrategy serverSelectionStrategy, s Origin = origin; using (var reader = new StringReader(nodes)) { - string line; - while ((line = reader.ReadLine()) != null) + while (reader.ReadLine() is string line) { if (string.IsNullOrWhiteSpace(line)) continue; var node = new ClusterNode(this, line, origin); - // Be resilient to ":0 {master,replica},fail,noaddr" nodes, and nodes where the endpoint doesn't parse - if (node.IsNoAddr || node.EndPoint == null) + // Be resilient to ":0 {primary,replica},fail,noaddr" nodes, and nodes where the endpoint doesn't parse + if (node.IsNoAddr || node.IsFail || node.EndPoint == null) continue; // Override the origin value with the endpoint advertised with the target 
node to @@ -181,7 +187,7 @@ internal ClusterConfiguration(ServerSelectionStrategy serverSelectionStrategy, s if (node.IsMyself) Origin = node.EndPoint; - if (nodeLookup.ContainsKey(node.EndPoint)) + if (nodeLookup.TryGetValue(node.EndPoint, out var lookedUpNode)) { // Deal with conflicting node entries for the same endpoint // This can happen in dynamic environments when a node goes down and a new one is created @@ -191,7 +197,7 @@ internal ClusterConfiguration(ServerSelectionStrategy serverSelectionStrategy, s // The node we're trying to add is probably about to become stale. Ignore it. continue; } - else if (!nodeLookup[node.EndPoint].IsConnected) + else if (!lookedUpNode.IsConnected) { // The node we registered previously is probably stale. Replace it with a known good node. nodeLookup[node.EndPoint] = node; @@ -212,25 +218,24 @@ internal ClusterConfiguration(ServerSelectionStrategy serverSelectionStrategy, s } /// - /// Gets all nodes contained in the configuration + /// Gets all nodes contained in the configuration. /// - /// public ICollection Nodes => nodeLookup.Values; /// - /// The node that was asked for the configuration + /// The node that was asked for the configuration. /// public EndPoint Origin { get; } /// - /// Obtain the node relating to a specified endpoint + /// Obtain the node relating to a specified endpoint. /// /// The endpoint to get a cluster node from. - public ClusterNode this[EndPoint endpoint] => endpoint == null + public ClusterNode? this[EndPoint endpoint] => endpoint == null ? null - : nodeLookup.TryGetValue(endpoint, out ClusterNode result) ? result : null; + : nodeLookup.TryGetValue(endpoint, out ClusterNode? result) ? result : null; - internal ClusterNode this[string nodeId] + internal ClusterNode? this[string nodeId] { get { @@ -247,9 +252,9 @@ internal ClusterNode this[string nodeId] /// Gets the node that serves the specified slot. /// /// The slot ID to get a node by. 
- public ClusterNode GetBySlot(int slot) + public ClusterNode? GetBySlot(int slot) { - foreach(var node in Nodes) + foreach (var node in Nodes) { if (!node.IsReplica && node.ServesSlot(slot)) return node; } @@ -260,28 +265,22 @@ public ClusterNode GetBySlot(int slot) /// Gets the node that serves the specified key's slot. /// /// The key to identify a node by. - public ClusterNode GetBySlot(RedisKey key) => GetBySlot(serverSelectionStrategy.HashSlot(key)); + public ClusterNode? GetBySlot(RedisKey key) => GetBySlot(serverSelectionStrategy.HashSlot(key)); } /// /// Represents the configuration of a single node in a cluster configuration. /// - public sealed class ClusterNode : IEquatable, IComparable, IComparable + /// + public sealed class ClusterNode : IEquatable, IComparable, IComparable { - private static readonly ClusterNode Dummy = new ClusterNode(); - private readonly ClusterConfiguration configuration; + private IList? children; + private ClusterNode? parent; + private string? toString; - private IList children; - - private ClusterNode parent; - - private string toString; - - internal ClusterNode() { } internal ClusterNode(ClusterConfiguration configuration, string raw, EndPoint origin) { - // https://redis.io/commands/cluster-nodes this.configuration = configuration; Raw = raw; var parts = raw.Split(StringSplits.Space); @@ -293,7 +292,10 @@ internal ClusterNode(ClusterConfiguration configuration, string raw, EndPoint or int at = ep.IndexOf('@'); if (at >= 0) ep = ep.Substring(0, at); - EndPoint = Format.TryParseEndPoint(ep); + if (Format.TryParseEndPoint(ep, out var epResult)) + { + EndPoint = epResult; + } if (flags.Contains("myself")) { IsMyself = true; @@ -306,11 +308,13 @@ internal ClusterNode(ClusterConfiguration configuration, string raw, EndPoint or } NodeId = parts[0]; + IsFail = flags.Contains("fail"); + IsPossiblyFail = flags.Contains("fail?"); IsReplica = flags.Contains("slave") || flags.Contains("replica"); IsNoAddr = flags.Contains("noaddr"); 
ParentNodeId = string.IsNullOrWhiteSpace(parts[3]) ? null : parts[3]; - List slots = null; + List? slots = null; for (int i = 8; i < parts.Length; i++) { @@ -322,16 +326,17 @@ internal ClusterNode(ClusterConfiguration configuration, string raw, EndPoint or Slots = slots?.AsReadOnly() ?? (IList)Array.Empty(); IsConnected = parts[7] == "connected"; // Can be "connected" or "disconnected" } + /// - /// Gets all child nodes of the current node + /// Gets all child nodes of the current node. /// public IList Children { get { - if (children != null) return children; + if (children is not null) return children; - List nodes = null; + List? nodes = null; foreach (var node in configuration.Nodes) { if (node.ParentNodeId == NodeId) @@ -345,81 +350,86 @@ public IList Children } /// - /// Gets the endpoint of the current node + /// Gets the endpoint of the current node. + /// + public EndPoint? EndPoint { get; } + + /// + /// Gets whether this node is in a failed state. /// - public EndPoint EndPoint { get; } + public bool IsFail { get; } /// - /// Gets whether this is the node which responded to the CLUSTER NODES request + /// Gets whether this node is possibly in a failed state. + /// Possibly here means the node we're getting status from can't communicate with it, but doesn't mean it's down for sure. + /// + public bool IsPossiblyFail { get; } + + /// + /// Gets whether this is the node which responded to the CLUSTER NODES request. /// public bool IsMyself { get; } /// - /// Gets whether this node is a replica + /// Gets whether this node is a replica. /// - [Obsolete("Starting with Redis version 5, Redis has moved to 'replica' terminology. Please use " + nameof(IsReplica) + " instead.")] + [Obsolete("Starting with Redis version 5, Redis has moved to 'replica' terminology. 
Please use " + nameof(IsReplica) + " instead, this will be removed in 3.0.")] [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] public bool IsSlave => IsReplica; + /// - /// Gets whether this node is a replica + /// Gets whether this node is a replica. /// public bool IsReplica { get; } /// - /// Gets whether this node is flagged as noaddr + /// Gets whether this node is flagged as noaddr. /// public bool IsNoAddr { get; } /// - /// Gets the node's connection status + /// Gets the node's connection status. /// public bool IsConnected { get; } /// - /// Gets the unique node-id of the current node + /// Gets the unique node-id of the current node. /// public string NodeId { get; } /// - /// Gets the parent node of the current node + /// Gets the parent node of the current node. /// - public ClusterNode Parent - { - get - { - if (parent != null) return parent == Dummy ? null : parent; - ClusterNode found = configuration[ParentNodeId]; - parent = found ?? Dummy; - return found; - } - } + public ClusterNode? Parent => (parent is not null) ? parent = configuration[ParentNodeId!] : null; /// - /// Gets the unique node-id of the parent of the current node + /// Gets the unique node-id of the parent of the current node. /// - public string ParentNodeId { get; } + public string? ParentNodeId { get; } /// - /// The configuration as reported by the server + /// The configuration as reported by the server. /// public string Raw { get; } /// - /// The slots owned by this server + /// The slots owned by this server. /// public IList Slots { get; } /// - /// Compares the current instance with another object of the same type and returns an integer that indicates whether the current instance precedes, follows, or occurs in the same position in the sort order as the other object. 
+ /// Compares the current instance with another object of the same type and returns an integer that indicates + /// whether the current instance precedes, follows, or occurs in the same position in the sort order as the other object. /// /// The to compare to. - public int CompareTo(ClusterNode other) + public int CompareTo(ClusterNode? other) { if (other == null) return -1; - if (IsReplica != other.IsReplica) return IsReplica ? 1 : -1; // masters first + if (IsReplica != other.IsReplica) return IsReplica ? 1 : -1; // primaries first - if (IsReplica) // both replicas? compare by parent, so we get masters A, B, C and then replicas of A, B, C + // both replicas? compare by parent, so we get primaries A, B, C and then replicas of A, B, C + if (IsReplica) { int i = string.CompareOrdinal(ParentNodeId, other.ParentNodeId); if (i != 0) return i; @@ -428,51 +438,43 @@ public int CompareTo(ClusterNode other) } /// - /// See Object.Equals + /// See . /// /// The to compare to. - public override bool Equals(object obj) => Equals(obj as ClusterNode); + public override bool Equals(object? obj) => Equals(obj as ClusterNode); /// - /// Indicates whether two ClusterNode instances are equivalent + /// Indicates whether two instances are equivalent. /// /// The to compare to. - public bool Equals(ClusterNode other) - { - if (other == null) return false; - - return ToString() == other.ToString(); // lazy, but effective - plus only computes once - } + public bool Equals(ClusterNode? other) => other is ClusterNode node && ToString() == node.ToString(); - /// - /// See object.GetHashCode() - /// + /// public override int GetHashCode() => ToString().GetHashCode(); /// - /// See Object.ToString() + /// A string summary of this cluster configuration. 
/// public override string ToString() { - if (toString != null) return toString; + if (toString is not null) return toString; var sb = new StringBuilder().Append(NodeId).Append(" at ").Append(EndPoint); if (IsReplica) { sb.Append(", replica of ").Append(ParentNodeId); - var parent = Parent; - if (parent != null) sb.Append(" at ").Append(parent.EndPoint); + if (Parent is ClusterNode parent) sb.Append(" at ").Append(parent.EndPoint); } var childCount = Children.Count; - switch(childCount) + switch (childCount) { case 0: break; case 1: sb.Append(", 1 replica"); break; default: sb.Append(", ").Append(childCount).Append(" replicas"); break; } - if(Slots.Count != 0) + if (Slots.Count != 0) { sb.Append(", slots: "); - foreach(var slot in Slots) + foreach (var slot in Slots) { sb.Append(slot).Append(' '); } @@ -490,9 +492,6 @@ internal bool ServesSlot(int hashSlot) return false; } - int IComparable.CompareTo(object obj) - { - return CompareTo(obj as ClusterNode); - } + int IComparable.CompareTo(object? obj) => CompareTo(obj as ClusterNode); } } diff --git a/src/StackExchange.Redis/CommandBytes.cs b/src/StackExchange.Redis/CommandBytes.cs index 7579bc1cc..19a69549b 100644 --- a/src/StackExchange.Redis/CommandBytes.cs +++ b/src/StackExchange.Redis/CommandBytes.cs @@ -8,7 +8,7 @@ namespace StackExchange.Redis { private static Encoding Encoding => Encoding.UTF8; - internal unsafe static CommandBytes TrimToFit(string value) + internal static unsafe CommandBytes TrimToFit(string value) { if (string.IsNullOrWhiteSpace(value)) return default; value = value.Trim(); @@ -46,14 +46,14 @@ public override int GetHashCode() hashCode = (hashCode * -1521134295) + _3.GetHashCode(); return hashCode; } - public override bool Equals(object obj) => obj is CommandBytes cb && Equals(cb); + + public override bool Equals(object? 
obj) => obj is CommandBytes cb && Equals(cb); bool IEquatable.Equals(CommandBytes other) => _0 == other._0 && _1 == other._1 && _2 == other._2 && _3 == other._3; public bool Equals(in CommandBytes other) => _0 == other._0 && _1 == other._1 && _2 == other._2 && _3 == other._3; // note: don't add == operators; with the implicit op above, that invalidates "==null" compiler checks (which should report a failure!) - public static implicit operator CommandBytes(string value) => new CommandBytes(value); public override unsafe string ToString() @@ -78,9 +78,7 @@ public unsafe int Length public bool IsEmpty => _0 == 0L; // cheap way of checking zero length -#pragma warning disable RCS1231 // Make parameter ref read-only. - spans are tiny! public unsafe void CopyTo(Span target) -#pragma warning restore RCS1231 // Make parameter ref read-only. { fixed (ulong* uPtr = &_0) { @@ -88,6 +86,7 @@ public unsafe void CopyTo(Span target) new Span(bPtr + 1, *bPtr).CopyTo(target); } } + public unsafe byte this[int index] { get @@ -102,10 +101,10 @@ public unsafe byte this[int index] } } - public unsafe CommandBytes(string value) + public unsafe CommandBytes(string? value) { _0 = _1 = _2 = _3 = 0L; - if (string.IsNullOrEmpty(value)) return; + if (value.IsNullOrEmpty()) return; var len = Encoding.GetByteCount(value); if (len > MaxLength) throw new ArgumentOutOfRangeException($"Command '{value}' exceeds library limit of {MaxLength} bytes"); @@ -120,11 +119,9 @@ public unsafe CommandBytes(string value) } } -#pragma warning disable RCS1231 // Make parameter ref read-only. - spans are tiny! public unsafe CommandBytes(ReadOnlySpan value) -#pragma warning restore RCS1231 // Make parameter ref read-only. 
{ - if (value.Length > MaxLength) throw new ArgumentOutOfRangeException("Maximum command length exceeed: " + value.Length + " bytes"); + if (value.Length > MaxLength) throw new ArgumentOutOfRangeException("Maximum command length exceeded: " + value.Length + " bytes"); _0 = _1 = _2 = _3 = 0L; fixed (ulong* uPtr = &_0) { @@ -136,7 +133,7 @@ public unsafe CommandBytes(ReadOnlySpan value) public unsafe CommandBytes(in ReadOnlySequence value) { - if (value.Length > MaxLength) throw new ArgumentOutOfRangeException("Maximum command length exceeed"); + if (value.Length > MaxLength) throw new ArgumentOutOfRangeException(nameof(value), "Maximum command length exceeded"); int len = unchecked((int)value.Length); _0 = _1 = _2 = _3 = 0L; fixed (ulong* uPtr = &_0) @@ -164,7 +161,7 @@ private unsafe int UpperCasify(int len, byte* bPtr) const ulong HighBits = 0x8080808080808080; if (((_0 | _1 | _2 | _3) & HighBits) == 0) { - // no unicode; use ASCII bit bricks + // no Unicode; use ASCII bit bricks for (int i = 0; i < len; i++) { *bPtr = ToUpperInvariantAscii(*bPtr++); @@ -183,10 +180,16 @@ private static unsafe int UpperCasifyUnicode(int oldLen, byte* bPtr) char* workspace = stackalloc char[MaxChars]; int charCount = Encoding.GetChars(bPtr, oldLen, workspace, MaxChars); char* c = workspace; - for (int i = 0; i < charCount; i++) *c = char.ToUpperInvariant(*c++); + for (int i = 0; i < charCount; i++) + { + *c = char.ToUpperInvariant(*c++); + } int newLen = Encoding.GetBytes(workspace, charCount, bPtr, MaxLength); // don't forget to zero any shrink - for (int i = newLen; i < oldLen; i++) bPtr[i] = 0; + for (int i = newLen; i < oldLen; i++) + { + bPtr[i] = 0; + } return newLen; } diff --git a/src/StackExchange.Redis/CommandMap.cs b/src/StackExchange.Redis/CommandMap.cs index 78bd5fbfe..683e51219 100644 --- a/src/StackExchange.Redis/CommandMap.cs +++ b/src/StackExchange.Redis/CommandMap.cs @@ -5,80 +5,106 @@ namespace StackExchange.Redis { /// - /// Represents the commands mapped on a 
particular configuration + /// Represents the commands mapped on a particular configuration. /// public sealed class CommandMap { private readonly CommandBytes[] map; - internal CommandMap(CommandBytes[] map) - { - this.map = map; - } + internal CommandMap(CommandBytes[] map) => this.map = map; + /// - /// The default commands specified by redis + /// The default commands specified by redis. /// public static CommandMap Default { get; } = CreateImpl(null, null); /// - /// The commands available to https://github.com/twitter/twemproxy + /// The commands available to twemproxy. /// - /// https://github.com/twitter/twemproxy/blob/master/notes/redis.md + /// public static CommandMap Twemproxy { get; } = CreateImpl(null, exclusions: new HashSet { - // see https://github.com/twitter/twemproxy/blob/master/notes/redis.md RedisCommand.KEYS, RedisCommand.MIGRATE, RedisCommand.MOVE, RedisCommand.OBJECT, RedisCommand.RANDOMKEY, - RedisCommand.RENAME, RedisCommand.RENAMENX, RedisCommand.SORT, RedisCommand.SCAN, + RedisCommand.RENAME, RedisCommand.RENAMENX, RedisCommand.SCAN, - RedisCommand.BITOP, RedisCommand.MSET, RedisCommand.MSETNX, - - RedisCommand.HSCAN, + RedisCommand.BITOP, RedisCommand.MSETEX, RedisCommand.MSETNX, RedisCommand.BLPOP, RedisCommand.BRPOP, RedisCommand.BRPOPLPUSH, // yeah, me neither! 
- RedisCommand.SSCAN, + RedisCommand.PSUBSCRIBE, RedisCommand.PUBLISH, RedisCommand.PUNSUBSCRIBE, RedisCommand.SUBSCRIBE, RedisCommand.UNSUBSCRIBE, RedisCommand.SPUBLISH, RedisCommand.SSUBSCRIBE, RedisCommand.SUNSUBSCRIBE, + + RedisCommand.DISCARD, RedisCommand.EXEC, RedisCommand.MULTI, RedisCommand.UNWATCH, RedisCommand.WATCH, - RedisCommand.ZSCAN, + RedisCommand.SCRIPT, - RedisCommand.PSUBSCRIBE, RedisCommand.PUBLISH, RedisCommand.PUNSUBSCRIBE, RedisCommand.SUBSCRIBE, RedisCommand.UNSUBSCRIBE, + RedisCommand.ECHO, RedisCommand.SELECT, - RedisCommand.DISCARD, RedisCommand.EXEC, RedisCommand.MULTI, RedisCommand.UNWATCH, RedisCommand.WATCH, + RedisCommand.BGREWRITEAOF, RedisCommand.BGSAVE, RedisCommand.CLIENT, RedisCommand.CLUSTER, RedisCommand.CONFIG, RedisCommand.DBSIZE, + RedisCommand.DEBUG, RedisCommand.FLUSHALL, RedisCommand.FLUSHDB, RedisCommand.INFO, RedisCommand.LASTSAVE, RedisCommand.MONITOR, RedisCommand.REPLICAOF, + RedisCommand.SAVE, RedisCommand.SHUTDOWN, RedisCommand.SLAVEOF, RedisCommand.SLOWLOG, RedisCommand.SYNC, RedisCommand.TIME, RedisCommand.HOTKEYS, + }); + + /// + /// The commands available to envoyproxy. + /// + /// + public static CommandMap Envoyproxy { get; } = CreateImpl(null, exclusions: new HashSet + { + RedisCommand.KEYS, RedisCommand.MIGRATE, RedisCommand.MOVE, RedisCommand.OBJECT, RedisCommand.RANDOMKEY, + RedisCommand.RENAME, RedisCommand.RENAMENX, RedisCommand.SORT, RedisCommand.SCAN, + + RedisCommand.BITOP, RedisCommand.MSETEX, RedisCommand.MSETNX, + + RedisCommand.BLPOP, RedisCommand.BRPOP, RedisCommand.BRPOPLPUSH, // yeah, me neither! 
+ + RedisCommand.PSUBSCRIBE, RedisCommand.PUBLISH, RedisCommand.PUNSUBSCRIBE, RedisCommand.SUBSCRIBE, RedisCommand.UNSUBSCRIBE, RedisCommand.SPUBLISH, RedisCommand.SSUBSCRIBE, RedisCommand.SUNSUBSCRIBE, RedisCommand.SCRIPT, - RedisCommand.ECHO, RedisCommand.PING, RedisCommand.QUIT, RedisCommand.SELECT, + RedisCommand.SELECT, RedisCommand.BGREWRITEAOF, RedisCommand.BGSAVE, RedisCommand.CLIENT, RedisCommand.CLUSTER, RedisCommand.CONFIG, RedisCommand.DBSIZE, RedisCommand.DEBUG, RedisCommand.FLUSHALL, RedisCommand.FLUSHDB, RedisCommand.INFO, RedisCommand.LASTSAVE, RedisCommand.MONITOR, RedisCommand.REPLICAOF, - RedisCommand.SAVE, RedisCommand.SHUTDOWN, RedisCommand.SLAVEOF, RedisCommand.SLOWLOG, RedisCommand.SYNC, RedisCommand.TIME + RedisCommand.SAVE, RedisCommand.SHUTDOWN, RedisCommand.SLAVEOF, RedisCommand.SLOWLOG, RedisCommand.SYNC, RedisCommand.TIME, RedisCommand.HOTKEYS, + + // supported by envoy but not enabled by stack exchange + // RedisCommand.BITFIELD, + // + // RedisCommand.GEORADIUS_RO, + // RedisCommand.GEORADIUSBYMEMBER_RO, }); /// - /// The commands available to http://www.ideawu.com/ssdb/ + /// The commands available to SSDB. 
/// - /// http://www.ideawu.com/ssdb/docs/redis-to-ssdb.html - public static CommandMap SSDB { get; } = Create(new HashSet { - // see http://www.ideawu.com/ssdb/docs/redis-to-ssdb.html - "ping", - "get", "set", "del", "incr", "incrby", "mget", "mset", "keys", "getset", "setnx", - "hget", "hset", "hdel", "hincrby", "hkeys", "hvals", "hmget", "hmset", "hlen", - "zscore", "zadd", "zrem", "zrange", "zrangebyscore", "zincrby", "zdecrby", "zcard", - "llen", "lpush", "rpush", "lpop", "rpop", "lrange", "lindex" - }, true); + /// + public static CommandMap SSDB { get; } = Create( + new HashSet + { + "ping", + "get", "set", "del", "incr", "incrby", "mget", "mset", "keys", "getset", "setnx", + "hget", "hset", "hdel", "hincrby", "hkeys", "hvals", "hmget", "hmset", "hlen", + "zscore", "zadd", "zrem", "zrange", "zrangebyscore", "zincrby", "zdecrby", "zcard", + "llen", "lpush", "rpush", "lpop", "rpop", "lrange", "lindex", + }, + true); /// - /// The commands available to https://redis.io/topics/sentinel + /// The commands available to Sentinel. /// - /// https://redis.io/topics/sentinel - public static CommandMap Sentinel { get; } = Create(new HashSet { - // see https://redis.io/topics/sentinel - "auth", "ping", "info", "role", "sentinel", "subscribe", "shutdown", "psubscribe", "unsubscribe", "punsubscribe" }, true); + /// + public static CommandMap Sentinel { get; } = Create( + new HashSet + { + "auth", "hello", "ping", "info", "role", "sentinel", "subscribe", "shutdown", "psubscribe", "unsubscribe", "punsubscribe", + }, + true); /// - /// Create a new CommandMap, customizing some commands + /// Create a new , customizing some commands. /// /// The commands to override. - public static CommandMap Create(Dictionary overrides) + public static CommandMap Create(Dictionary? 
overrides) { if (overrides == null || overrides.Count == 0) return Default; @@ -90,13 +116,13 @@ public static CommandMap Create(Dictionary overrides) else { // need case insensitive - overrides = new Dictionary(overrides, StringComparer.OrdinalIgnoreCase); + overrides = new Dictionary(overrides, StringComparer.OrdinalIgnoreCase); } return CreateImpl(overrides, null); } /// - /// Creates a CommandMap by specifying which commands are available or unavailable + /// Creates a by specifying which commands are available or unavailable. /// /// The commands to specify. /// Whether the commands are available or excluded. @@ -104,9 +130,9 @@ public static CommandMap Create(HashSet commands, bool available = true) { if (available) { - var dictionary = new Dictionary(StringComparer.OrdinalIgnoreCase); + var dictionary = new Dictionary(StringComparer.OrdinalIgnoreCase); // nix everything - foreach (RedisCommand command in Enum.GetValues(typeof(RedisCommand))) + foreach (RedisCommand command in (RedisCommand[])Enum.GetValues(typeof(RedisCommand))) { dictionary[command.ToString()] = null; } @@ -122,7 +148,7 @@ public static CommandMap Create(HashSet commands, bool available = true) } else { - HashSet exclusions = null; + HashSet? exclusions = null; if (commands != null) { // nix the things that are specified @@ -130,7 +156,7 @@ public static CommandMap Create(HashSet commands, bool available = true) { if (Enum.TryParse(command, true, out RedisCommand parsed)) { - (exclusions ?? (exclusions = new HashSet())).Add(parsed); + (exclusions ??= new HashSet()).Add(parsed); } } } @@ -140,7 +166,7 @@ public static CommandMap Create(HashSet commands, bool available = true) } /// - /// See Object.ToString() + /// See . 
/// public override string ToString() { @@ -174,8 +200,9 @@ internal void AssertAvailable(RedisCommand command) internal CommandBytes GetBytes(string command) { if (command == null) return default; - if(Enum.TryParse(command, true, out RedisCommand cmd)) - { // we know that one! + if (Enum.TryParse(command, true, out RedisCommand cmd)) + { + // we know that one! return map[(int)cmd]; } return new CommandBytes(command); @@ -183,16 +210,15 @@ internal CommandBytes GetBytes(string command) internal bool IsAvailable(RedisCommand command) => !map[(int)command].IsEmpty; - private static CommandMap CreateImpl(Dictionary caseInsensitiveOverrides, HashSet exclusions) + private static CommandMap CreateImpl(Dictionary? caseInsensitiveOverrides, HashSet? exclusions) { var commands = (RedisCommand[])Enum.GetValues(typeof(RedisCommand)); var map = new CommandBytes[commands.Length]; - bool haveDelta = false; for (int i = 0; i < commands.Length; i++) { int idx = (int)commands[i]; - string name = commands[i].ToString(), value = name; + string? name = commands[i].ToString(), value = name; if (exclusions?.Contains(commands[i]) == true) { @@ -200,18 +226,13 @@ private static CommandMap CreateImpl(Dictionary caseInsensitiveO } else { - if (caseInsensitiveOverrides != null && caseInsensitiveOverrides.TryGetValue(name, out string tmp)) + if (caseInsensitiveOverrides != null && caseInsensitiveOverrides.TryGetValue(name, out string? tmp)) { value = tmp; } - if (value != name) haveDelta = true; - // TODO: bug? 
- haveDelta = true; map[idx] = new CommandBytes(value); } } - if (!haveDelta && Default != null) return Default; - return new CommandMap(map); } } diff --git a/src/StackExchange.Redis/CommandTrace.cs b/src/StackExchange.Redis/CommandTrace.cs index de3cd1849..a61499f0c 100644 --- a/src/StackExchange.Redis/CommandTrace.cs +++ b/src/StackExchange.Redis/CommandTrace.cs @@ -3,8 +3,8 @@ namespace StackExchange.Redis { /// - /// Represents the information known about long-running commands - /// + /// Represents the information known about long-running commands. + /// public sealed class CommandTrace { internal static readonly ResultProcessor Processor = new CommandTraceProcessor(); @@ -14,7 +14,7 @@ internal CommandTrace(long uniqueId, long time, long duration, RedisValue[] argu UniqueId = uniqueId; Time = RedisBase.UnixEpoch.AddSeconds(time); // duration = The amount of time needed for its execution, in microseconds. - // A tick is equal to 100 nanoseconds, or one ten-millionth of a second. + // A tick is equal to 100 nanoseconds, or one ten-millionth of a second. // So 1 microsecond = 10 ticks Duration = TimeSpan.FromTicks(duration * 10); Arguments = arguments; @@ -26,7 +26,7 @@ internal CommandTrace(long uniqueId, long time, long duration, RedisValue[] argu public RedisValue[] Arguments { get; } /// - /// The amount of time needed for its execution + /// The amount of time needed for its execution. /// public TimeSpan Duration { get; } @@ -42,15 +42,15 @@ internal CommandTrace(long uniqueId, long time, long duration, RedisValue[] argu public long UniqueId { get; } /// - /// Deduces a link to the redis documentation about the specified command + /// Deduces a link to the redis documentation about the specified command. /// - public string GetHelpUrl() + public string? 
GetHelpUrl() { if (Arguments == null || Arguments.Length == 0) return null; const string BaseUrl = "https://redis.io/commands/"; - string encoded0 = Uri.EscapeUriString(((string)Arguments[0]).ToLowerInvariant()); + string encoded0 = Uri.EscapeDataString(((string)Arguments[0]!).ToLowerInvariant()); if (Arguments.Length > 1) { @@ -62,29 +62,29 @@ public string GetHelpUrl() case "config": case "debug": case "pubsub": - string encoded1 = Uri.EscapeUriString(((string)Arguments[1]).ToLowerInvariant()); + string encoded1 = Uri.EscapeDataString(((string)Arguments[1]!).ToLowerInvariant()); return BaseUrl + encoded0 + "-" + encoded1; } } return BaseUrl + encoded0; } - private class CommandTraceProcessor : ResultProcessor + private sealed class CommandTraceProcessor : ResultProcessor { protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) { - switch(result.Type) + switch (result.Resp2TypeArray) { - case ResultType.MultiBulk: + case ResultType.Array: var parts = result.GetItems(); CommandTrace[] arr = new CommandTrace[parts.Length]; int i = 0; - foreach(var item in parts) + foreach (var item in parts) { var subParts = item.GetItems(); if (!subParts[0].TryGetInt64(out long uniqueid) || !subParts[1].TryGetInt64(out long time) || !subParts[2].TryGetInt64(out long duration)) return false; - arr[i++] = new CommandTrace(uniqueid, time, duration, subParts[3].GetItemsAsValues()); + arr[i++] = new CommandTrace(uniqueid, time, duration, subParts[3].GetItemsAsValues()!); } SetResult(message, arr); return true; diff --git a/src/StackExchange.Redis/CompletedDefaultTask.cs b/src/StackExchange.Redis/CompletedDefaultTask.cs index feb35877c..1035cb6a8 100644 --- a/src/StackExchange.Redis/CompletedDefaultTask.cs +++ b/src/StackExchange.Redis/CompletedDefaultTask.cs @@ -4,11 +4,21 @@ namespace StackExchange.Redis { internal static class CompletedTask { - private static readonly Task defaultTask = FromResult(default(T), null); + private static 
readonly Task defaultTask = FromResult(default(T), null); - public static Task Default(object asyncState) => asyncState == null ? defaultTask : FromResult(default(T), asyncState); + public static Task Default(object? asyncState) => asyncState == null ? defaultTask : FromResult(default(T), asyncState); - public static Task FromResult(T value, object asyncState) + public static Task FromResult(T? value, object? asyncState) + { + if (asyncState == null) return Task.FromResult(value); + // note we do not need to deny exec-sync here; the value will be known + // before we hand it to them + var tcs = TaskSource.Create(asyncState); + tcs.SetResult(value); + return tcs.Task; + } + + public static Task FromDefault(T value, object? asyncState) { if (asyncState == null) return Task.FromResult(value); // note we do not need to deny exec-sync here; the value will be known diff --git a/src/StackExchange.Redis/Condition.cs b/src/StackExchange.Redis/Condition.cs index d21f9bc58..ec7ee53b6 100644 --- a/src/StackExchange.Redis/Condition.cs +++ b/src/StackExchange.Redis/Condition.cs @@ -1,12 +1,10 @@ using System; using System.Collections.Generic; -#pragma warning disable RCS1231 - namespace StackExchange.Redis { /// - /// Describes a pre-condition used in a redis transaction + /// Describes a precondition used in a redis transaction. /// public abstract class Condition { @@ -69,13 +67,13 @@ public static Condition HashNotExists(RedisKey key, RedisValue hashField) public static Condition KeyExists(RedisKey key) => new ExistsCondition(key, RedisType.None, RedisValue.Null, true); /// - /// Enforces that the given key must not exist + /// Enforces that the given key must not exist. /// /// The key that must not exist. public static Condition KeyNotExists(RedisKey key) => new ExistsCondition(key, RedisType.None, RedisValue.Null, false); /// - /// Enforces that the given list index must have the specified value + /// Enforces that the given list index must have the specified value. 
/// /// The key of the list to check. /// The position in the list to check. @@ -83,14 +81,14 @@ public static Condition HashNotExists(RedisKey key, RedisValue hashField) public static Condition ListIndexEqual(RedisKey key, long index, RedisValue value) => new ListCondition(key, index, true, value); /// - /// Enforces that the given list index must exist + /// Enforces that the given list index must exist. /// /// The key of the list to check. /// The position in the list that must exist. public static Condition ListIndexExists(RedisKey key, long index) => new ListCondition(key, index, true, null); /// - /// Enforces that the given list index must not have the specified value + /// Enforces that the given list index must not have the specified value. /// /// The key of the list to check. /// The position in the list to check. @@ -98,14 +96,14 @@ public static Condition HashNotExists(RedisKey key, RedisValue hashField) public static Condition ListIndexNotEqual(RedisKey key, long index, RedisValue value) => new ListCondition(key, index, false, value); /// - /// Enforces that the given list index must not exist + /// Enforces that the given list index must not exist. /// /// The key of the list to check. /// The position in the list that must not exist. public static Condition ListIndexNotExists(RedisKey key, long index) => new ListCondition(key, index, false, null); /// - /// Enforces that the given key must have the specified value + /// Enforces that the given key must have the specified value. /// /// The key to check. /// The value that must match. @@ -116,7 +114,7 @@ public static Condition StringEqual(RedisKey key, RedisValue value) } /// - /// Enforces that the given key must not have the specified value + /// Enforces that the given key must not have the specified value. /// /// The key to check. /// The value that must not match. 
@@ -127,112 +125,112 @@ public static Condition StringNotEqual(RedisKey key, RedisValue value) } /// - /// Enforces that the given hash length is a certain value + /// Enforces that the given hash length is a certain value. /// /// The key of the hash to check. /// The length the hash must have. public static Condition HashLengthEqual(RedisKey key, long length) => new LengthCondition(key, RedisType.Hash, 0, length); /// - /// Enforces that the given hash length is less than a certain value + /// Enforces that the given hash length is less than a certain value. /// /// The key of the hash to check. /// The length the hash must be less than. public static Condition HashLengthLessThan(RedisKey key, long length) => new LengthCondition(key, RedisType.Hash, 1, length); /// - /// Enforces that the given hash length is greater than a certain value + /// Enforces that the given hash length is greater than a certain value. /// /// The key of the hash to check. /// The length the hash must be greater than. public static Condition HashLengthGreaterThan(RedisKey key, long length) => new LengthCondition(key, RedisType.Hash, -1, length); /// - /// Enforces that the given string length is a certain value + /// Enforces that the given string length is a certain value. /// /// The key of the string to check. /// The length the string must be equal to. public static Condition StringLengthEqual(RedisKey key, long length) => new LengthCondition(key, RedisType.String, 0, length); /// - /// Enforces that the given string length is less than a certain value + /// Enforces that the given string length is less than a certain value. /// /// The key of the string to check. /// The length the string must be less than. 
public static Condition StringLengthLessThan(RedisKey key, long length) => new LengthCondition(key, RedisType.String, 1, length); /// - /// Enforces that the given string length is greater than a certain value + /// Enforces that the given string length is greater than a certain value. /// /// The key of the string to check. /// The length the string must be greater than. public static Condition StringLengthGreaterThan(RedisKey key, long length) => new LengthCondition(key, RedisType.String, -1, length); /// - /// Enforces that the given list length is a certain value + /// Enforces that the given list length is a certain value. /// /// The key of the list to check. /// The length the list must be equal to. public static Condition ListLengthEqual(RedisKey key, long length) => new LengthCondition(key, RedisType.List, 0, length); /// - /// Enforces that the given list length is less than a certain value + /// Enforces that the given list length is less than a certain value. /// /// The key of the list to check. /// The length the list must be less than. public static Condition ListLengthLessThan(RedisKey key, long length) => new LengthCondition(key, RedisType.List, 1, length); /// - /// Enforces that the given list length is greater than a certain value + /// Enforces that the given list length is greater than a certain value. /// /// The key of the list to check. /// The length the list must be greater than. public static Condition ListLengthGreaterThan(RedisKey key, long length) => new LengthCondition(key, RedisType.List, -1, length); /// - /// Enforces that the given set cardinality is a certain value + /// Enforces that the given set cardinality is a certain value. /// /// The key of the set to check. /// The length the set must be equal to. 
public static Condition SetLengthEqual(RedisKey key, long length) => new LengthCondition(key, RedisType.Set, 0, length); /// - /// Enforces that the given set cardinality is less than a certain value + /// Enforces that the given set cardinality is less than a certain value. /// /// The key of the set to check. /// The length the set must be less than. public static Condition SetLengthLessThan(RedisKey key, long length) => new LengthCondition(key, RedisType.Set, 1, length); /// - /// Enforces that the given set cardinality is greater than a certain value + /// Enforces that the given set cardinality is greater than a certain value. /// /// The key of the set to check. /// The length the set must be greater than. public static Condition SetLengthGreaterThan(RedisKey key, long length) => new LengthCondition(key, RedisType.Set, -1, length); /// - /// Enforces that the given set contains a certain member + /// Enforces that the given set contains a certain member. /// /// The key of the set to check. /// The member the set must contain. public static Condition SetContains(RedisKey key, RedisValue member) => new ExistsCondition(key, RedisType.Set, member, true); /// - /// Enforces that the given set does not contain a certain member + /// Enforces that the given set does not contain a certain member. /// /// The key of the set to check. /// The member the set must not contain. public static Condition SetNotContains(RedisKey key, RedisValue member) => new ExistsCondition(key, RedisType.Set, member, false); /// - /// Enforces that the given sorted set cardinality is a certain value + /// Enforces that the given sorted set cardinality is a certain value. /// /// The key of the sorted set to check. /// The length the sorted set must be equal to. 
public static Condition SortedSetLengthEqual(RedisKey key, long length) => new LengthCondition(key, RedisType.SortedSet, 0, length); /// - /// Enforces that the given sorted set contains a certain number of members with scores in the given range + /// Enforces that the given sorted set contains a certain number of members with scores in the given range. /// /// The key of the sorted set to check. /// The length the sorted set must be equal to. @@ -241,14 +239,14 @@ public static Condition StringNotEqual(RedisKey key, RedisValue value) public static Condition SortedSetLengthEqual(RedisKey key, long length, double min = double.NegativeInfinity, double max = double.PositiveInfinity) => new SortedSetRangeLengthCondition(key, min, max, 0, length); /// - /// Enforces that the given sorted set cardinality is less than a certain value + /// Enforces that the given sorted set cardinality is less than a certain value. /// /// The key of the sorted set to check. /// The length the sorted set must be less than. public static Condition SortedSetLengthLessThan(RedisKey key, long length) => new LengthCondition(key, RedisType.SortedSet, 1, length); /// - /// Enforces that the given sorted set contains less than a certain number of members with scores in the given range + /// Enforces that the given sorted set contains less than a certain number of members with scores in the given range. /// /// The key of the sorted set to check. /// The length the sorted set must be equal to. @@ -257,14 +255,14 @@ public static Condition StringNotEqual(RedisKey key, RedisValue value) public static Condition SortedSetLengthLessThan(RedisKey key, long length, double min = double.NegativeInfinity, double max = double.PositiveInfinity) => new SortedSetRangeLengthCondition(key, min, max, 1, length); /// - /// Enforces that the given sorted set cardinality is greater than a certain value + /// Enforces that the given sorted set cardinality is greater than a certain value. 
/// /// The key of the sorted set to check. /// The length the sorted set must be greater than. public static Condition SortedSetLengthGreaterThan(RedisKey key, long length) => new LengthCondition(key, RedisType.SortedSet, -1, length); /// - /// Enforces that the given sorted set contains more than a certain number of members with scores in the given range + /// Enforces that the given sorted set contains more than a certain number of members with scores in the given range. /// /// The key of the sorted set to check. /// The length the sorted set must be equal to. @@ -273,19 +271,33 @@ public static Condition StringNotEqual(RedisKey key, RedisValue value) public static Condition SortedSetLengthGreaterThan(RedisKey key, long length, double min = double.NegativeInfinity, double max = double.PositiveInfinity) => new SortedSetRangeLengthCondition(key, min, max, -1, length); /// - /// Enforces that the given sorted set contains a certain member + /// Enforces that the given sorted set contains a certain member. /// /// The key of the sorted set to check. /// The member the sorted set must contain. public static Condition SortedSetContains(RedisKey key, RedisValue member) => new ExistsCondition(key, RedisType.SortedSet, member, true); /// - /// Enforces that the given sorted set does not contain a certain member + /// Enforces that the given sorted set does not contain a certain member. /// /// The key of the sorted set to check. /// The member the sorted set must not contain. public static Condition SortedSetNotContains(RedisKey key, RedisValue member) => new ExistsCondition(key, RedisType.SortedSet, member, false); + /// + /// Enforces that the given sorted set contains a member that starts with the specified prefix. + /// + /// The key of the sorted set to check. + /// The sorted set must contain at least one member that starts with the specified prefix. 
+ public static Condition SortedSetContainsStarting(RedisKey key, RedisValue prefix) => new StartsWithCondition(key, prefix, true); + + /// + /// Enforces that the given sorted set does not contain a member that starts with the specified prefix. + /// + /// The key of the sorted set to check. + /// The sorted set must not contain a member that starts with the specified prefix. + public static Condition SortedSetNotContainsStarting(RedisKey key, RedisValue prefix) => new StartsWithCondition(key, prefix, false); + /// /// Enforces that the given sorted set member must have the specified score. /// @@ -332,31 +344,50 @@ public static Condition StringNotEqual(RedisKey key, RedisValue value) /// The number of members which sorted set must not have. public static Condition SortedSetScoreNotExists(RedisKey key, RedisValue score, RedisValue count) => new SortedSetScoreCondition(key, score, false, count); + /// + /// Enforces that the given stream length is a certain value. + /// + /// The key of the stream to check. + /// The length the stream must have. + public static Condition StreamLengthEqual(RedisKey key, long length) => new LengthCondition(key, RedisType.Stream, 0, length); + + /// + /// Enforces that the given stream length is less than a certain value. + /// + /// The key of the stream to check. + /// The length the stream must be less than. + public static Condition StreamLengthLessThan(RedisKey key, long length) => new LengthCondition(key, RedisType.Stream, 1, length); + + /// + /// Enforces that the given stream length is greater than a certain value. + /// + /// The key of the stream to check. + /// The length the stream must be greater than.
+ public static Condition StreamLengthGreaterThan(RedisKey key, long length) => new LengthCondition(key, RedisType.Stream, -1, length); + #pragma warning restore RCS1231 internal abstract void CheckCommands(CommandMap commandMap); - internal abstract IEnumerable CreateMessages(int db, IResultBox resultBox); + internal abstract IEnumerable CreateMessages(int db, IResultBox? resultBox); internal abstract int GetHashSlot(ServerSelectionStrategy serverSelectionStrategy); internal abstract bool TryValidate(in RawResult result, out bool value); internal sealed class ConditionProcessor : ResultProcessor { - public static readonly ConditionProcessor Default = new ConditionProcessor(); + public static readonly ConditionProcessor Default = new(); -#pragma warning disable RCS1231 // Make parameter ref read-only. - public static Message CreateMessage(Condition condition, int db, CommandFlags flags, RedisCommand command, in RedisKey key, RedisValue value = default(RedisValue)) -#pragma warning restore RCS1231 // Make parameter ref read-only. 
- { - return new ConditionMessage(condition, db, flags, command, key, value); - } + public static Message CreateMessage(Condition condition, int db, CommandFlags flags, RedisCommand command, in RedisKey key, RedisValue value = default) => + new ConditionMessage(condition, db, flags, command, key, value); - public static Message CreateMessage(Condition condition, int db, CommandFlags flags, RedisCommand command, in RedisKey key, in RedisValue value, in RedisValue value1) - { - return new ConditionMessage(condition, db, flags, command, key, value, value1); - } + public static Message CreateMessage(Condition condition, int db, CommandFlags flags, RedisCommand command, in RedisKey key, in RedisValue value, in RedisValue value1) => + new ConditionMessage(condition, db, flags, command, key, value, value1); + + public static Message CreateMessage(Condition condition, int db, CommandFlags flags, RedisCommand command, in RedisKey key, in RedisValue value, in RedisValue value1, in RedisValue value2, in RedisValue value3, in RedisValue value4) => + new ConditionMessage(condition, db, flags, command, key, value, value1, value2, value3, value4); + [System.Diagnostics.CodeAnalysis.SuppressMessage("Style", "IDE0071:Simplify interpolation", Justification = "Allocations (string.Concat vs. 
string.Format)")] protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) { connection?.BridgeCouldBeNull?.Multiplexer?.OnTransactionLog($"condition '{message.CommandAndKey}' got '{result.ToString()}'"); @@ -370,11 +401,14 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes return false; } - private class ConditionMessage : Message.CommandKeyBase + private sealed class ConditionMessage : Message.CommandKeyBase { public readonly Condition Condition; private readonly RedisValue value; private readonly RedisValue value1; + private readonly RedisValue value2; + private readonly RedisValue value3; + private readonly RedisValue value4; public ConditionMessage(Condition condition, int db, CommandFlags flags, RedisCommand command, in RedisKey key, in RedisValue value) : base(db, flags, command, key) @@ -389,6 +423,15 @@ public ConditionMessage(Condition condition, int db, CommandFlags flags, RedisCo this.value1 = value1; // note no assert here } + // Message with 3 or 4 values not used, therefore not implemented + public ConditionMessage(Condition condition, int db, CommandFlags flags, RedisCommand command, in RedisKey key, in RedisValue value, in RedisValue value1, in RedisValue value2, in RedisValue value3, in RedisValue value4) + : this(condition, db, flags, command, key, value, value1) + { + this.value2 = value2; // note no assert here + this.value3 = value3; // note no assert here + this.value4 = value4; // note no assert here + } + protected override void WriteImpl(PhysicalConnection physical) { if (value.IsNull) @@ -398,20 +441,24 @@ protected override void WriteImpl(PhysicalConnection physical) } else { - physical.WriteHeader(command, value1.IsNull ? 2 : 3); + physical.WriteHeader(command, value1.IsNull ? 2 : value2.IsNull ? 3 : value3.IsNull ? 4 : value4.IsNull ? 
5 : 6); physical.Write(Key); physical.WriteBulkString(value); if (!value1.IsNull) - { physical.WriteBulkString(value1); - } + if (!value2.IsNull) + physical.WriteBulkString(value2); + if (!value3.IsNull) + physical.WriteBulkString(value3); + if (!value4.IsNull) + physical.WriteBulkString(value4); } } - public override int ArgCount => value.IsNull ? 1 : value1.IsNull ? 2 : 3; + public override int ArgCount => value.IsNull ? 1 : value1.IsNull ? 2 : value2.IsNull ? 3 : value3.IsNull ? 4 : value4.IsNull ? 5 : 6; } } - internal class ExistsCondition : Condition + internal sealed class ExistsCondition : Condition { private readonly bool expectedResult; private readonly RedisValue expectedValue; @@ -419,14 +466,12 @@ internal class ExistsCondition : Condition private readonly RedisType type; private readonly RedisCommand cmd; - internal override Condition MapKeys(Func map) - { - return new ExistsCondition(map(key), type, expectedValue, expectedResult); - } + internal override Condition MapKeys(Func map) => + new ExistsCondition(map(key), type, expectedValue, expectedResult); public ExistsCondition(in RedisKey key, RedisType type, in RedisValue expectedValue, bool expectedResult) { - if (key.IsNull) throw new ArgumentException("key"); + if (key.IsNull) throw new ArgumentNullException(nameof(key)); this.key = key; this.type = type; this.expectedValue = expectedValue; @@ -438,35 +483,23 @@ public ExistsCondition(in RedisKey key, RedisType type, in RedisValue expectedVa } else { - switch (type) + cmd = type switch { - case RedisType.Hash: - cmd = RedisCommand.HEXISTS; - break; - - case RedisType.Set: - cmd = RedisCommand.SISMEMBER; - break; - - case RedisType.SortedSet: - cmd = RedisCommand.ZSCORE; - break; - - default: - throw new ArgumentException(nameof(type)); - } + RedisType.Hash => RedisCommand.HEXISTS, + RedisType.Set => RedisCommand.SISMEMBER, + RedisType.SortedSet => RedisCommand.ZSCORE, + _ => throw new ArgumentException($"Type {type} is not recognized", 
nameof(type)), + }; } } - public override string ToString() - { - return (expectedValue.IsNull ? key.ToString() : ((string)key) + " " + type + " > " + expectedValue) + public override string ToString() => + (expectedValue.IsNull ? key.ToString() : ((string?)key) + " " + type + " > " + expectedValue) + (expectedResult ? " exists" : " does not exists"); - } internal override void CheckCommands(CommandMap commandMap) => commandMap.AssertAvailable(cmd); - internal override IEnumerable CreateMessages(int db, IResultBox resultBox) + internal override IEnumerable CreateMessages(int db, IResultBox? resultBox) { yield return Message.Create(db, CommandFlags.None, RedisCommand.WATCH, key); @@ -483,7 +516,7 @@ internal override bool TryValidate(in RawResult result, out bool value) { case RedisType.SortedSet: var parsedValue = result.AsRedisValue(); - value = (parsedValue.IsNull != expectedResult); + value = parsedValue.IsNull != expectedResult; ConnectionMultiplexer.TraceWithoutContext("exists: " + parsedValue + "; expected: " + expectedResult + "; voting: " + value); return true; @@ -501,12 +534,71 @@ internal override bool TryValidate(in RawResult result, out bool value) } } - internal class EqualsCondition : Condition + internal sealed class StartsWithCondition : Condition { - internal override Condition MapKeys(Func map) + /* only usable for RedisType.SortedSet, members of SortedSets are always byte-arrays, expectedStartValue therefore is a byte-array + any Encoding and Conversion for the search-sequence has to be executed in calling application + working with byte arrays should prevent any encoding within this class, that could distort the comparison */ + + private readonly bool expectedResult; + private readonly RedisValue prefix; + private readonly RedisKey key; + + internal override Condition MapKeys(Func map) => + new StartsWithCondition(map(key), prefix, expectedResult); + + public StartsWithCondition(in RedisKey key, in RedisValue prefix, bool expectedResult) + { + 
if (key.IsNull) throw new ArgumentNullException(nameof(key)); + if (prefix.IsNull) throw new ArgumentNullException(nameof(prefix)); + this.key = key; + this.prefix = prefix; + this.expectedResult = expectedResult; + } + + public override string ToString() => + $"{key} {nameof(RedisType.SortedSet)} > {(expectedResult ? " member starting " : " no member starting ")} {prefix} + prefix"; + + internal override void CheckCommands(CommandMap commandMap) => commandMap.AssertAvailable(RedisCommand.ZRANGEBYLEX); + + internal override IEnumerable CreateMessages(int db, IResultBox? resultBox) + { + yield return Message.Create(db, CommandFlags.None, RedisCommand.WATCH, key); + + // prepend '[' to prefix for inclusive search + var startValueWithToken = RedisDatabase.GetLexRange(prefix, Exclude.None, isStart: true, Order.Ascending); + + var message = ConditionProcessor.CreateMessage( + this, + db, + CommandFlags.None, + RedisCommand.ZRANGEBYLEX, + key, + startValueWithToken, + RedisLiterals.PlusSymbol, + RedisLiterals.LIMIT, + 0, + 1); + + message.SetSource(ConditionProcessor.Default, resultBox); + yield return message; + } + + internal override int GetHashSlot(ServerSelectionStrategy serverSelectionStrategy) => serverSelectionStrategy.HashSlot(key); + + internal override bool TryValidate(in RawResult result, out bool value) { - return new EqualsCondition(map(key), type, memberName, expectedEqual, expectedValue); + value = result.ItemsCount == 1 && result[0].AsRedisValue().StartsWith(prefix); + + if (!expectedResult) value = !value; + return true; } + } + + internal sealed class EqualsCondition : Condition + { + internal override Condition MapKeys(Func map) => + new EqualsCondition(map(key), type, memberName, expectedEqual, expectedValue); private readonly bool expectedEqual; private readonly RedisValue memberName, expectedValue; @@ -516,37 +608,28 @@ internal override Condition MapKeys(Func map) public EqualsCondition(in RedisKey key, RedisType type, in RedisValue memberName, 
bool expectedEqual, in RedisValue expectedValue) { - if (key.IsNull) throw new ArgumentException("key"); + if (key.IsNull) throw new ArgumentNullException(nameof(key)); this.key = key; this.memberName = memberName; this.expectedEqual = expectedEqual; this.expectedValue = expectedValue; this.type = type; - switch (type) + cmd = type switch { - case RedisType.Hash: - cmd = memberName.IsNull ? RedisCommand.GET : RedisCommand.HGET; - break; - - case RedisType.SortedSet: - cmd = RedisCommand.ZSCORE; - break; - - default: - throw new ArgumentException(nameof(type)); - } + RedisType.Hash => memberName.IsNull ? RedisCommand.GET : RedisCommand.HGET, + RedisType.SortedSet => RedisCommand.ZSCORE, + _ => throw new ArgumentException($"Unknown type: {type}", nameof(type)), + }; } - public override string ToString() - { - return (memberName.IsNull ? key.ToString() : ((string)key) + " " + type + " > " + memberName) + public override string ToString() => + (memberName.IsNull ? key.ToString() : ((string?)key) + " " + type + " > " + memberName) + (expectedEqual ? " == " : " != ") + expectedValue; - } internal override void CheckCommands(CommandMap commandMap) => commandMap.AssertAvailable(cmd); - internal sealed override IEnumerable CreateMessages(int db, IResultBox resultBox) + internal override IEnumerable CreateMessages(int db, IResultBox? 
resultBox) { yield return Message.Create(db, CommandFlags.None, RedisCommand.WATCH, key); @@ -555,10 +638,7 @@ internal sealed override IEnumerable CreateMessages(int db, IResultBox yield return message; } - internal override int GetHashSlot(ServerSelectionStrategy serverSelectionStrategy) - { - return serverSelectionStrategy.HashSlot(key); - } + internal override int GetHashSlot(ServerSelectionStrategy serverSelectionStrategy) => serverSelectionStrategy.HashSlot(key); internal override bool TryValidate(in RawResult result, out bool value) { @@ -566,28 +646,25 @@ internal override bool TryValidate(in RawResult result, out bool value) { case RedisType.SortedSet: var parsedValue = RedisValue.Null; - if (!result.IsNull) + if (!result.IsNull && result.TryGetDouble(out var val)) { - if (result.TryGetDouble(out var val)) - { - parsedValue = val; - } + parsedValue = val; } value = (parsedValue == expectedValue) == expectedEqual; - ConnectionMultiplexer.TraceWithoutContext("actual: " + (string)parsedValue + "; expected: " + (string)expectedValue + + ConnectionMultiplexer.TraceWithoutContext("actual: " + (string?)parsedValue + "; expected: " + (string?)expectedValue + "; wanted: " + (expectedEqual ? "==" : "!=") + "; voting: " + value); return true; default: - switch (result.Type) + switch (result.Resp2TypeBulkString) { case ResultType.BulkString: case ResultType.SimpleString: case ResultType.Integer: var parsed = result.AsRedisValue(); value = (parsed == expectedValue) == expectedEqual; - ConnectionMultiplexer.TraceWithoutContext("actual: " + (string)parsed + "; expected: " + (string)expectedValue + + ConnectionMultiplexer.TraceWithoutContext("actual: " + (string?)parsed + "; expected: " + (string?)expectedValue + "; wanted: " + (expectedEqual ? 
"==" : "!=") + "; voting: " + value); return true; } @@ -597,38 +674,33 @@ internal override bool TryValidate(in RawResult result, out bool value) } } - internal class ListCondition : Condition + internal sealed class ListCondition : Condition { - internal override Condition MapKeys(Func map) - { - return new ListCondition(map(key), index, expectedResult, expectedValue); - } + internal override Condition MapKeys(Func map) => + new ListCondition(map(key), index, expectedResult, expectedValue); private readonly bool expectedResult; private readonly long index; private readonly RedisValue? expectedValue; private readonly RedisKey key; + + [System.Diagnostics.CodeAnalysis.SuppressMessage("Roslynator", "RCS1242:Do not pass non-read-only struct by read-only reference.", Justification = "Attribute")] public ListCondition(in RedisKey key, long index, bool expectedResult, in RedisValue? expectedValue) { - if (key.IsNull) throw new ArgumentException(nameof(key)); + if (key.IsNull) throw new ArgumentNullException(nameof(key)); this.key = key; this.index = index; this.expectedResult = expectedResult; this.expectedValue = expectedValue; } - public override string ToString() - { - return ((string)key) + "[" + index.ToString() + "]" + public override string ToString() => + ((string?)key) + "[" + index.ToString() + "]" + (expectedValue.HasValue ? (expectedResult ? " == " : " != ") + expectedValue.Value : (expectedResult ? " exists" : " does not exist")); - } - internal override void CheckCommands(CommandMap commandMap) - { - commandMap.AssertAvailable(RedisCommand.LINDEX); - } + internal override void CheckCommands(CommandMap commandMap) => commandMap.AssertAvailable(RedisCommand.LINDEX); - internal sealed override IEnumerable CreateMessages(int db, IResultBox resultBox) + internal override IEnumerable CreateMessages(int db, IResultBox? 
resultBox) { yield return Message.Create(db, CommandFlags.None, RedisCommand.WATCH, key); @@ -641,7 +713,7 @@ internal sealed override IEnumerable CreateMessages(int db, IResultBox internal override bool TryValidate(in RawResult result, out bool value) { - switch (result.Type) + switch (result.Resp2TypeBulkString) { case ResultType.BulkString: case ResultType.SimpleString: @@ -650,12 +722,12 @@ internal override bool TryValidate(in RawResult result, out bool value) if (expectedValue.HasValue) { value = (parsed == expectedValue.Value) == expectedResult; - ConnectionMultiplexer.TraceWithoutContext("actual: " + (string)parsed + "; expected: " + (string)expectedValue.Value + + ConnectionMultiplexer.TraceWithoutContext("actual: " + (string?)parsed + "; expected: " + (string?)expectedValue.Value + "; wanted: " + (expectedResult ? "==" : "!=") + "; voting: " + value); } else { - value = (parsed.IsNull != expectedResult); + value = parsed.IsNull != expectedResult; ConnectionMultiplexer.TraceWithoutContext("exists: " + parsed + "; expected: " + expectedResult + "; voting: " + value); } return true; @@ -665,12 +737,10 @@ internal override bool TryValidate(in RawResult result, out bool value) } } - internal class LengthCondition : Condition + internal sealed class LengthCondition : Condition { - internal override Condition MapKeys(Func map) - { - return new LengthCondition(map(key), type, compareToResult, expectedLength); - } + internal override Condition MapKeys(Func map) => + new LengthCondition(map(key), type, compareToResult, expectedLength); private readonly int compareToResult; private readonly long expectedLength; @@ -680,54 +750,30 @@ internal override Condition MapKeys(Func map) public LengthCondition(in RedisKey key, RedisType type, int compareToResult, long expectedLength) { - if (key.IsNull) throw new ArgumentException(nameof(key)); + if (key.IsNull) throw new ArgumentNullException(nameof(key)); this.key = key; this.compareToResult = compareToResult; 
this.expectedLength = expectedLength; this.type = type; - switch (type) + cmd = type switch { - case RedisType.Hash: - cmd = RedisCommand.HLEN; - break; - - case RedisType.Set: - cmd = RedisCommand.SCARD; - break; - - case RedisType.List: - cmd = RedisCommand.LLEN; - break; - - case RedisType.SortedSet: - cmd = RedisCommand.ZCARD; - break; - - case RedisType.String: - cmd = RedisCommand.STRLEN; - break; - - default: - throw new ArgumentException(nameof(type)); - } + RedisType.Hash => RedisCommand.HLEN, + RedisType.Set => RedisCommand.SCARD, + RedisType.List => RedisCommand.LLEN, + RedisType.SortedSet => RedisCommand.ZCARD, + RedisType.Stream => RedisCommand.XLEN, + RedisType.String => RedisCommand.STRLEN, + _ => throw new ArgumentException($"Type {type} isn't recognized", nameof(type)), + }; } - public override string ToString() - { - return ((string)key) + " " + type + " length" + GetComparisonString() + expectedLength; - } + public override string ToString() => ((string?)key) + " " + type + " length" + GetComparisonString() + expectedLength; - private string GetComparisonString() - { - return compareToResult == 0 ? " == " : (compareToResult < 0 ? " > " : " < "); - } + private string GetComparisonString() => compareToResult == 0 ? " == " : (compareToResult < 0 ? " > " : " < "); - internal override void CheckCommands(CommandMap commandMap) - { - commandMap.AssertAvailable(cmd); - } + internal override void CheckCommands(CommandMap commandMap) => commandMap.AssertAvailable(cmd); - internal sealed override IEnumerable CreateMessages(int db, IResultBox resultBox) + internal override IEnumerable CreateMessages(int db, IResultBox? 
resultBox) { yield return Message.Create(db, CommandFlags.None, RedisCommand.WATCH, key); @@ -736,21 +782,18 @@ internal sealed override IEnumerable CreateMessages(int db, IResultBox yield return message; } - internal override int GetHashSlot(ServerSelectionStrategy serverSelectionStrategy) - { - return serverSelectionStrategy.HashSlot(key); - } + internal override int GetHashSlot(ServerSelectionStrategy serverSelectionStrategy) => serverSelectionStrategy.HashSlot(key); internal override bool TryValidate(in RawResult result, out bool value) { - switch (result.Type) + switch (result.Resp2TypeBulkString) { case ResultType.BulkString: case ResultType.SimpleString: case ResultType.Integer: var parsed = result.AsRedisValue(); value = parsed.IsInteger && (expectedLength.CompareTo((long)parsed) == compareToResult); - ConnectionMultiplexer.TraceWithoutContext("actual: " + (string)parsed + "; expected: " + expectedLength + + ConnectionMultiplexer.TraceWithoutContext("actual: " + (string?)parsed + "; expected: " + expectedLength + "; wanted: " + GetComparisonString() + "; voting: " + value); return true; } @@ -759,12 +802,10 @@ internal override bool TryValidate(in RawResult result, out bool value) } } - internal class SortedSetRangeLengthCondition : Condition + internal sealed class SortedSetRangeLengthCondition : Condition { - internal override Condition MapKeys(Func map) - { - return new SortedSetRangeLengthCondition(map(key), min, max, compareToResult, expectedLength); - } + internal override Condition MapKeys(Func map) => + new SortedSetRangeLengthCondition(map(key), min, max, compareToResult, expectedLength); private readonly RedisValue min; private readonly RedisValue max; @@ -774,7 +815,7 @@ internal override Condition MapKeys(Func map) public SortedSetRangeLengthCondition(in RedisKey key, RedisValue min, RedisValue max, int compareToResult, long expectedLength) { - if (key.IsNull) throw new ArgumentException(nameof(key)); + if (key.IsNull) throw new 
ArgumentNullException(nameof(key)); this.key = key; this.min = min; this.max = max; @@ -782,22 +823,14 @@ public SortedSetRangeLengthCondition(in RedisKey key, RedisValue min, RedisValue this.expectedLength = expectedLength; } - public override string ToString() - { - return ((string)key) + " " + RedisType.SortedSet + " range[" + min + ", " + max + "] length" + GetComparisonString() + expectedLength; - } + public override string ToString() => + ((string?)key) + " " + RedisType.SortedSet + " range[" + min + ", " + max + "] length" + GetComparisonString() + expectedLength; - private string GetComparisonString() - { - return compareToResult == 0 ? " == " : (compareToResult < 0 ? " > " : " < "); - } + private string GetComparisonString() => compareToResult == 0 ? " == " : (compareToResult < 0 ? " > " : " < "); - internal override void CheckCommands(CommandMap commandMap) - { - commandMap.AssertAvailable(RedisCommand.ZCOUNT); - } + internal override void CheckCommands(CommandMap commandMap) => commandMap.AssertAvailable(RedisCommand.ZCOUNT); - internal sealed override IEnumerable CreateMessages(int db, IResultBox resultBox) + internal override IEnumerable CreateMessages(int db, IResultBox? 
resultBox) { yield return Message.Create(db, CommandFlags.None, RedisCommand.WATCH, key); @@ -806,21 +839,18 @@ internal sealed override IEnumerable CreateMessages(int db, IResultBox yield return message; } - internal override int GetHashSlot(ServerSelectionStrategy serverSelectionStrategy) - { - return serverSelectionStrategy.HashSlot(key); - } + internal override int GetHashSlot(ServerSelectionStrategy serverSelectionStrategy) => serverSelectionStrategy.HashSlot(key); internal override bool TryValidate(in RawResult result, out bool value) { - switch (result.Type) + switch (result.Resp2TypeBulkString) { case ResultType.BulkString: case ResultType.SimpleString: case ResultType.Integer: var parsed = result.AsRedisValue(); value = parsed.IsInteger && (expectedLength.CompareTo((long)parsed) == compareToResult); - ConnectionMultiplexer.TraceWithoutContext("actual: " + (string)parsed + "; expected: " + expectedLength + + ConnectionMultiplexer.TraceWithoutContext("actual: " + (string?)parsed + "; expected: " + expectedLength + "; wanted: " + GetComparisonString() + "; voting: " + value); return true; } @@ -829,12 +859,10 @@ internal override bool TryValidate(in RawResult result, out bool value) } } - internal class SortedSetScoreCondition : Condition + internal sealed class SortedSetScoreCondition : Condition { - internal override Condition MapKeys(Func map) - { - return new SortedSetScoreCondition(map(key), sortedSetScore, expectedEqual, expectedValue); - } + internal override Condition MapKeys(Func map) => + new SortedSetScoreCondition(map(key), sortedSetScore, expectedEqual, expectedValue); private readonly bool expectedEqual; private readonly RedisValue sortedSetScore, expectedValue; @@ -844,7 +872,7 @@ public SortedSetScoreCondition(in RedisKey key, in RedisValue sortedSetScore, bo { if (key.IsNull) { - throw new ArgumentException("key"); + throw new ArgumentNullException(nameof(key)); } this.key = key; @@ -853,14 +881,12 @@ public SortedSetScoreCondition(in 
RedisKey key, in RedisValue sortedSetScore, bo this.expectedValue = expectedValue; } - public override string ToString() - { - return key.ToString() + (expectedEqual ? " contains " : " not contains ") + expectedValue + " members with score: " + sortedSetScore; - } + public override string ToString() => + key.ToString() + (expectedEqual ? " contains " : " not contains ") + expectedValue + " members with score: " + sortedSetScore; internal override void CheckCommands(CommandMap commandMap) => commandMap.AssertAvailable(RedisCommand.ZCOUNT); - internal sealed override IEnumerable CreateMessages(int db, IResultBox resultBox) + internal override IEnumerable CreateMessages(int db, IResultBox? resultBox) { yield return Message.Create(db, CommandFlags.None, RedisCommand.WATCH, key); @@ -874,12 +900,12 @@ internal sealed override IEnumerable CreateMessages(int db, IResultBox internal override bool TryValidate(in RawResult result, out bool value) { - switch (result.Type) + switch (result.Resp2TypeBulkString) { case ResultType.Integer: var parsedValue = result.AsRedisValue(); value = (parsedValue == expectedValue) == expectedEqual; - ConnectionMultiplexer.TraceWithoutContext("actual: " + (string)parsedValue + "; expected: " + (string)expectedValue + "; wanted: " + (expectedEqual ? "==" : "!=") + "; voting: " + value); + ConnectionMultiplexer.TraceWithoutContext("actual: " + (string?)parsedValue + "; expected: " + (string?)expectedValue + "; wanted: " + (expectedEqual ? "==" : "!=") + "; voting: " + value); return true; } @@ -890,13 +916,13 @@ internal override bool TryValidate(in RawResult result, out bool value) } /// - /// Indicates the status of a condition as part of a transaction + /// Indicates the status of a condition as part of a transaction. /// public sealed class ConditionResult { internal readonly Condition Condition; - private IResultBox resultBox; + private IResultBox? 
resultBox; private volatile bool wasSatisfied; @@ -907,13 +933,13 @@ internal ConditionResult(Condition condition) } /// - /// Indicates whether the condition was satisfied + /// Indicates whether the condition was satisfied. /// public bool WasSatisfied => wasSatisfied; internal IEnumerable CreateMessages(int db) => Condition.CreateMessages(db, resultBox); - internal IResultBox GetBox() { return resultBox; } + internal IResultBox? GetBox() => resultBox; internal bool UnwrapBox() { if (resultBox != null) diff --git a/src/StackExchange.Redis/Configuration/AzureManagedRedisOptionsProvider.cs b/src/StackExchange.Redis/Configuration/AzureManagedRedisOptionsProvider.cs new file mode 100644 index 000000000..06656b608 --- /dev/null +++ b/src/StackExchange.Redis/Configuration/AzureManagedRedisOptionsProvider.cs @@ -0,0 +1,62 @@ +using System; +using System.Net; +using System.Threading.Tasks; +using StackExchange.Redis.Maintenance; + +namespace StackExchange.Redis.Configuration +{ + /// + /// Options provider for Azure Managed Redis environments. + /// + public class AzureManagedRedisOptionsProvider : DefaultOptionsProvider + { + /// + /// Allow connecting after startup, in the cases where remote cache isn't ready or is overloaded. + /// + public override bool AbortOnConnectFail => false; + + /// + /// The minimum version of Redis in Azure Managed Redis is 7.4, so use the widest set of available commands when connecting. 
+ /// + public override Version DefaultVersion => RedisFeatures.v7_4_0; + + private static readonly string[] azureManagedRedisDomains = + [ + ".redis.azure.net", + ".redis.chinacloudapi.cn", + ".redis.usgovcloudapi.net", + ".redisenterprise.cache.azure.net", + ]; + + /// + public override bool IsMatch(EndPoint endpoint) + { + if (endpoint is DnsEndPoint dnsEp && IsHostInDomains(dnsEp.Host, azureManagedRedisDomains)) + { + return true; + } + + return false; + } + + private bool IsHostInDomains(string hostName, string[] domains) + { + foreach (var domain in domains) + { + if (hostName.EndsWith(domain, StringComparison.InvariantCultureIgnoreCase)) + { + return true; + } + } + + return false; + } + + /// + public override Task AfterConnectAsync(ConnectionMultiplexer muxer, Action log) + => AzureMaintenanceEvent.AddListenerAsync(muxer, log); + + /// + public override bool GetDefaultSsl(EndPointCollection endPoints) => true; + } +} diff --git a/src/StackExchange.Redis/Configuration/AzureOptionsProvider.cs b/src/StackExchange.Redis/Configuration/AzureOptionsProvider.cs new file mode 100644 index 000000000..c02f8f760 --- /dev/null +++ b/src/StackExchange.Redis/Configuration/AzureOptionsProvider.cs @@ -0,0 +1,88 @@ +using System; +using System.Net; +using System.Threading.Tasks; +using StackExchange.Redis.Maintenance; + +namespace StackExchange.Redis.Configuration +{ + /// + /// Options provider for Azure environments. + /// + public class AzureOptionsProvider : DefaultOptionsProvider + { + /// + /// Allow connecting after startup, in the cases where remote cache isn't ready or is overloaded. + /// + public override bool AbortOnConnectFail => false; + + /// + /// The minimum version of Redis in Azure is 6, so use the widest set of available commands when connecting. 
+ /// + public override Version DefaultVersion => RedisFeatures.v6_0_0; + + /// + /// Lists of domains known to be Azure Redis, so we can light up some helpful functionality + /// for minimizing downtime during maintenance events and such. + /// + private static readonly string[] azureRedisDomains = new[] + { + ".redis.cache.windows.net", + ".redis.cache.chinacloudapi.cn", + ".redis.cache.usgovcloudapi.net", + ".redis.cache.sovcloud-api.de", + ".redis.cache.sovcloud-api.fr", + }; + + /// + public override bool IsMatch(EndPoint endpoint) + { + if (endpoint is DnsEndPoint dnsEp && IsHostInDomains(dnsEp.Host, azureRedisDomains)) + { + return true; + } + + return false; + } + + private bool IsHostInDomains(string hostName, string[] domains) + { + foreach (var domain in domains) + { + if (hostName.EndsWith(domain, StringComparison.InvariantCultureIgnoreCase)) + { + return true; + } + } + + return false; + } + + /// + public override Task AfterConnectAsync(ConnectionMultiplexer muxer, Action log) + => AzureMaintenanceEvent.AddListenerAsync(muxer, log); + + /// + public override bool GetDefaultSsl(EndPointCollection endPoints) + { + foreach (var ep in endPoints) + { + switch (ep) + { + case DnsEndPoint dns: + if (dns.Port == 6380) + { + return true; + } + break; + case IPEndPoint ip: + if (ip.Port == 6380) + { + return true; + } + break; + } + } + return false; + } + } +} diff --git a/src/StackExchange.Redis/Configuration/DefaultOptionsProvider.cs b/src/StackExchange.Redis/Configuration/DefaultOptionsProvider.cs new file mode 100644 index 000000000..e4fa25891 --- /dev/null +++ b/src/StackExchange.Redis/Configuration/DefaultOptionsProvider.cs @@ -0,0 +1,342 @@ +using System; +using System.Collections.Generic; +using System.Net; +using System.Reflection; +using System.Threading.Tasks; +using Microsoft.Extensions.Logging; + +namespace StackExchange.Redis.Configuration +{ + /// + /// A defaults providers for . 
+ /// This providers defaults not explicitly specified and is present to be inherited by environments that want to provide + /// better defaults for their use case, e.g. in a single wrapper library used many places. + /// + /// + /// Why not just have a default instance? Good question! + /// Since we null coalesce down to the defaults, there's an inherent pit-of-failure with that approach of . + /// If you forget anything or if someone creates a provider nulling these out...kaboom. + /// + public class DefaultOptionsProvider + { + /// + /// The known providers to match against (built into the library) - the default set. + /// If none of these match, is used. + /// + private static readonly List BuiltInProviders = new() + { + new AzureOptionsProvider(), + new AzureManagedRedisOptionsProvider(), + }; + + /// + /// The current list of providers to match (potentially modified from defaults via . + /// + private static LinkedList KnownProviders { get; set; } = new(BuiltInProviders); + + /// + /// Adds a provider to match endpoints against. The last provider added has the highest priority. + /// If you want your provider to match everything, implement as return true;. + /// + /// The provider to add. + public static void AddProvider(DefaultOptionsProvider provider) + { + var newList = new LinkedList(KnownProviders); + newList.AddFirst(provider); + KnownProviders = newList; + } + + /// + /// Whether this options provider matches a given endpoint, for automatically selecting a provider based on what's being connected to. + /// + public virtual bool IsMatch(EndPoint endpoint) => false; + + /// + /// Gets a provider for the given endpoints, falling back to if nothing more specific is found. 
+ /// + public static DefaultOptionsProvider GetProvider(EndPointCollection endpoints) + { + foreach (var provider in KnownProviders) + { + foreach (var endpoint in endpoints) + { + if (provider.IsMatch(endpoint)) + { + return provider; + } + } + } + + return new DefaultOptionsProvider(); // no memoize; allow mutability concerns (also impacts subclasses, but: pragmatism) + } + + /// + /// Gets a provider for a given endpoints, falling back to if nothing more specific is found. + /// + public static DefaultOptionsProvider GetProvider(EndPoint endpoint) + { + foreach (var provider in KnownProviders) + { + if (provider.IsMatch(endpoint)) + { + return provider; + } + } + return new DefaultOptionsProvider(); // no memoize; allow mutability concerns (also impacts subclasses, but: pragmatism) + } + + /// + /// Gets or sets whether connect/configuration timeouts should be explicitly notified via a TimeoutException. + /// + public virtual bool AbortOnConnectFail => true; + + /// + /// Indicates whether admin operations should be allowed. + /// + public virtual bool AllowAdmin => false; + + /// + /// The backlog policy to be used for commands when a connection is unhealthy. + /// + public virtual BacklogPolicy BacklogPolicy => BacklogPolicy.Default; + + /// + /// A Boolean value that specifies whether the certificate revocation list is checked during authentication. + /// + public virtual bool CheckCertificateRevocation => true; + + /// + /// A Boolean value that specifies whether to use per-command validation of strict protocol validity. + /// This sends an additional command after EVERY command which incurs measurable overhead. 
+ /// + /// + /// The regular RESP protocol does not include correlation identifiers between requests and responses; in exceptional + /// scenarios, protocol desynchronization can occur, which may not be noticed immediately; this option adds additional data + /// to ensure that this cannot occur, at the cost of some (small) additional bandwidth usage. + /// + public virtual bool HighIntegrity => false; + + /// + /// The number of times to repeat the initial connect cycle if no servers respond promptly. + /// + public virtual int ConnectRetry => 3; + + /// + /// Specifies the time that should be allowed for connection. + /// Falls back to Max(5000, SyncTimeout) if null. + /// + public virtual TimeSpan? ConnectTimeout => null; + + /// + /// The command-map associated with this configuration. + /// + public virtual CommandMap? CommandMap => null; + + /// + /// Channel to use for broadcasting and listening for configuration change notification. + /// + public virtual string ConfigurationChannel => "__Booksleeve_MasterChanged"; + + /// + /// The server version to assume. + /// + public virtual Version DefaultVersion => RedisFeatures.v3_0_0; + + /// + /// Controls how often the connection heartbeats. A heartbeat includes: + /// - Evaluating if any messages have timed out. + /// - Evaluating connection status (checking for failures). + /// - Sending a server message to keep the connection alive if needed. + /// + /// Be aware setting this very low incurs additional overhead of evaluating the above more often. + public virtual TimeSpan HeartbeatInterval => TimeSpan.FromSeconds(1); + + /// + /// Whether to enable ECHO checks on every heartbeat to ensure network stream consistency. + /// This is a rare measure to react to any potential network traffic drops ASAP, terminating the connection. + /// + public virtual bool HeartbeatConsistencyChecks => false; + + /// + /// Whether exceptions include identifiable details (key names, additional .Data annotations). 
+ /// + public virtual bool IncludeDetailInExceptions => true; + + /// + /// Whether exceptions include performance counter details. + /// + /// + /// CPU usage, etc - note that this can be problematic on some platforms. + /// + public virtual bool IncludePerformanceCountersInExceptions => false; + + /// + /// Specifies the time interval at which connections should be pinged to ensure validity. + /// + public virtual TimeSpan KeepAliveInterval => TimeSpan.FromSeconds(60); + + /// + /// The to get loggers for connection events. + /// Note: changes here only affect s created after. + /// + public virtual ILoggerFactory? LoggerFactory => null; + + /// + /// Type of proxy to use (if any); for example . + /// + public virtual Proxy Proxy => Proxy.None; + + /// + /// The retry policy to be used for connection reconnects. + /// + public virtual IReconnectRetryPolicy? ReconnectRetryPolicy => null; + + /// + /// Indicates whether endpoints should be resolved via DNS before connecting. + /// If enabled the ConnectionMultiplexer will not re-resolve DNS when attempting to re-connect after a connection failure. + /// + public virtual bool ResolveDns => false; + + /// + /// Specifies the time that the system should allow for synchronous operations. + /// + public virtual TimeSpan SyncTimeout => TimeSpan.FromSeconds(5); + + /// + /// Tie-breaker used to choose between primaries (must match the endpoint exactly). + /// + public virtual string TieBreaker => "__Booksleeve_TieBreak"; + + /// + /// Check configuration every n interval. + /// + public virtual TimeSpan ConfigCheckInterval => TimeSpan.FromMinutes(1); + + /// + /// The username to use to authenticate with the server. + /// + public virtual string? User => null; + + /// + /// The password to use to authenticate with the server. + /// + public virtual string? Password => null; + + // We memoize this to reduce cost on re-access + private string? 
defaultClientName; + + /// + /// The default client name for a connection, with the library version appended. + /// + public string ClientName => defaultClientName ??= GetDefaultClientName(); + + /// + /// Gets the default client name for a connection. + /// + protected virtual string GetDefaultClientName() => + (TryGetAzureRoleInstanceIdNoThrow() + ?? ComputerName + ?? "StackExchange.Redis") + "(SE.Redis-v" + LibraryVersion + ")"; + + /// + /// Gets the library name to use for CLIENT SETINFO lib-name calls to Redis during handshake. + /// Defaults to "SE.Redis". + /// + public virtual string LibraryName => "SE.Redis"; + + /// + /// String version of the StackExchange.Redis library, for use in any options. + /// + protected static string LibraryVersion => Utils.GetLibVersion(); + + /// + /// Name of the machine we're running on, for use in any options. + /// + protected static string ComputerName => Environment.MachineName ?? Environment.GetEnvironmentVariable("ComputerName") ?? "Unknown"; + + /// + /// Whether to identify the client by library name/version when possible. + /// + public virtual bool SetClientLibrary => true; + + /// + /// Tries to get the RoleInstance Id if Microsoft.WindowsAzure.ServiceRuntime is loaded. + /// In case of any failure, swallows the exception and returns null. + /// + /// + /// Azure, in the default provider? Yes, to maintain existing compatibility/convenience. + /// Source != destination here. + /// + internal static string? TryGetAzureRoleInstanceIdNoThrow() + { + string? 
roleInstanceId; + try + { + var roleEnvironmentType = Type.GetType("Microsoft.WindowsAzure.ServiceRuntime.RoleEnvironment, Microsoft.WindowsAzure.ServiceRuntime", throwOnError: false); + + // https://msdn.microsoft.com/en-us/library/microsoft.windowsazure.serviceruntime.roleenvironment.isavailable.aspx + if (roleEnvironmentType?.GetProperty("IsAvailable") is not PropertyInfo isAvailableProp + || isAvailableProp.GetValue(null, null) is not bool isAvailableVal + || !isAvailableVal) + { + return null; + } + + var currentRoleInstanceProp = roleEnvironmentType.GetProperty("CurrentRoleInstance"); + var currentRoleInstanceId = currentRoleInstanceProp?.GetValue(null, null); + + var roleInstanceType = Type.GetType("Microsoft.WindowsAzure.ServiceRuntime.RoleInstance, Microsoft.WindowsAzure.ServiceRuntime", throwOnError: false); + roleInstanceId = roleInstanceType?.GetProperty("Id")?.GetValue(currentRoleInstanceId, null)?.ToString(); + + if (roleInstanceId.IsNullOrEmpty()) + { + roleInstanceId = null; + } + } + catch (Exception) + { + // Silently ignores the exception + roleInstanceId = null; + } + return roleInstanceId; + } + + /// + /// The action to perform, if any, immediately after an initial connection completes. + /// + /// The multiplexer that just connected. + /// The logger for the connection, to emit to the connection output log. + public virtual Task AfterConnectAsync(ConnectionMultiplexer multiplexer, Action log) => Task.CompletedTask; + + /// + /// Gets the default SSL "enabled or not" based on a set of endpoints. + /// Note: this setting then applies for *all* endpoints. + /// + /// The configured endpoints to determine SSL usage from (e.g. from the port). + /// Whether to enable SSL for connections (unless explicitly overridden in a direct set). + public virtual bool GetDefaultSsl(EndPointCollection endPoints) => false; + + /// + /// Gets the SSL Host to check for when connecting to endpoints (customizable in case of internal certificate shenanigans. 
+ /// + /// The configured endpoints to determine SSL host from (e.g. from the port). + /// The common host, if any, detected from the endpoint collection. + public virtual string? GetSslHostFromEndpoints(EndPointCollection endPoints) + { + string? commonHost = null; + foreach (var endpoint in endPoints) + { + if (endpoint is DnsEndPoint dnsEndpoint) + { + commonHost ??= dnsEndpoint.Host; + // Mismatch detected, no assumptions. + if (dnsEndpoint.Host != commonHost) + { + return null; + } + } + } + return commonHost; + } + } +} diff --git a/src/StackExchange.Redis/Configuration/LoggingTunnel.cs b/src/StackExchange.Redis/Configuration/LoggingTunnel.cs new file mode 100644 index 000000000..18216c1f2 --- /dev/null +++ b/src/StackExchange.Redis/Configuration/LoggingTunnel.cs @@ -0,0 +1,631 @@ +using System; +using System.Buffers; +using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; +using System.IO; +using System.IO.Pipelines; +using System.Net; +using System.Net.Security; +using System.Net.Sockets; +using System.Text; +using System.Threading; +using System.Threading.Tasks; +using Pipelines.Sockets.Unofficial; +using Pipelines.Sockets.Unofficial.Arenas; +using static StackExchange.Redis.PhysicalConnection; + +namespace StackExchange.Redis.Configuration; + +/// +/// Captures redis traffic; intended for debug use. +/// +[Obsolete("This API is experimental, has security and performance implications, and may change without notice", false)] +[SuppressMessage("ApiDesign", "RS0016:Add public types and members to the declared API", Justification = "Experimental API")] +public abstract class LoggingTunnel : Tunnel +{ + private readonly ConfigurationOptions _options; + private readonly bool _ssl; + private readonly Tunnel? _tail; + + /// + /// Replay the RESP messages for a pair of streams, invoking a callback per operation. 
+ /// + public static async Task ReplayAsync(Stream @out, Stream @in, Action pair) + { + using Arena arena = new(); + var outPipe = StreamConnection.GetReader(@out); + var inPipe = StreamConnection.GetReader(@in); + + long count = 0; + while (true) + { + var sent = await ReadOneAsync(outPipe, arena, isInbound: false).ForAwait(); + ContextualRedisResult received; + try + { + do + { + received = await ReadOneAsync(inPipe, arena, isInbound: true).ForAwait(); + if (received.IsOutOfBand && received.Result is not null) + { + // spoof an empty request for OOB messages + pair(RedisResult.NullSingle, received.Result); + } + } + while (received.IsOutOfBand); + } + catch (Exception ex) + { + // if we got an exception following a command, spoof that as a pair, + // so we see the message that had a corrupted reply + if (sent.Result is not null) + { + pair(sent.Result, RedisResult.Create(ex.Message, ResultType.Error)); + } + throw; // still surface the original exception + } + + if (sent.Result is null || received.Result is null) break; // no more paired messages + + pair(sent.Result, received.Result); + count++; + } + return count; + } + + /// + /// Replay the RESP messages all the streams in a folder, invoking a callback per operation. + /// + /// The directory of captured files to replay. + /// Operation to perform per replayed message pair. 
+ public static async Task ReplayAsync(string path, Action pair) + { + long total = 0; + foreach (var outPath in Directory.EnumerateFiles(path, "*.out")) + { + var inPath = Path.ChangeExtension(outPath, "in"); + if (!File.Exists(outPath)) continue; + + using var outFile = File.OpenRead(outPath); + using var inFile = File.OpenRead(inPath); + total += await ReplayAsync(outFile, inFile, pair).ForAwait(); + } + return total; + } + + private static async ValueTask ReadOneAsync(PipeReader input, Arena arena, bool isInbound) + { + while (true) + { + var readResult = await input.ReadAsync().ForAwait(); + var buffer = readResult.Buffer; + int handled = 0; + var result = buffer.IsEmpty ? default : ProcessBuffer(arena, ref buffer, isInbound); + input.AdvanceTo(buffer.Start, buffer.End); + + if (result.Result is not null) return result; + + if (handled == 0 && readResult.IsCompleted) + { + break; // no more data, or trailing incomplete messages + } + } + return default; + } + + /// + /// Validate a RESP stream and return the number of top-level RESP fragments. + /// + /// The path of a single file to validate, or a directory of captured files to validate. + public static async Task ValidateAsync(string path) + { + if (File.Exists(path)) + { + using var singleFile = File.OpenRead(path); + return await ValidateAsync(singleFile).ForAwait(); + } + else if (Directory.Exists(path)) + { + long total = 0; + foreach (var file in Directory.EnumerateFiles(path)) + { + try + { + using var folderFile = File.OpenRead(file); + total += await ValidateAsync(folderFile).ForAwait(); + } + catch (Exception ex) + { + throw new InvalidOperationException(ex.Message + " in " + file, ex); + } + } + return total; + } + else + { + throw new FileNotFoundException(path); + } + } + + /// + /// Validate a RESP stream and return the number of top-level RESP fragments. 
+ /// + public static async Task ValidateAsync(Stream stream) + { + using var arena = new Arena(); + var input = StreamConnection.GetReader(stream); + long total = 0, position = 0; + while (true) + { + var readResult = await input.ReadAsync().ForAwait(); + var buffer = readResult.Buffer; + int handled = 0; + if (!buffer.IsEmpty) + { + try + { + ProcessBuffer(arena, ref buffer, ref position, ref handled); // updates buffer.Start + } + catch (Exception ex) + { + throw new InvalidOperationException($"Invalid fragment starting at {position} (fragment {total + handled})", ex); + } + total += handled; + } + + input.AdvanceTo(buffer.Start, buffer.End); + + if (handled == 0 && readResult.IsCompleted) + { + break; // no more data, or trailing incomplete messages + } + } + return total; + } + private static void ProcessBuffer(Arena arena, ref ReadOnlySequence buffer, ref long position, ref int messageCount) + { + while (!buffer.IsEmpty) + { + var reader = new BufferReader(buffer); + try + { + var result = TryParseResult(true, arena, in buffer, ref reader, true, null); + if (result.HasValue) + { + buffer = reader.SliceFromCurrent(); + position += reader.TotalConsumed; + messageCount++; + } + else + { + break; // remaining buffer isn't enough; give up + } + } + finally + { + arena.Reset(); + } + } + } + + private readonly struct ContextualRedisResult + { + public readonly RedisResult? Result; + public readonly bool IsOutOfBand; + public ContextualRedisResult(RedisResult? 
result, bool isOutOfBand) + { + Result = result; + IsOutOfBand = isOutOfBand; + } + } + + private static ContextualRedisResult ProcessBuffer(Arena arena, ref ReadOnlySequence buffer, bool isInbound) + { + if (!buffer.IsEmpty) + { + var reader = new BufferReader(buffer); + try + { + var result = TryParseResult(true, arena, in buffer, ref reader, true, null); + bool isOutOfBand = result.Resp3Type == ResultType.Push + || (isInbound && result.Resp2TypeArray == ResultType.Array && IsArrayOutOfBand(result)); + if (result.HasValue) + { + buffer = reader.SliceFromCurrent(); + if (!RedisResult.TryCreate(null, result, out var parsed)) + { + throw new InvalidOperationException("Unable to parse raw result to RedisResult"); + } + return new(parsed, isOutOfBand); + } + } + finally + { + arena.Reset(); + } + } + return default; + + static bool IsArrayOutOfBand(in RawResult result) + { + var items = result.GetItems(); + return (items.Length >= 3 && (items[0].IsEqual(message) || items[0].IsEqual(smessage))) + || (items.Length >= 4 && items[0].IsEqual(pmessage)); + } + } + private static readonly CommandBytes message = "message", pmessage = "pmessage", smessage = "smessage"; + + /// + /// Create a new instance of a . + /// + protected LoggingTunnel(ConfigurationOptions? options = null, Tunnel? tail = null) + { + options ??= new(); + _options = options; + _ssl = options.Ssl; + _tail = tail; + options.Ssl = false; // disable here, since we want to log *decrypted* + } + + /// + /// Configures the provided options to perform file-based logging to a directory; + /// files will be sequential per stream starting from zero, and will blindly overwrite existing files. 
+ /// + public static void LogToDirectory(ConfigurationOptions options, string path) + { + var tunnel = new DirectoryLoggingTunnel(path, options, options.Tunnel); + options.Tunnel = tunnel; + } + + private sealed class DirectoryLoggingTunnel : LoggingTunnel + { + private readonly string path; + private int _nextIndex = -1; + + internal DirectoryLoggingTunnel(string path, ConfigurationOptions? options = null, Tunnel? tail = null) + : base(options, tail) + { + this.path = path; + if (!Directory.Exists(path)) throw new InvalidOperationException("Directory does not exist: " + path); + } + + protected override Stream Log(Stream stream, EndPoint endpoint, ConnectionType connectionType) + { + int index = Interlocked.Increment(ref _nextIndex); + var name = $"{Format.ToString(endpoint)} {connectionType} {index}.tmp"; + foreach (var c in InvalidChars) + { + name = name.Replace(c, ' '); + } + name = Path.Combine(path, name); + var reads = File.Create(Path.ChangeExtension(name, ".in")); + var writes = File.Create(Path.ChangeExtension(name, ".out")); + return new LoggingDuplexStream(stream, reads, writes); + } + + private static readonly char[] InvalidChars = Path.GetInvalidFileNameChars(); + } + + /// + public override async ValueTask BeforeAuthenticateAsync(EndPoint endpoint, ConnectionType connectionType, Socket? socket, CancellationToken cancellationToken) + { + Stream? stream = null; + if (_tail is not null) + { + stream = await _tail.BeforeAuthenticateAsync(endpoint, connectionType, socket, cancellationToken).ForAwait(); + } + stream ??= new NetworkStream(socket ?? throw new InvalidOperationException("No stream or socket available")); + if (_ssl) + { + stream = await TlsHandshakeAsync(stream, endpoint).ForAwait(); + } + return Log(stream, endpoint, connectionType); + } + + /// + /// Perform logging on the provided stream. 
+ /// + protected abstract Stream Log(Stream stream, EndPoint endpoint, ConnectionType connectionType); + + /// + public override ValueTask BeforeSocketConnectAsync(EndPoint endPoint, ConnectionType connectionType, Socket? socket, CancellationToken cancellationToken) + { + return _tail is null ? base.BeforeSocketConnectAsync(endPoint, connectionType, socket, cancellationToken) + : _tail.BeforeSocketConnectAsync(endPoint, connectionType, socket, cancellationToken); + } + + /// + public override ValueTask GetSocketConnectEndpointAsync(EndPoint endpoint, CancellationToken cancellationToken) + { + return _tail is null ? base.GetSocketConnectEndpointAsync(endpoint, cancellationToken) + : _tail.GetSocketConnectEndpointAsync(endpoint, cancellationToken); + } + +#pragma warning disable CS1998 // Async method lacks 'await' operators and will run synchronously - netfx back-compat mode + private async Task TlsHandshakeAsync(Stream stream, EndPoint endpoint) +#pragma warning restore CS1998 // Async method lacks 'await' operators and will run synchronously + { + // mirrors TLS handshake from PhysicalConnection, but wouldn't help to share code here + var host = _options.SslHost; + if (host.IsNullOrWhiteSpace()) + { + host = Format.ToStringHostOnly(endpoint); + } + + var ssl = new SslStream( + innerStream: stream, + leaveInnerStreamOpen: false, + userCertificateValidationCallback: _options.CertificateValidationCallback ?? PhysicalConnection.GetAmbientIssuerCertificateCallback(), + userCertificateSelectionCallback: _options.CertificateSelectionCallback ?? 
PhysicalConnection.GetAmbientClientCertificateCallback(), + encryptionPolicy: EncryptionPolicy.RequireEncryption); + +#if NET + var configOptions = _options.SslClientAuthenticationOptions?.Invoke(host); + if (configOptions is not null) + { + await ssl.AuthenticateAsClientAsync(configOptions).ForAwait(); + } + else + { + await ssl.AuthenticateAsClientAsync(host, _options.SslProtocols, _options.CheckCertificateRevocation).ForAwait(); + } +#else + await ssl.AuthenticateAsClientAsync(host, _options.SslProtocols, _options.CheckCertificateRevocation).ForAwait(); +#endif + return ssl; + } + + /// + /// Get a typical text representation of a redis command. + /// + public static string DefaultFormatCommand(RedisResult value) + { + try + { + if (value.IsNull) return "(null)"; + if (value.Type == ResultType.Array) + { + var sb = new StringBuilder(); + for (int i = 0; i < value.Length; i++) + { + var item = value[i]; + if (i != 0) sb.Append(' '); + if (IsSimple(item)) + { + sb.Append(item.AsString()); + } + else + { + sb.Append("..."); + break; + } + } + return sb.ToString(); + } + } + catch { } + return value.Type.ToString(); + + static bool IsSimple(RedisResult value) + { + try + { + switch (value.Resp2Type) + { + case ResultType.Array: return false; + case ResultType.Error: return true; + default: + var blob = value.AsByteArray(); // note non-alloc in the remaining cases + if (blob is null) return true; + if (blob.Length >= 50) return false; + for (int i = 0; i < blob.Length; i++) + { + char c = (char)blob[i]; + if (c < ' ' || c > '~') return false; + } + return true; + } + } + catch + { + return false; + } + } + } + + /// + /// Get a typical text representation of a redis response. 
+ /// + public static string DefaultFormatResponse(RedisResult value) + { + try + { + if (value.IsNull) return "(null)"; + switch (value.Type.ToResp2()) + { + case ResultType.Integer: + case ResultType.BulkString: + case ResultType.SimpleString: + return value.AsString()!; + case ResultType.Error: + return "-" + value.ToString(); + case ResultType.Array: + return $"[{value.Length}]"; + } + } + catch (Exception ex) + { + Debug.Write(ex.Message); + } + return value.Type.ToString(); + } + +#pragma warning disable CS1591 // Missing XML comment for publicly visible type or member + protected sealed class LoggingDuplexStream : Stream + { + private readonly Stream _inner, _reads, _writes; + + internal LoggingDuplexStream(Stream inner, Stream reads, Stream writes) + { + _inner = inner; + _reads = reads; + _writes = writes; + } + + public override bool CanRead => _inner.CanRead; + public override bool CanWrite => _inner.CanWrite; + + public override bool CanSeek => false; // duplex + public override bool CanTimeout => _inner.CanTimeout; + public override int ReadTimeout { get => _inner.ReadTimeout; set => _inner.ReadTimeout = value; } + public override int WriteTimeout { get => _inner.WriteTimeout; set => _inner.WriteTimeout = value; } + public override long Length => throw new NotSupportedException(); // duplex + public override long Position + { + get => throw new NotSupportedException(); // duplex + set => throw new NotSupportedException(); // duplex + } + public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException(); // duplex + public override void SetLength(long value) => throw new NotSupportedException(); // duplex + + // we don't use these APIs + public override IAsyncResult BeginRead(byte[] buffer, int offset, int count, AsyncCallback? callback, object? state) => throw new NotSupportedException(); + public override IAsyncResult BeginWrite(byte[] buffer, int offset, int count, AsyncCallback? callback, object? 
state) => throw new NotSupportedException(); + public override int EndRead(IAsyncResult asyncResult) => throw new NotSupportedException(); + public override void EndWrite(IAsyncResult asyncResult) => throw new NotSupportedException(); + + public override void Flush() + { + // note we don't flush _reads, as that could be cross-threaded + // (flush is a write operation, not a read one) + _writes.Flush(); + _inner.Flush(); + } + + public override async Task FlushAsync(CancellationToken cancellationToken) + { + var writesTask = _writes.FlushAsync().ForAwait(); + await _inner.FlushAsync().ForAwait(); + await writesTask; + } + + protected override void Dispose(bool disposing) + { + if (disposing) + { + _inner.Dispose(); + try { _reads.Flush(); } catch { } + _reads.Dispose(); + try { _writes.Flush(); } catch { } + _writes.Dispose(); + } + base.Dispose(disposing); + } + + public override void Close() + { + _inner.Close(); + try { _reads.Flush(); } catch { } + _reads.Close(); + try { _writes.Flush(); } catch { } + _writes.Close(); + base.Close(); + } + +#if NET + public override async ValueTask DisposeAsync() + { + await _inner.DisposeAsync().ForAwait(); + try { await _reads.FlushAsync().ForAwait(); } catch { } + await _reads.DisposeAsync().ForAwait(); + try { await _writes.FlushAsync().ForAwait(); } catch { } + await _writes.DisposeAsync().ForAwait(); + await base.DisposeAsync().ForAwait(); + } +#endif + + public override int ReadByte() + { + var val = _inner.ReadByte(); + if (val >= 0) + { + _reads.WriteByte((byte)val); + _reads.Flush(); + } + return val; + } + public override int Read(byte[] buffer, int offset, int count) + { + var len = _inner.Read(buffer, offset, count); + if (len > 0) + { + _reads.Write(buffer, offset, len); + _reads.Flush(); + } + return len; + } + public override async Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) + { + var len = await _inner.ReadAsync(buffer, offset, count, cancellationToken).ForAwait(); 
+ if (len > 0) + { + await _reads.WriteAsync(buffer, offset, len, cancellationToken).ForAwait(); + await _reads.FlushAsync(cancellationToken).ForAwait(); + } + return len; + } +#if NET + public override int Read(Span buffer) + { + var len = _inner.Read(buffer); + if (len > 0) + { + _reads.Write(buffer.Slice(0, len)); + _reads.Flush(); + } + return len; + } + public override async ValueTask ReadAsync(Memory buffer, CancellationToken cancellationToken) + { + var len = await _inner.ReadAsync(buffer, cancellationToken).ForAwait(); + if (len > 0) + { + await _reads.WriteAsync(buffer.Slice(0, len), cancellationToken).ForAwait(); + await _reads.FlushAsync(cancellationToken).ForAwait(); + } + return len; + } +#endif + + public override void WriteByte(byte value) + { + _writes.WriteByte(value); + _inner.WriteByte(value); + } + public override void Write(byte[] buffer, int offset, int count) + { + _writes.Write(buffer, offset, count); + _inner.Write(buffer, offset, count); + } + public override async Task WriteAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) + { + var writesTask = _writes.WriteAsync(buffer, offset, count, cancellationToken).ForAwait(); + await _inner.WriteAsync(buffer, offset, count, cancellationToken).ForAwait(); + await writesTask; + } +#if NET + public override void Write(ReadOnlySpan buffer) + { + _writes.Write(buffer); + _inner.Write(buffer); + } + public override async ValueTask WriteAsync(ReadOnlyMemory buffer, CancellationToken cancellationToken) + { + var writesTask = _writes.WriteAsync(buffer, cancellationToken).ForAwait(); + await _inner.WriteAsync(buffer, cancellationToken).ForAwait(); + await writesTask; + } +#endif + } +#pragma warning restore CS1591 // Missing XML comment for publicly visible type or member +} diff --git a/src/StackExchange.Redis/Configuration/Tunnel.cs b/src/StackExchange.Redis/Configuration/Tunnel.cs new file mode 100644 index 000000000..beebff2dc --- /dev/null +++ 
b/src/StackExchange.Redis/Configuration/Tunnel.cs @@ -0,0 +1,116 @@ +using System; +using System.Buffers; +using System.IO; +using System.Net; +using System.Net.Sockets; +using System.Text; +using System.Threading; +using System.Threading.Tasks; +using Pipelines.Sockets.Unofficial; + +namespace StackExchange.Redis.Configuration +{ + /// + /// Allows interception of the transport used to communicate with Redis. + /// + public abstract class Tunnel + { + /// + /// Gets the underlying socket endpoint to use when connecting to a logical endpoint. + /// + /// null should be returned if a socket is not required for this endpoint. + public virtual ValueTask GetSocketConnectEndpointAsync(EndPoint endpoint, CancellationToken cancellationToken) => new(endpoint); + + internal virtual bool IsInbuilt => false; // only inbuilt tunnels get added to config strings + + /// + /// Allows modification of a between creation and connection. + /// Passed in is the endpoint we're connecting to, which type of connection it is, and the socket itself. + /// For example, a specific local IP endpoint could be bound, linger time altered, etc. + /// + public virtual ValueTask BeforeSocketConnectAsync(EndPoint endPoint, ConnectionType connectionType, Socket? socket, CancellationToken cancellationToken) => default; + + /// + /// Invoked on a connected endpoint before server authentication and other handshakes occur, allowing pre-redis handshakes. By returning a custom , + /// the entire data flow can be intercepted, providing entire custom transports. + /// + public virtual ValueTask BeforeAuthenticateAsync(EndPoint endpoint, ConnectionType connectionType, Socket? socket, CancellationToken cancellationToken) => default; + + private sealed class HttpProxyTunnel : Tunnel + { + public EndPoint Proxy { get; } + public HttpProxyTunnel(EndPoint proxy) => Proxy = proxy ?? 
throw new ArgumentNullException(nameof(proxy)); + + public override ValueTask GetSocketConnectEndpointAsync(EndPoint endpoint, CancellationToken cancellationToken) => new(Proxy); + + public override async ValueTask BeforeAuthenticateAsync(EndPoint endpoint, ConnectionType connectionType, Socket? socket, CancellationToken cancellationToken) + { + if (socket is not null) + { + var encoding = Encoding.ASCII; + var ep = Format.ToString(endpoint); + const string Prefix = "CONNECT ", Suffix = " HTTP/1.1\r\n\r\n", ExpectedResponse1 = "HTTP/1.1 200 OK\r\n\r\n", ExpectedResponse2 = "HTTP/1.1 200 Connection established\r\n\r\n"; + byte[] chunk = ArrayPool.Shared.Rent(Math.Max( + encoding.GetByteCount(Prefix) + encoding.GetByteCount(ep) + encoding.GetByteCount(Suffix), + Math.Max(encoding.GetByteCount(ExpectedResponse1), encoding.GetByteCount(ExpectedResponse2)))); + var offset = 0; + offset += encoding.GetBytes(Prefix, 0, Prefix.Length, chunk, offset); + offset += encoding.GetBytes(ep, 0, ep.Length, chunk, offset); + offset += encoding.GetBytes(Suffix, 0, Suffix.Length, chunk, offset); + + static void SafeAbort(object? 
obj) + { + try + { + (obj as SocketAwaitableEventArgs)?.Abort(SocketError.TimedOut); + } + catch { } // best effort only + } + + using (var args = new SocketAwaitableEventArgs()) + using (cancellationToken.Register(static s => SafeAbort(s), args)) + { + args.SetBuffer(chunk, 0, offset); + if (!socket.SendAsync(args)) args.Complete(); + await args; + + // we expect to see: "HTTP/1.1 200 OK\n"; note our buffer is definitely big enough already + int toRead = Math.Max(encoding.GetByteCount(ExpectedResponse1), encoding.GetByteCount(ExpectedResponse2)), read; + offset = 0; + + var actualResponse = ""; + while (toRead > 0 && !actualResponse.EndsWith("\r\n\r\n")) + { + args.SetBuffer(chunk, offset, toRead); + if (!socket.ReceiveAsync(args)) args.Complete(); + read = await args; + + if (read <= 0) break; // EOF (since we're never doing zero-length reads) + toRead -= read; + offset += read; + + actualResponse = encoding.GetString(chunk, 0, offset); + } + if (toRead != 0 && !actualResponse.EndsWith("\r\n\r\n")) throw new EndOfStreamException("EOF negotiating HTTP tunnel"); + // lazy + if (ExpectedResponse1 != actualResponse && ExpectedResponse2 != actualResponse) + { + throw new InvalidOperationException("Unexpected response negotiating HTTP tunnel"); + } + ArrayPool.Shared.Return(chunk); + } + } + return default; // no need for custom stream wrapper here + } + + internal override bool IsInbuilt => true; + public override string ToString() => "http:" + Format.ToString(Proxy); + } + + /// + /// Create a tunnel via an HTTP proxy server. + /// + /// The endpoint to use as an HTTP proxy server. 
+ public static Tunnel HttpProxy(EndPoint proxy) => new HttpProxyTunnel(proxy); + } +} diff --git a/src/StackExchange.Redis/ConfigurationOptions.cs b/src/StackExchange.Redis/ConfigurationOptions.cs index b50b0a777..641fccc95 100644 --- a/src/StackExchange.Redis/ConfigurationOptions.cs +++ b/src/StackExchange.Redis/ConfigurationOptions.cs @@ -1,25 +1,35 @@ using System; using System.Collections.Generic; using System.ComponentModel; -using System.IO; using System.Linq; using System.Net; using System.Net.Security; +using System.Net.Sockets; using System.Security.Authentication; +using System.Security.Cryptography; using System.Security.Cryptography.X509Certificates; using System.Text; +using System.Threading; using System.Threading.Tasks; -using static StackExchange.Redis.ConnectionMultiplexer; +using Microsoft.Extensions.Logging; +using StackExchange.Redis.Configuration; namespace StackExchange.Redis { /// - /// The options relevant to a set of redis connections + /// The options relevant to a set of redis connections. /// + /// + /// Some options are not observed by a after initial creation: + /// + /// + /// + /// + /// + /// + /// public sealed class ConfigurationOptions : ICloneable { - internal const string DefaultTieBreaker = "__Booksleeve_TieBreak", DefaultConfigurationChannel = "__Booksleeve_MasterChanged"; - private static class OptionKeys { public static int ParseInt32(string key, string value, int minValue = int.MinValue, int maxValue = int.MaxValue) @@ -38,8 +48,11 @@ internal static bool ParseBoolean(string key, string value) internal static Version ParseVersion(string key, string value) { - if (!System.Version.TryParse(value, out Version tmp)) throw new ArgumentOutOfRangeException(key, $"Keyword '{key}' requires a version value; the value '{value}' is not recognised."); - return tmp; + if (Format.TryParseVersion(value, out Version? 
tmp)) + { + return tmp; + } + throw new ArgumentOutOfRangeException(key, $"Keyword '{key}' requires a version value; the value '{value}' is not recognised."); } internal static Proxy ParseProxy(string key, string value) @@ -48,9 +61,9 @@ internal static Proxy ParseProxy(string key, string value) return tmp; } - internal static SslProtocols ParseSslProtocols(string key, string value) + internal static SslProtocols ParseSslProtocols(string key, string? value) { - //Flags expect commas as separators, but we need to use '|' since commas are already used in the connection string to mean something else + // Flags expect commas as separators, but we need to use '|' since commas are already used in the connection string to mean something else value = value?.Replace("|", ","); if (!Enum.TryParse(value, true, out SslProtocols tmp)) throw new ArgumentOutOfRangeException(key, $"Keyword '{key}' requires an SslProtocol value (multiple values separated by '|'); the value '{value}' is not recognised."); @@ -58,11 +71,15 @@ internal static SslProtocols ParseSslProtocols(string key, string value) return tmp; } - internal static void Unknown(string key) + internal static RedisProtocol ParseRedisProtocol(string key, string value) { - throw new ArgumentException($"Keyword '{key}' is not supported.", key); + if (TryParseRedisProtocol(value, out var protocol)) return protocol; + throw new ArgumentOutOfRangeException(key, $"Keyword '{key}' requires a RedisProtocol value or a known protocol version number; the value '{value}' is not recognised."); } + internal static void Unknown(string key) => + throw new ArgumentException($"Keyword '{key}' is not supported.", key); + internal const string AbortOnConnectFail = "abortConnect", AllowAdmin = "allowAdmin", @@ -90,8 +107,11 @@ internal const string TieBreaker = "tiebreaker", Version = "version", WriteBuffer = "writeBuffer", - CheckCertificateRevocation = "checkCertificateRevocation"; - + CheckCertificateRevocation = 
"checkCertificateRevocation", + Tunnel = "tunnel", + SetClientLibrary = "setlib", + Protocol = "protocol", + HighIntegrity = "highIntegrity"; private static readonly Dictionary normalizedOptions = new[] { @@ -120,12 +140,14 @@ internal const string TieBreaker, Version, WriteBuffer, - CheckCertificateRevocation + CheckCertificateRevocation, + Protocol, + HighIntegrity, }.ToDictionary(x => x, StringComparer.OrdinalIgnoreCase); public static string TryNormalize(string value) { - if (value != null && normalizedOptions.TryGetValue(value, out string tmp)) + if (value != null && normalizedOptions.TryGetValue(value, out string? tmp)) { return tmp ?? ""; } @@ -133,372 +155,731 @@ public static string TryNormalize(string value) } } - private bool? allowAdmin, abortOnConnectFail, highPrioritySocketThreads, resolveDns, ssl, checkCertificateRevocation; + private DefaultOptionsProvider? defaultOptions; + + private bool? allowAdmin, abortOnConnectFail, resolveDns, ssl, checkCertificateRevocation, heartbeatConsistencyChecks, + includeDetailInExceptions, includePerformanceCountersInExceptions, setClientLibrary, highIntegrity; - private string tieBreaker, sslHost, configChannel; + private string? tieBreaker, sslHost, configChannel, user, password; - private CommandMap commandMap; + private TimeSpan? heartbeatInterval; - private Version defaultVersion; + private CommandMap? commandMap; - private int? keepAlive, asyncTimeout, syncTimeout, connectTimeout, responseTimeout, writeBuffer, connectRetry, configCheckSeconds; + private Version? defaultVersion; + + private int? keepAlive, asyncTimeout, syncTimeout, connectTimeout, responseTimeout, connectRetry, configCheckSeconds; private Proxy? proxy; - private IReconnectRetryPolicy reconnectRetryPolicy; + private IReconnectRetryPolicy? reconnectRetryPolicy; + + private BacklogPolicy? backlogPolicy; + + private ILoggerFactory? 
loggerFactory; /// /// A LocalCertificateSelectionCallback delegate responsible for selecting the certificate used for authentication; note /// that this cannot be specified in the configuration-string. /// - [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Design", "CA1009:DeclareEventHandlersCorrectly")] - public event LocalCertificateSelectionCallback CertificateSelection; + [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Design", "CA1009:DeclareEventHandlersCorrectly", Justification = "Existing compatibility")] + public event LocalCertificateSelectionCallback? CertificateSelection; /// /// A RemoteCertificateValidationCallback delegate responsible for validating the certificate supplied by the remote party; note /// that this cannot be specified in the configuration-string. /// - [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Design", "CA1009:DeclareEventHandlersCorrectly")] - public event RemoteCertificateValidationCallback CertificateValidation; + [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Design", "CA1009:DeclareEventHandlersCorrectly", Justification = "Existing compatibility")] + public event RemoteCertificateValidationCallback? CertificateValidation; /// - /// Gets or sets whether connect/configuration timeouts should be explicitly notified via a TimeoutException + /// The default (not explicitly configured) options for this connection, fetched based on our parsed endpoints. /// - public bool AbortOnConnectFail { get { return abortOnConnectFail ?? GetDefaultAbortOnConnectFailSetting(); } set { abortOnConnectFail = value; } } + public DefaultOptionsProvider Defaults + { + get => defaultOptions ??= DefaultOptionsProvider.GetProvider(EndPoints); + set => defaultOptions = value; + } /// - /// Indicates whether admin operations should be allowed + /// Allows modification of a between creation and connection. 
+ /// Passed in is the endpoint we're connecting to, which type of connection it is, and the socket itself. + /// For example, a specific local IP endpoint could be bound, linger time altered, etc. /// - public bool AllowAdmin { get { return allowAdmin.GetValueOrDefault(); } set { allowAdmin = value; } } + public Action? BeforeSocketConnect { get; set; } + + internal Func, Task> AfterConnectAsync => Defaults.AfterConnectAsync; /// - /// Specifies the time in milliseconds that the system should allow for asynchronous operations (defaults to SyncTimeout) + /// Gets or sets whether connect/configuration timeouts should be explicitly notified via a TimeoutException. /// - public int AsyncTimeout { get { return asyncTimeout ?? SyncTimeout; } set { asyncTimeout = value; } } + public bool AbortOnConnectFail + { + get => abortOnConnectFail ?? Defaults.AbortOnConnectFail; + set => abortOnConnectFail = value; + } /// - /// Indicates whether the connection should be encrypted + /// Indicates whether admin operations should be allowed. /// - [Obsolete("Please use .Ssl instead of .UseSsl"), + public bool AllowAdmin + { + get => allowAdmin ?? Defaults.AllowAdmin; + set => allowAdmin = value; + } + + /// + /// Specifies the time in milliseconds that the system should allow for asynchronous operations (defaults to SyncTimeout). + /// + public int AsyncTimeout + { + get => asyncTimeout ?? SyncTimeout; + set => asyncTimeout = value; + } + + /// + /// Indicates whether the connection should be encrypted. + /// + [Obsolete("Please use .Ssl instead of .UseSsl, will be removed in 3.0."), Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] - public bool UseSsl { get { return Ssl; } set { Ssl = value; } } + public bool UseSsl + { + get => Ssl; + set => Ssl = value; + } + + /// + /// Gets or sets whether the library should identify itself by library-name/version when possible. + /// + public bool SetClientLibrary + { + get => setClientLibrary ?? 
Defaults.SetClientLibrary; + set => setClientLibrary = value; + } + + /// + /// Gets or sets the library name to use for CLIENT SETINFO lib-name calls to Redis during handshake. + /// Defaults to "SE.Redis". + /// + /// If the value is null, empty or whitespace, then the value from the options-provider is used; + /// to disable the library name feature, use instead. + public string? LibraryName { get; set; } /// - /// Automatically encodes and decodes channels + /// Automatically encodes and decodes channels. /// public RedisChannel ChannelPrefix { get; set; } /// /// A Boolean value that specifies whether the certificate revocation list is checked during authentication. /// - public bool CheckCertificateRevocation { get { return checkCertificateRevocation ?? true; } set { checkCertificateRevocation = value; } } + public bool CheckCertificateRevocation + { + get => checkCertificateRevocation ?? Defaults.CheckCertificateRevocation; + set => checkCertificateRevocation = value; + } + + /// + /// A Boolean value that specifies whether to use per-command validation of strict protocol validity. + /// This sends an additional command after EVERY command which incurs measurable overhead. + /// + /// + /// The regular RESP protocol does not include correlation identifiers between requests and responses; in exceptional + /// scenarios, protocol desynchronization can occur, which may not be noticed immediately; this option adds additional data + /// to ensure that this cannot occur, at the cost of some (small) additional bandwidth usage. + /// + public bool HighIntegrity + { + get => highIntegrity ?? Defaults.HighIntegrity; + set => highIntegrity = value; + } /// - /// Create a certificate validation check that checks against the supplied issuer even if not known by the machine + /// Create a certificate validation check that checks against the supplied issuer even when not known by the machine. /// /// The file system path to find the certificate at. 
public void TrustIssuer(string issuerCertificatePath) => CertificateValidationCallback = TrustIssuerCallback(issuerCertificatePath); +#if NET /// - /// Create a certificate validation check that checks against the supplied issuer even if not known by the machine + /// Supply a user certificate from a PEM file pair and enable TLS. + /// + /// The path for the the user certificate (commonly a .crt file). + /// The path for the the user key (commonly a .key file). + public void SetUserPemCertificate(string userCertificatePath, string? userKeyPath = null) + { + CertificateSelectionCallback = CreatePemUserCertificateCallback(userCertificatePath, userKeyPath); + Ssl = true; + } +#endif + + /// + /// Supply a user certificate from a PFX file and optional password and enable TLS. + /// + /// The path for the the user certificate (commonly a .pfx file). + /// The password for the certificate file. + public void SetUserPfxCertificate(string userCertificatePath, string? password = null) + { + CertificateSelectionCallback = CreatePfxUserCertificateCallback(userCertificatePath, password); + Ssl = true; + } + +#if NET + internal static LocalCertificateSelectionCallback CreatePemUserCertificateCallback(string userCertificatePath, string? userKeyPath) + { + // PEM handshakes not universally supported and causes a runtime error about ephemeral certificates; to avoid, export as PFX + using var pem = X509Certificate2.CreateFromPemFile(userCertificatePath, userKeyPath); +#pragma warning disable SYSLIB0057 // Type or member is obsolete + var pfx = new X509Certificate2(pem.Export(X509ContentType.Pfx)); +#pragma warning restore SYSLIB0057 // Type or member is obsolete + + return (sender, targetHost, localCertificates, remoteCertificate, acceptableIssuers) => pfx; + } +#endif + + internal static LocalCertificateSelectionCallback CreatePfxUserCertificateCallback(string userCertificatePath, string? 
password, X509KeyStorageFlags storageFlags = X509KeyStorageFlags.DefaultKeySet) + { +#pragma warning disable SYSLIB0057 + var pfx = new X509Certificate2(userCertificatePath, password ?? "", storageFlags); +#pragma warning restore SYSLIB0057 + return (sender, targetHost, localCertificates, remoteCertificate, acceptableIssuers) => pfx; + } + + /// + /// Create a certificate validation check that checks against the supplied issuer even when not known by the machine. /// /// The issuer to trust. public void TrustIssuer(X509Certificate2 issuer) => CertificateValidationCallback = TrustIssuerCallback(issuer); internal static RemoteCertificateValidationCallback TrustIssuerCallback(string issuerCertificatePath) +#pragma warning disable SYSLIB0057 => TrustIssuerCallback(new X509Certificate2(issuerCertificatePath)); +#pragma warning restore SYSLIB0057 private static RemoteCertificateValidationCallback TrustIssuerCallback(X509Certificate2 issuer) { if (issuer == null) throw new ArgumentNullException(nameof(issuer)); - return (object _, X509Certificate certificate, X509Chain __, SslPolicyErrors sslPolicyError) - => sslPolicyError == SslPolicyErrors.RemoteCertificateChainErrors - && certificate is X509Certificate2 v2 - && CheckTrustedIssuer(v2, issuer); + return (object _, X509Certificate? certificate, X509Chain? 
certificateChain, SslPolicyErrors sslPolicyError) => + { + // If we're already valid, there's nothing further to check + if (sslPolicyError == SslPolicyErrors.None) + { + return true; + } + // If we're not valid due to chain errors - check against the trusted issuer + // Note that we're only proceeding at all here if the *only* issue is chain errors (not more flags in SslPolicyErrors) + return sslPolicyError == SslPolicyErrors.RemoteCertificateChainErrors + && certificate is X509Certificate2 v2 + && CheckTrustedIssuer(v2, certificateChain, issuer); + }; } - private static bool CheckTrustedIssuer(X509Certificate2 certificateToValidate, X509Certificate2 authority) + private static readonly Oid _serverAuthOid = new Oid("1.3.6.1.5.5.7.3.1", "1.3.6.1.5.5.7.3.1"); + + private static bool CheckTrustedIssuer(X509Certificate2 certificateToValidate, X509Chain? chainToValidate, X509Certificate2 authority) { - // reference: https://stackoverflow.com/questions/6497040/how-do-i-validate-that-a-certificate-was-created-by-a-particular-certification-a - X509Chain chain = new X509Chain(); + // Reference: + // https://stackoverflow.com/questions/6497040/how-do-i-validate-that-a-certificate-was-created-by-a-particular-certification-a + // https://github.com/stewartadam/dotnet-x509-certificate-verification + using X509Chain chain = new X509Chain(); chain.ChainPolicy.RevocationMode = X509RevocationMode.NoCheck; - chain.ChainPolicy.RevocationFlag = X509RevocationFlag.ExcludeRoot; chain.ChainPolicy.VerificationFlags = X509VerificationFlags.AllowUnknownCertificateAuthority; - chain.ChainPolicy.VerificationTime = DateTime.Now; + chain.ChainPolicy.VerificationTime = chainToValidate?.ChainPolicy?.VerificationTime ?? 
DateTime.Now; chain.ChainPolicy.UrlRetrievalTimeout = new TimeSpan(0, 0, 0); + // Ensure entended key usage checks are run and that we're observing a server TLS certificate + chain.ChainPolicy.ApplicationPolicy.Add(_serverAuthOid); chain.ChainPolicy.ExtraStore.Add(authority); - return chain.Build(certificateToValidate); + try + { + // This only verifies that the chain is valid, but with AllowUnknownCertificateAuthority could trust + // self-signed or partial chained certificates + bool chainIsVerified; + try + { + chainIsVerified = chain.Build(certificateToValidate); + } + catch (ArgumentException ex) when ((ex.ParamName ?? ex.Message) == "certificate" && Runtime.IsMono) + { + // work around Mono cert limitation; report as rejected rather than fault + // (note also the likely .ctor mixup re param-name vs message) + chainIsVerified = false; + } + if (chainIsVerified) + { + // Our method is "TrustIssuer", which means any intermediate cert we're being told to trust + // is a valid thing to trust, up until it's a root CA + bool found = false; + byte[] authorityData = authority.RawData; + foreach (var chainElement in chain.ChainElements) + { + using var chainCert = chainElement.Certificate; + if (!found) + { +#if NET8_0_OR_GREATER + if (chainCert.RawDataMemory.Span.SequenceEqual(authorityData)) +#else + if (chainCert.RawData.SequenceEqual(authorityData)) +#endif + { + found = true; + } + } + } + return found; + } + } + catch (CryptographicException) + { + // We specifically don't want to throw during validation here and would rather exit out gracefully + } + + // If we didn't find the trusted issuer in the chain at all - we do not trust the result. + return false; } /// - /// The client name to use for all connections + /// The client name to use for all connections. /// - public string ClientName { get; set; } + public string? 
ClientName { get; set; } /// - /// The number of times to repeat the initial connect cycle if no servers respond promptly + /// The number of times to repeat the initial connect cycle if no servers respond promptly. /// - public int ConnectRetry { get { return connectRetry ?? 3; } set { connectRetry = value; } } + public int ConnectRetry + { + get => connectRetry ?? Defaults.ConnectRetry; + set => connectRetry = value; + } /// - /// The command-map associated with this configuration + /// The command-map associated with this configuration. /// + /// + /// This is memoized when a connects. + /// Modifying it afterwards will have no effect on already-created multiplexers. + /// public CommandMap CommandMap { - get + get => commandMap ?? Defaults.CommandMap ?? Proxy switch { - if (commandMap != null) return commandMap; - switch (Proxy) - { - case Proxy.Twemproxy: - return CommandMap.Twemproxy; - default: - return CommandMap.Default; - } - } - set - { - commandMap = value ?? throw new ArgumentNullException(nameof(value)); - } + Proxy.Twemproxy => CommandMap.Twemproxy, + Proxy.Envoyproxy => CommandMap.Envoyproxy, + _ => CommandMap.Default, + }; + set => commandMap = value ?? throw new ArgumentNullException(nameof(value)); } /// - /// Channel to use for broadcasting and listening for configuration change notification + /// Gets the command map for a given server type, since some supersede settings when connecting. /// - public string ConfigurationChannel { get { return configChannel ?? DefaultConfigurationChannel; } set { configChannel = value; } } + internal CommandMap GetCommandMap(ServerType? serverType) => serverType switch + { + ServerType.Sentinel => CommandMap.Sentinel, + _ => CommandMap, + }; /// - /// Specifies the time in milliseconds that should be allowed for connection (defaults to 5 seconds unless SyncTimeout is higher) + /// Channel to use for broadcasting and listening for configuration change notification. 
+ /// + /// + /// This is memoized when a connects. + /// Modifying it afterwards will have no effect on already-created multiplexers. + /// + public string ConfigurationChannel + { + get => configChannel ?? Defaults.ConfigurationChannel; + set => configChannel = value; + } + + /// + /// Specifies the time in milliseconds that should be allowed for connection (defaults to 5 seconds unless SyncTimeout is higher). /// public int ConnectTimeout { - get - { - if (connectTimeout.HasValue) return connectTimeout.GetValueOrDefault(); - return Math.Max(5000, SyncTimeout); - } - set { connectTimeout = value; } + get => connectTimeout ?? ((int?)Defaults.ConnectTimeout?.TotalMilliseconds) ?? Math.Max(5000, SyncTimeout); + set => connectTimeout = value; } /// - /// Specifies the default database to be used when calling ConnectionMultiplexer.GetDatabase() without any parameters + /// Specifies the default database to be used when calling without any parameters. /// public int? DefaultDatabase { get; set; } /// - /// The server version to assume + /// The server version to assume. /// - public Version DefaultVersion { get { return defaultVersion ?? (IsAzureEndpoint() ? RedisFeatures.v3_0_0 : RedisFeatures.v2_0_0); } set { defaultVersion = value; } } + public Version DefaultVersion + { + get => defaultVersion ?? Defaults.DefaultVersion; + set => defaultVersion = value; + } + + /// + /// The endpoints defined for this configuration. + /// + /// + /// This is memoized when a connects. + /// Modifying it afterwards will have no effect on already-created multiplexers. + /// + public EndPointCollection EndPoints { get; init; } = new EndPointCollection(); + + /// + /// Whether to enable ECHO checks on every heartbeat to ensure network stream consistency. + /// This is a rare measure to react to any potential network traffic drops ASAP, terminating the connection. + /// + public bool HeartbeatConsistencyChecks + { + get => heartbeatConsistencyChecks ?? 
Defaults.HeartbeatConsistencyChecks; + set => heartbeatConsistencyChecks = value; + } + + /// + /// Controls how often the connection heartbeats. A heartbeat includes: + /// - Evaluating if any messages have timed out. + /// - Evaluating connection status (checking for failures). + /// - Sending a server message to keep the connection alive if needed. + /// + /// + /// This defaults to 1000 milliseconds and should not be changed for most use cases. + /// If for example you want to evaluate whether commands have violated the at a lower fidelity + /// than 1000 milliseconds, you could lower this value. + /// Be aware setting this very low incurs additional overhead of evaluating the above more often. + /// + public TimeSpan HeartbeatInterval + { + get => heartbeatInterval ?? Defaults.HeartbeatInterval; + set => heartbeatInterval = value; + } /// - /// The endpoints defined for this configuration + /// Use ThreadPriority.AboveNormal for SocketManager reader and writer threads (true by default). + /// If , will be used. /// - public EndPointCollection EndPoints { get; } = new EndPointCollection(); + [Obsolete($"This setting no longer has any effect, please use {nameof(SocketManager.SocketManagerOptions)}.{nameof(SocketManager.SocketManagerOptions.UseHighPrioritySocketThreads)} instead - this setting will be removed in 3.0.")] + [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] + public bool HighPrioritySocketThreads + { + get => false; + set { } + } /// - /// Use ThreadPriority.AboveNormal for SocketManager reader and writer threads (true by default). If false, ThreadPriority.Normal will be used. + /// Whether exceptions include identifiable details (key names, additional .Data annotations). /// - public bool HighPrioritySocketThreads { get { return highPrioritySocketThreads ?? true; } set { highPrioritySocketThreads = value; } } + public bool IncludeDetailInExceptions + { + get => includeDetailInExceptions ?? 
Defaults.IncludeDetailInExceptions; + set => includeDetailInExceptions = value; + } - // Use coalesce expression. /// - /// Specifies the time in seconds at which connections should be pinged to ensure validity + /// Whether exceptions include performance counter details. /// -#pragma warning disable RCS1128 - public int KeepAlive { get { return keepAlive.GetValueOrDefault(-1); } set { keepAlive = value; } } -#pragma warning restore RCS1128 // Use coalesce expression. + /// + /// CPU usage, etc - note that this can be problematic on some platforms. + /// + public bool IncludePerformanceCountersInExceptions + { + get => includePerformanceCountersInExceptions ?? Defaults.IncludePerformanceCountersInExceptions; + set => includePerformanceCountersInExceptions = value; + } /// - /// The user to use to authenticate with the server. + /// Specifies the time in seconds at which connections should be pinged to ensure validity. + /// -1 Defaults to 60 Seconds. /// - public string User { get; set; } + public int KeepAlive + { + get => keepAlive ?? (int)Defaults.KeepAliveInterval.TotalSeconds; + set => keepAlive = value; + } + + /// + /// The to get loggers for connection events. + /// Note: changes here only affect s created after. + /// + public ILoggerFactory? LoggerFactory + { + get => loggerFactory ?? Defaults.LoggerFactory; + set => loggerFactory = value; + } + + /// + /// The username to use to authenticate with the server. + /// + public string? User + { + get => user ?? Defaults.User; + set => user = value; + } /// /// The password to use to authenticate with the server. /// - public string Password { get; set; } + public string? Password + { + get => password ?? Defaults.Password; + set => password = value; + } /// - /// Specifies whether asynchronous operations should be invoked in a way that guarantees their original delivery order + /// Specifies whether asynchronous operations should be invoked in a way that guarantees their original delivery order. 
/// - [Obsolete("Not supported; if you require ordered pub/sub, please see " + nameof(ChannelMessageQueue), false)] + [Obsolete("Not supported; if you require ordered pub/sub, please see " + nameof(ChannelMessageQueue) + " - this will be removed in 3.0.", false)] + [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] public bool PreserveAsyncOrder { - get { return false; } + get => false; set { } } /// - /// Type of proxy to use (if any); for example Proxy.Twemproxy. + /// Type of proxy to use (if any); for example . /// - public Proxy Proxy { get { return proxy.GetValueOrDefault(); } set { proxy = value; } } + public Proxy Proxy + { + get => proxy ?? Defaults.Proxy; + set => proxy = value; + } /// - /// The retry policy to be used for connection reconnects + /// The retry policy to be used for connection reconnects. /// - public IReconnectRetryPolicy ReconnectRetryPolicy { get { return reconnectRetryPolicy ??= new LinearRetry(ConnectTimeout); } set { reconnectRetryPolicy = value; } } + public IReconnectRetryPolicy ReconnectRetryPolicy + { + get => reconnectRetryPolicy ??= Defaults.ReconnectRetryPolicy ?? new ExponentialRetry(ConnectTimeout / 2); + set => reconnectRetryPolicy = value; + } + + /// + /// The backlog policy to be used for commands when a connection is unhealthy. + /// + public BacklogPolicy BacklogPolicy + { + get => backlogPolicy ?? Defaults.BacklogPolicy; + set => backlogPolicy = value; + } /// /// Indicates whether endpoints should be resolved via DNS before connecting. - /// If enabled the ConnectionMultiplexer will not re-resolve DNS - /// when attempting to re-connect after a connection failure. + /// If enabled the ConnectionMultiplexer will not re-resolve DNS when attempting to re-connect after a connection failure. /// - public bool ResolveDns { get { return resolveDns.GetValueOrDefault(); } set { resolveDns = value; } } + public bool ResolveDns + { + get => resolveDns ?? 
Defaults.ResolveDns; + set => resolveDns = value; + } /// - /// Specifies the time in milliseconds that the system should allow for responses before concluding that the socket is unhealthy - /// (defaults to SyncTimeout) + /// Specifies the time in milliseconds that the system should allow for responses before concluding that the socket is unhealthy. /// - [Obsolete("This setting no longer has any effect, and should not be used")] - public int ResponseTimeout { get { return 0; } set { } } + [Obsolete("This setting no longer has any effect, and should not be used - will be removed in 3.0.")] + [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] + public int ResponseTimeout + { + get => 0; + set { } + } /// /// The service name used to resolve a service via sentinel. /// - public string ServiceName { get; set; } + public string? ServiceName { get; set; } /// - /// Gets or sets the SocketManager instance to be used with these options; if this is null a shared cross-multiplexer SocketManager - /// is used + /// Gets or sets the SocketManager instance to be used with these options. + /// If this is null a shared cross-multiplexer is used. /// - public SocketManager SocketManager { get; set; } + /// + /// This is only used when a is created. + /// Modifying it afterwards will have no effect on already-created multiplexers. + /// + public SocketManager? SocketManager { get; set; } +#if NET /// - /// Indicates whether the connection should be encrypted + /// A provider for a given host, for custom TLS connection options. + /// Note: this overrides *all* other TLS and certificate settings, only for advanced use cases. /// - public bool Ssl { get { return ssl.GetValueOrDefault(); } set { ssl = value; } } + public Func? SslClientAuthenticationOptions { get; set; } +#endif /// - /// The target-host to use when validating SSL certificate; setting a value here enables SSL mode + /// Indicates whether the connection should be encrypted. 
/// - public string SslHost { get { return sslHost ?? InferSslHostFromEndpoints(); } set { sslHost = value; } } + public bool Ssl + { + get => ssl ?? Defaults.GetDefaultSsl(EndPoints); + set => ssl = value; + } /// - /// Configures which Ssl/TLS protocols should be allowed. If not set, defaults are chosen by the .NET framework. + /// The target-host to use when validating SSL certificate; setting a value here enables SSL mode. + /// + public string? SslHost + { + get => sslHost ?? Defaults.GetSslHostFromEndpoints(EndPoints); + set => sslHost = value; + } + + /// + /// Configures which SSL/TLS protocols should be allowed. If not set, defaults are chosen by the .NET framework. /// public SslProtocols? SslProtocols { get; set; } /// - /// Specifies the time in milliseconds that the system should allow for synchronous operations (defaults to 5 seconds) + /// Specifies the time in milliseconds that the system should allow for synchronous operations (defaults to 5 seconds). /// -#pragma warning disable RCS1128 - public int SyncTimeout { get { return syncTimeout.GetValueOrDefault(5000); } set { syncTimeout = value; } } -#pragma warning restore RCS1128 + public int SyncTimeout + { + get => syncTimeout ?? (int)Defaults.SyncTimeout.TotalMilliseconds; + set => syncTimeout = value; + } /// - /// Tie-breaker used to choose between masters (must match the endpoint exactly) + /// Tie-breaker used to choose between primaries (must match the endpoint exactly). /// - public string TieBreaker { get { return tieBreaker ?? DefaultTieBreaker; } set { tieBreaker = value; } } + public string TieBreaker + { + get => tieBreaker ?? Defaults.TieBreaker; + set => tieBreaker = value; + } + /// - /// The size of the output buffer to use + /// The size of the output buffer to use. 
/// - [Obsolete("This setting no longer has any effect, and should not be used")] - public int WriteBuffer { get { return 0; } set { } } + [Obsolete("This setting no longer has any effect, and should not be used - will be removed in 3.0.")] + [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] + public int WriteBuffer + { + get => 0; + set { } + } - internal LocalCertificateSelectionCallback CertificateSelectionCallback { get { return CertificateSelection; } private set { CertificateSelection = value; } } + internal LocalCertificateSelectionCallback? CertificateSelectionCallback + { + get => CertificateSelection; + private set => CertificateSelection = value; + } // these just rip out the underlying handlers, bypassing the event accessors - needed when creating the SSL stream - internal RemoteCertificateValidationCallback CertificateValidationCallback { get { return CertificateValidation; } private set { CertificateValidation = value; } } + internal RemoteCertificateValidationCallback? CertificateValidationCallback + { + get => CertificateValidation; + private set => CertificateValidation = value; + } /// - /// Check configuration every n seconds (every minute by default) + /// Check configuration every n seconds (every minute by default). /// -#pragma warning disable RCS1128 - public int ConfigCheckSeconds { get { return configCheckSeconds.GetValueOrDefault(60); } set { configCheckSeconds = value; } } -#pragma warning restore RCS1128 + public int ConfigCheckSeconds + { + get => configCheckSeconds ?? (int)Defaults.ConfigCheckInterval.TotalSeconds; + set => configCheckSeconds = value; + } /// - /// Parse the configuration from a comma-delimited configuration string + /// Parse the configuration from a comma-delimited configuration string. /// /// The configuration string to parse. - /// is null. + /// is . /// is empty. 
- public static ConfigurationOptions Parse(string configuration) - { - var options = new ConfigurationOptions(); - options.DoParse(configuration, false); - return options; - } + public static ConfigurationOptions Parse(string configuration) => Parse(configuration, false); /// - /// Parse the configuration from a comma-delimited configuration string + /// Parse the configuration from a comma-delimited configuration string. /// /// The configuration string to parse. /// Whether to ignore unknown elements in . - /// is null. + /// is . /// is empty. - public static ConfigurationOptions Parse(string configuration, bool ignoreUnknown) - { - var options = new ConfigurationOptions(); - options.DoParse(configuration, ignoreUnknown); - return options; - } + public static ConfigurationOptions Parse(string configuration, bool ignoreUnknown) => + new ConfigurationOptions().DoParse(configuration, ignoreUnknown); /// - /// Create a copy of the configuration + /// Create a copy of the configuration. /// - public ConfigurationOptions Clone() + public ConfigurationOptions Clone() => new ConfigurationOptions { - var options = new ConfigurationOptions - { - ClientName = ClientName, - ServiceName = ServiceName, - keepAlive = keepAlive, - syncTimeout = syncTimeout, - asyncTimeout = asyncTimeout, - allowAdmin = allowAdmin, - defaultVersion = defaultVersion, - connectTimeout = connectTimeout, - User = User, - Password = Password, - tieBreaker = tieBreaker, - writeBuffer = writeBuffer, - ssl = ssl, - sslHost = sslHost, - highPrioritySocketThreads = highPrioritySocketThreads, - configChannel = configChannel, - abortOnConnectFail = abortOnConnectFail, - resolveDns = resolveDns, - proxy = proxy, - commandMap = commandMap, - CertificateValidationCallback = CertificateValidationCallback, - CertificateSelectionCallback = CertificateSelectionCallback, - ChannelPrefix = ChannelPrefix.Clone(), - SocketManager = SocketManager, - connectRetry = connectRetry, - configCheckSeconds = 
configCheckSeconds, - responseTimeout = responseTimeout, - DefaultDatabase = DefaultDatabase, - ReconnectRetryPolicy = reconnectRetryPolicy, - SslProtocols = SslProtocols, - checkCertificateRevocation = checkCertificateRevocation, - }; - foreach (var item in EndPoints) - options.EndPoints.Add(item); - return options; - } + defaultOptions = defaultOptions, + ClientName = ClientName, + ServiceName = ServiceName, + keepAlive = keepAlive, + syncTimeout = syncTimeout, + asyncTimeout = asyncTimeout, + allowAdmin = allowAdmin, + defaultVersion = defaultVersion, + connectTimeout = connectTimeout, + user = user, + password = password, + tieBreaker = tieBreaker, + ssl = ssl, + sslHost = sslHost, + configChannel = configChannel, + abortOnConnectFail = abortOnConnectFail, + resolveDns = resolveDns, + proxy = proxy, + commandMap = commandMap, + CertificateValidationCallback = CertificateValidationCallback, + CertificateSelectionCallback = CertificateSelectionCallback, + ChannelPrefix = ChannelPrefix.Clone(), + SocketManager = SocketManager, + connectRetry = connectRetry, + configCheckSeconds = configCheckSeconds, + responseTimeout = responseTimeout, + DefaultDatabase = DefaultDatabase, + reconnectRetryPolicy = reconnectRetryPolicy, + backlogPolicy = backlogPolicy, + SslProtocols = SslProtocols, + checkCertificateRevocation = checkCertificateRevocation, + BeforeSocketConnect = BeforeSocketConnect, + EndPoints = EndPoints.Clone(), + LoggerFactory = LoggerFactory, +#if NET + SslClientAuthenticationOptions = SslClientAuthenticationOptions, +#endif + Tunnel = Tunnel, + setClientLibrary = setClientLibrary, + LibraryName = LibraryName, + Protocol = Protocol, + heartbeatInterval = heartbeatInterval, + heartbeatConsistencyChecks = heartbeatConsistencyChecks, + highIntegrity = highIntegrity, + }; /// - /// Resolve the default port for any endpoints that did not have a port explicitly specified + /// Apply settings to configure this instance of , e.g. for a specific scenario. 
/// - public void SetDefaultPorts() + /// An action that will update the properties of this instance. + /// This instance, with any changes made. + public ConfigurationOptions Apply(Action configure) { - EndPoints.SetDefaultPorts(Ssl ? 6380 : 6379); + configure?.Invoke(this); + return this; } /// - /// Sets default config settings required for sentinel usage + /// Resolve the default port for any endpoints that did not have a port explicitly specified. /// - internal void SetSentinelDefaults() - { - // this is required when connecting to sentinel servers - TieBreaker = ""; - CommandMap = CommandMap.Sentinel; + public void SetDefaultPorts() => EndPoints.SetDefaultPorts(ServerType.Standalone, ssl: Ssl); + + internal bool IsSentinel => !string.IsNullOrEmpty(ServiceName); - // use default sentinel port - EndPoints.SetDefaultPorts(26379); + /// + /// Gets a tie breaker if we both have one set, and should be using one. + /// + internal bool TryGetTieBreaker(out RedisKey tieBreaker) + { + var key = TieBreaker; + if (!IsSentinel && !string.IsNullOrWhiteSpace(key)) + { + tieBreaker = key; + return true; + } + tieBreaker = default; + return false; } /// @@ -529,71 +910,38 @@ public string ToString(bool includePassword) Append(sb, OptionKeys.AllowAdmin, allowAdmin); Append(sb, OptionKeys.Version, defaultVersion); Append(sb, OptionKeys.ConnectTimeout, connectTimeout); - Append(sb, OptionKeys.User, User); - Append(sb, OptionKeys.Password, (includePassword || string.IsNullOrEmpty(Password)) ? Password : "*****"); + Append(sb, OptionKeys.User, user); + Append(sb, OptionKeys.Password, (includePassword || string.IsNullOrEmpty(password)) ? 
password : "*****"); Append(sb, OptionKeys.TieBreaker, tieBreaker); - Append(sb, OptionKeys.WriteBuffer, writeBuffer); Append(sb, OptionKeys.Ssl, ssl); Append(sb, OptionKeys.SslProtocols, SslProtocols?.ToString().Replace(',', '|')); Append(sb, OptionKeys.CheckCertificateRevocation, checkCertificateRevocation); Append(sb, OptionKeys.SslHost, sslHost); - Append(sb, OptionKeys.HighPrioritySocketThreads, highPrioritySocketThreads); Append(sb, OptionKeys.ConfigChannel, configChannel); Append(sb, OptionKeys.AbortOnConnectFail, abortOnConnectFail); Append(sb, OptionKeys.ResolveDns, resolveDns); - Append(sb, OptionKeys.ChannelPrefix, (string)ChannelPrefix); + Append(sb, OptionKeys.ChannelPrefix, (string?)ChannelPrefix); Append(sb, OptionKeys.ConnectRetry, connectRetry); Append(sb, OptionKeys.Proxy, proxy); Append(sb, OptionKeys.ConfigCheckSeconds, configCheckSeconds); Append(sb, OptionKeys.ResponseTimeout, responseTimeout); Append(sb, OptionKeys.DefaultDatabase, DefaultDatabase); + Append(sb, OptionKeys.SetClientLibrary, setClientLibrary); + Append(sb, OptionKeys.HighIntegrity, highIntegrity); + Append(sb, OptionKeys.Protocol, FormatProtocol(Protocol)); + if (Tunnel is { IsInbuilt: true } tunnel) + { + Append(sb, OptionKeys.Tunnel, tunnel.ToString()); + } commandMap?.AppendDeltas(sb); return sb.ToString(); - } - - internal bool HasDnsEndPoints() - { - foreach (var endpoint in EndPoints) if (endpoint is DnsEndPoint) return true; - return false; - } - internal async Task ResolveEndPointsAsync(ConnectionMultiplexer multiplexer, LogProxy log) - { - var cache = new Dictionary(StringComparer.OrdinalIgnoreCase); - for (int i = 0; i < EndPoints.Count; i++) - { - if (EndPoints[i] is DnsEndPoint dns) - { - try - { - if (dns.Host == ".") - { - EndPoints[i] = new IPEndPoint(IPAddress.Loopback, dns.Port); - } - else if (cache.TryGetValue(dns.Host, out IPAddress ip)) - { // use cache - EndPoints[i] = new IPEndPoint(ip, dns.Port); - } - else - { - log?.WriteLine($"Using DNS to resolve 
'{dns.Host}'..."); - var ips = await Dns.GetHostAddressesAsync(dns.Host).ObserveErrors().ForAwait(); - if (ips.Length == 1) - { - ip = ips[0]; - log?.WriteLine($"'{dns.Host}' => {ip}"); - cache[dns.Host] = ip; - EndPoints[i] = new IPEndPoint(ip, dns.Port); - } - } - } - catch (Exception ex) - { - multiplexer.OnInternalError(ex); - log?.WriteLine(ex.Message); - } - } - } + static string? FormatProtocol(RedisProtocol? protocol) => protocol switch { + null => null, + RedisProtocol.Resp2 => "resp2", + RedisProtocol.Resp3 => "resp3", + _ => protocol.GetValueOrDefault().ToString(), + }; } private static void Append(StringBuilder sb, object value) @@ -607,9 +955,9 @@ private static void Append(StringBuilder sb, object value) } } - private static void Append(StringBuilder sb, string prefix, object value) + private static void Append(StringBuilder sb, string prefix, object? value) { - string s = value?.ToString(); + string? s = value?.ToString(); if (!string.IsNullOrWhiteSpace(s)) { if (sb.Length != 0) sb.Append(','); @@ -623,9 +971,9 @@ private static void Append(StringBuilder sb, string prefix, object value) private void Clear() { - ClientName = ServiceName = User = Password = tieBreaker = sslHost = configChannel = null; - keepAlive = syncTimeout = asyncTimeout = connectTimeout = writeBuffer = connectRetry = configCheckSeconds = DefaultDatabase = null; - allowAdmin = abortOnConnectFail = highPrioritySocketThreads = resolveDns = ssl = null; + ClientName = ServiceName = user = password = tieBreaker = sslHost = configChannel = null; + keepAlive = syncTimeout = asyncTimeout = connectTimeout = connectRetry = configCheckSeconds = DefaultDatabase = null; + allowAdmin = abortOnConnectFail = resolveDns = ssl = setClientLibrary = highIntegrity = null; SslProtocols = null; defaultVersion = null; EndPoints.Clear(); @@ -633,13 +981,14 @@ private void Clear() CertificateSelection = null; CertificateValidation = null; - ChannelPrefix = default(RedisChannel); + ChannelPrefix = default; 
SocketManager = null; + Tunnel = null; } object ICloneable.Clone() => Clone(); - private void DoParse(string configuration, bool ignoreUnknown) + private ConfigurationOptions DoParse(string configuration, bool ignoreUnknown) { if (configuration == null) { @@ -648,14 +997,14 @@ private void DoParse(string configuration, bool ignoreUnknown) if (string.IsNullOrWhiteSpace(configuration)) { - throw new ArgumentException("is empty", configuration); + throw new ArgumentException("is empty", nameof(configuration)); } Clear(); // break it down by commas var arr = configuration.Split(StringSplits.Comma); - Dictionary map = null; + Dictionary? map = null; foreach (var paddedOption in arr) { var option = paddedOption.Trim(); @@ -696,7 +1045,7 @@ private void DoParse(string configuration, bool ignoreUnknown) ClientName = value; break; case OptionKeys.ChannelPrefix: - ChannelPrefix = value; + ChannelPrefix = RedisChannel.Literal(value); break; case OptionKeys.ConfigChannel: ConfigurationChannel = value; @@ -717,10 +1066,10 @@ private void DoParse(string configuration, bool ignoreUnknown) DefaultVersion = OptionKeys.ParseVersion(key, value); break; case OptionKeys.User: - User = value; + user = value; break; case OptionKeys.Password: - Password = value; + password = value; break; case OptionKeys.TieBreaker: TieBreaker = value; @@ -731,37 +1080,62 @@ private void DoParse(string configuration, bool ignoreUnknown) case OptionKeys.SslHost: SslHost = value; break; - case OptionKeys.HighPrioritySocketThreads: - HighPrioritySocketThreads = OptionKeys.ParseBoolean(key, value); - break; - case OptionKeys.WriteBuffer: -#pragma warning disable CS0618 // Type or member is obsolete - WriteBuffer = OptionKeys.ParseInt32(key, value); -#pragma warning restore CS0618 // Type or member is obsolete - break; case OptionKeys.Proxy: Proxy = OptionKeys.ParseProxy(key, value); break; - case OptionKeys.ResponseTimeout: -#pragma warning disable CS0618 // Type or member is obsolete - ResponseTimeout = 
OptionKeys.ParseInt32(key, value, minValue: 1); -#pragma warning restore CS0618 // Type or member is obsolete - break; case OptionKeys.DefaultDatabase: DefaultDatabase = OptionKeys.ParseInt32(key, value); break; - case OptionKeys.PreserveAsyncOrder: - break; case OptionKeys.SslProtocols: SslProtocols = OptionKeys.ParseSslProtocols(key, value); break; + case OptionKeys.SetClientLibrary: + SetClientLibrary = OptionKeys.ParseBoolean(key, value); + break; + case OptionKeys.HighIntegrity: + HighIntegrity = OptionKeys.ParseBoolean(key, value); + break; + case OptionKeys.Tunnel: + if (value.IsNullOrWhiteSpace()) + { + Tunnel = null; + } + else + { + // For backwards compatibility with `http:address_with_port`. + if (value.StartsWith("http:") && !value.StartsWith("http://")) + { + value = value.Insert(5, "//"); + } + + var uri = new Uri(value, UriKind.Absolute); + if (uri.Scheme != "http") + { + throw new ArgumentException("Tunnel cannot be parsed: " + value); + } + if (!Format.TryParseEndPoint($"{uri.Host}:{uri.Port}", out var ep)) + { + throw new ArgumentException("HTTP tunnel cannot be parsed: " + value); + } + Tunnel = Tunnel.HttpProxy(ep); + } + break; + case OptionKeys.Protocol: + Protocol = OptionKeys.ParseRedisProtocol(key, value); + break; + // Deprecated options we ignore... 
+ case OptionKeys.HighPrioritySocketThreads: + case OptionKeys.PreserveAsyncOrder: + case OptionKeys.ResponseTimeout: + case OptionKeys.WriteBuffer: + break; default: if (!string.IsNullOrEmpty(key) && key[0] == '$') { var cmdName = option.Substring(1, idx - 1); if (Enum.TryParse(cmdName, true, out RedisCommand cmd)) { - if (map == null) map = new Dictionary(StringComparer.OrdinalIgnoreCase); + map ??= new Dictionary(StringComparer.OrdinalIgnoreCase); map[cmdName] = value; } } @@ -774,53 +1148,75 @@ private void DoParse(string configuration, bool ignoreUnknown) } else { - var ep = Format.TryParseEndPoint(option); - if (ep != null && !EndPoints.Contains(ep)) EndPoints.Add(ep); + if (Format.TryParseEndPoint(option, out var ep) && !EndPoints.Contains(ep)) + { + EndPoints.Add(ep); + } } } if (map != null && map.Count != 0) { CommandMap = CommandMap.Create(map); } + return this; } - // Microsoft Azure team wants abortConnect=false by default - private bool GetDefaultAbortOnConnectFailSetting() => !IsAzureEndpoint(); + /// + /// Allows custom transport implementations, such as http-tunneling via a proxy. + /// + public Tunnel? Tunnel { get; set; } + + /// + /// Specify the redis protocol type. + /// + public RedisProtocol? 
Protocol { get; set; } + + internal bool TryResp3() + { + // note: deliberately leaving the IsAvailable duplicated to use short-circuit + + // if (Protocol is null) + // { + // // if not specified, lean on the server version and whether HELLO is available + // return new RedisFeatures(DefaultVersion).Resp3 && CommandMap.IsAvailable(RedisCommand.HELLO); + // } + // else + // ^^^ left for context; originally our intention was to auto-enable RESP3 by default *if* the server version + // is >= 6; however, it turns out (see extensive conversation here https://github.com/StackExchange/StackExchange.Redis/pull/2396) + // that tangential undocumented API breaks were made at the same time; this means that even if we fix every + // edge case in the library itself, the break is still visible to external callers via Execute[Async]; with an + // abundance of caution, we are therefore making RESP3 explicit opt-in only for now; we may revisit this in a major + { + return Protocol.GetValueOrDefault() >= RedisProtocol.Resp3 && CommandMap.IsAvailable(RedisCommand.HELLO); + } + } - private bool IsAzureEndpoint() + internal static bool TryParseRedisProtocol(string? 
value, out RedisProtocol protocol) { - foreach (var ep in EndPoints) + // accept raw integers too, but only trust them if we recognize them + // (note we need to do this before enums, because Enum.TryParse will + // accept integers as the raw value, which is not what we want here) + if (value is not null) { - if (ep is DnsEndPoint dnsEp) + if (Format.TryParseInt32(value, out int i32)) { - int firstDot = dnsEp.Host.IndexOf('.'); - if (firstDot >= 0) + switch (i32) { - switch (dnsEp.Host.Substring(firstDot).ToLowerInvariant()) - { - case ".redis.cache.windows.net": - case ".redis.cache.chinacloudapi.cn": - case ".redis.cache.usgovcloudapi.net": - case ".redis.cache.cloudapi.de": - return true; - } + case 2: + protocol = RedisProtocol.Resp2; + return true; + case 3: + protocol = RedisProtocol.Resp3; + return true; } } + else + { + if (Enum.TryParse(value, true, out protocol)) return true; + } } - + protocol = default; return false; } - - private string InferSslHostFromEndpoints() - { - var dnsEndpoints = EndPoints.Select(endpoint => endpoint as DnsEndPoint); - string dnsHost = dnsEndpoints.FirstOrDefault()?.Host; - if (dnsEndpoints.All(dnsEndpoint => (dnsEndpoint != null && dnsEndpoint.Host == dnsHost))) - { - return dnsHost; - } - - return null; - } } } diff --git a/src/StackExchange.Redis/ConnectionCounters.cs b/src/StackExchange.Redis/ConnectionCounters.cs index b9a3da5be..546e2eff5 100644 --- a/src/StackExchange.Redis/ConnectionCounters.cs +++ b/src/StackExchange.Redis/ConnectionCounters.cs @@ -3,7 +3,7 @@ namespace StackExchange.Redis { /// - /// Illustrates the counters associated with an individual connection + /// Illustrates the counters associated with an individual connection. /// public class ConnectionCounters { @@ -13,78 +13,78 @@ internal ConnectionCounters(ConnectionType connectionType) } /// - /// The number of operations that have been completed asynchronously + /// The number of operations that have been completed asynchronously. 
/// public long CompletedAsynchronously { get; internal set; } /// - /// The number of operations that have been completed synchronously + /// The number of operations that have been completed synchronously. /// public long CompletedSynchronously { get; internal set; } /// - /// The type of this connection + /// The type of this connection. /// public ConnectionType ConnectionType { get; } /// - /// The number of operations that failed to complete asynchronously + /// The number of operations that failed to complete asynchronously. /// public long FailedAsynchronously { get; internal set; } /// - /// Indicates if there are any pending items or failures on this connection + /// Indicates if there are any pending items or failures on this connection. /// public bool IsEmpty => PendingUnsentItems == 0 && SentItemsAwaitingResponse == 0 && ResponsesAwaitingAsyncCompletion == 0 && FailedAsynchronously == 0; /// - /// Indicates the total number of messages despatched to a non-preferred endpoint, for example sent to a master - /// when the caller stated a preference of replica + /// Indicates the total number of messages dispatched to a non-preferred endpoint, for example sent + /// to a primary when the caller stated a preference of replica. /// public long NonPreferredEndpointCount { get; internal set; } /// - /// The number of operations performed on this connection + /// The number of operations performed on this connection. /// public long OperationCount { get; internal set; } /// - /// Operations that have been requested, but which have not yet been sent to the server + /// Operations that have been requested, but which have not yet been sent to the server. /// public int PendingUnsentItems { get; internal set; } /// - /// Operations for which the response has been processed, but which are awaiting asynchronous completion + /// Operations for which the response has been processed, but which are awaiting asynchronous completion. 
/// public int ResponsesAwaitingAsyncCompletion { get; internal set; } /// - /// Operations that have been sent to the server, but which are awaiting a response + /// Operations that have been sent to the server, but which are awaiting a response. /// public int SentItemsAwaitingResponse { get; internal set; } /// - /// The number of sockets used by this logical connection (total, including reconnects) + /// The number of sockets used by this logical connection (total, including reconnects). /// public long SocketCount { get; internal set; } /// - /// The number of subscriptions (with and without patterns) currently held against this connection + /// The number of subscriptions (with and without patterns) currently held against this connection. /// - public long Subscriptions { get;internal set; } + public long Subscriptions { get; internal set; } /// - /// Indicates the total number of outstanding items against this connection + /// Indicates the total number of outstanding items against this connection. /// public int TotalOutstanding => PendingUnsentItems + SentItemsAwaitingResponse + ResponsesAwaitingAsyncCompletion; /// - /// Indicates the total number of writers items against this connection + /// Indicates the total number of writers items against this connection. /// public int WriterCount { get; internal set; } /// - /// See Object.ToString() + /// See . 
/// public override string ToString() { @@ -109,20 +109,18 @@ internal void Add(ConnectionCounters other) WriterCount += other.WriterCount; } - internal bool Any() - { - return CompletedAsynchronously != 0 - || CompletedSynchronously != 0 - || FailedAsynchronously != 0 - || NonPreferredEndpointCount != 0 - || OperationCount != 0 - || PendingUnsentItems != 0 - || ResponsesAwaitingAsyncCompletion != 0 - || SentItemsAwaitingResponse != 0 - || SocketCount != 0 - || Subscriptions != 0 - || WriterCount != 0; - } + internal bool Any() => + CompletedAsynchronously != 0 + || CompletedSynchronously != 0 + || FailedAsynchronously != 0 + || NonPreferredEndpointCount != 0 + || OperationCount != 0 + || PendingUnsentItems != 0 + || ResponsesAwaitingAsyncCompletion != 0 + || SentItemsAwaitingResponse != 0 + || SocketCount != 0 + || Subscriptions != 0 + || WriterCount != 0; internal void Append(StringBuilder sb) { diff --git a/src/StackExchange.Redis/ConnectionFailedEventArgs.cs b/src/StackExchange.Redis/ConnectionFailedEventArgs.cs index e87a7eff4..5d165add1 100644 --- a/src/StackExchange.Redis/ConnectionFailedEventArgs.cs +++ b/src/StackExchange.Redis/ConnectionFailedEventArgs.cs @@ -5,13 +5,13 @@ namespace StackExchange.Redis { /// - /// Contains information about a server connection failure + /// Contains information about a server connection failure. /// public class ConnectionFailedEventArgs : EventArgs, ICompletable { - private readonly EventHandler handler; + private readonly EventHandler? handler; private readonly object sender; - internal ConnectionFailedEventArgs(EventHandler handler, object sender, EndPoint endPoint, ConnectionType connectionType, ConnectionFailureType failureType, Exception exception, string physicalName) + internal ConnectionFailedEventArgs(EventHandler? handler, object sender, EndPoint? endPoint, ConnectionType connectionType, ConnectionFailureType failureType, Exception? exception, string? 
physicalName) { this.handler = handler; this.sender = sender; @@ -29,47 +29,43 @@ internal ConnectionFailedEventArgs(EventHandler handl /// Redis endpoint. /// Redis connection type. /// Redis connection failure type. - /// The exception occured. + /// The exception that occurred. /// Connection physical name. public ConnectionFailedEventArgs(object sender, EndPoint endPoint, ConnectionType connectionType, ConnectionFailureType failureType, Exception exception, string physicalName) - : this (null, sender, endPoint, connectionType, failureType, exception, physicalName) + : this(null, sender, endPoint, connectionType, failureType, exception, physicalName) { } private readonly string _physicalName; /// - /// Gets the connection-type of the failing connection + /// Gets the connection-type of the failing connection. /// public ConnectionType ConnectionType { get; } /// - /// Gets the failing server-endpoint + /// Gets the failing server-endpoint. /// - public EndPoint EndPoint { get; } + public EndPoint? EndPoint { get; } /// - /// Gets the exception if available (this can be null) + /// Gets the exception if available (this can be null). /// - public Exception Exception { get; } + public Exception? Exception { get; } /// - /// The type of failure + /// The type of failure. /// public ConnectionFailureType FailureType { get; } - void ICompletable.AppendStormLog(StringBuilder sb) - { - sb.Append("event, connection-failed: "); - if (EndPoint == null) sb.Append("n/a"); - else sb.Append(Format.ToString(EndPoint)); - } + void ICompletable.AppendStormLog(StringBuilder sb) => + sb.Append("event, connection-failed: ").Append(EndPoint != null ? Format.ToString(EndPoint) : "n/a"); bool ICompletable.TryComplete(bool isAsync) => ConnectionMultiplexer.TryCompleteHandler(handler, sender, this, isAsync); /// /// Returns the physical name of the connection. /// - public override string ToString() => _physicalName ?? 
base.ToString(); + public override string ToString() => _physicalName; } } diff --git a/src/StackExchange.Redis/ConnectionMultiplexer.Compat.cs b/src/StackExchange.Redis/ConnectionMultiplexer.Compat.cs new file mode 100644 index 000000000..6786e87d2 --- /dev/null +++ b/src/StackExchange.Redis/ConnectionMultiplexer.Compat.cs @@ -0,0 +1,22 @@ +using System; +using System.ComponentModel; +using System.Threading.Tasks; + +namespace StackExchange.Redis; + +public partial class ConnectionMultiplexer +{ + /// + /// No longer used. + /// + [Obsolete("No longer used, will be removed in 3.0.")] + [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] + public static TaskFactory Factory { get => Task.Factory; set { } } + + /// + /// Gets or sets whether asynchronous operations should be invoked in a way that guarantees their original delivery order. + /// + [Obsolete("Not supported; if you require ordered pub/sub, please see " + nameof(ChannelMessageQueue) + ", will be removed in 3.0", false)] + [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] + public bool PreserveAsyncOrder { get => false; set { } } +} diff --git a/src/StackExchange.Redis/ConnectionMultiplexer.Debug.cs b/src/StackExchange.Redis/ConnectionMultiplexer.Debug.cs new file mode 100644 index 000000000..bd77ec2f7 --- /dev/null +++ b/src/StackExchange.Redis/ConnectionMultiplexer.Debug.cs @@ -0,0 +1,48 @@ +using System.Threading; + +namespace StackExchange.Redis; + +public partial class ConnectionMultiplexer +{ + private static int _collectedWithoutDispose, s_DisposedCount, s_MuxerCreateCount; + internal static int CollectedWithoutDispose => Volatile.Read(ref _collectedWithoutDispose); + + internal static int GetLiveObjectCount(out int created, out int disposed, out int finalized) + { + // read destroy first, to prevent negative numbers in race conditions + disposed = Volatile.Read(ref s_DisposedCount); + created = Volatile.Read(ref s_MuxerCreateCount); + finalized = Volatile.Read(ref 
_collectedWithoutDispose); + return created - (disposed + finalized); + } + + /// + /// Invoked by the garbage collector. + /// + ~ConnectionMultiplexer() + { + Interlocked.Increment(ref _collectedWithoutDispose); + } + + bool IInternalConnectionMultiplexer.AllowConnect + { + get => AllowConnect; + set => AllowConnect = value; + } + + bool IInternalConnectionMultiplexer.IgnoreConnect + { + get => IgnoreConnect; + set => IgnoreConnect = value; + } + + /// + /// For debugging: when not enabled, servers cannot connect. + /// + internal volatile bool AllowConnect = true; + + /// + /// For debugging: when not enabled, end-connect is silently ignored (to simulate a long-running connect). + /// + internal volatile bool IgnoreConnect; +} diff --git a/src/StackExchange.Redis/ConnectionMultiplexer.Events.cs b/src/StackExchange.Redis/ConnectionMultiplexer.Events.cs new file mode 100644 index 000000000..0a8b95be5 --- /dev/null +++ b/src/StackExchange.Redis/ConnectionMultiplexer.Events.cs @@ -0,0 +1,120 @@ +using System; +using System.Net; +using System.Runtime.CompilerServices; +using StackExchange.Redis.Maintenance; + +namespace StackExchange.Redis; + +public partial class ConnectionMultiplexer +{ + /// + /// Raised whenever a physical connection fails. + /// + public event EventHandler? ConnectionFailed; + internal void OnConnectionFailed(EndPoint endpoint, ConnectionType connectionType, ConnectionFailureType failureType, Exception exception, bool reconfigure, string? physicalName) + { + if (_isDisposed) return; + var handler = ConnectionFailed; + if (handler != null) + { + CompleteAsWorker(new ConnectionFailedEventArgs(handler, this, endpoint, connectionType, failureType, exception, physicalName)); + } + if (reconfigure) + { + ReconfigureIfNeeded(endpoint, false, "connection failed"); + } + } + + /// + /// Raised whenever an internal error occurs (this is primarily for debugging). + /// + public event EventHandler? 
InternalError;
+    internal void OnInternalError(Exception exception, EndPoint? endpoint = null, ConnectionType connectionType = ConnectionType.None, [CallerMemberName] string? origin = null)
+    {
+        try
+        {
+            if (_isDisposed) return;
+            Trace("Internal error: " + origin + ", " + (exception == null ? "unknown" : exception.Message));
+            var handler = InternalError;
+            if (handler != null)
+            {
+                CompleteAsWorker(new InternalErrorEventArgs(handler, this, endpoint, connectionType, exception, origin));
+            }
+        }
+        catch
+        {
+            // Our internal error event failed...whatcha gonna do, exactly?
+        }
+    }
+
+    /// <summary>
+    /// Raised whenever a physical connection is established.
+    /// </summary>
+    public event EventHandler<ConnectionFailedEventArgs>? ConnectionRestored;
+    internal void OnConnectionRestored(EndPoint endpoint, ConnectionType connectionType, string? physicalName)
+    {
+        if (_isDisposed) return;
+        var handler = ConnectionRestored;
+        if (handler != null)
+        {
+            CompleteAsWorker(new ConnectionFailedEventArgs(handler, this, endpoint, connectionType, ConnectionFailureType.None, null, physicalName));
+        }
+        ReconfigureIfNeeded(endpoint, false, "connection restored");
+    }
+
+    /// <summary>
+    /// Raised when configuration changes are detected.
+    /// </summary>
+    public event EventHandler<EndPointEventArgs>? ConfigurationChanged;
+    internal void OnConfigurationChanged(EndPoint endpoint) => OnEndpointChanged(endpoint, ConfigurationChanged);
+
+    /// <summary>
+    /// Raised when nodes are explicitly requested to reconfigure via broadcast.
+    /// This usually means primary/replica changes.
+    /// </summary>
+    public event EventHandler<EndPointEventArgs>? ConfigurationChangedBroadcast;
+    internal void OnConfigurationChangedBroadcast(EndPoint endpoint) => OnEndpointChanged(endpoint, ConfigurationChangedBroadcast);
+
+    private void OnEndpointChanged(EndPoint endpoint, EventHandler<EndPointEventArgs>? handler)
+    {
+        if (_isDisposed) return;
+        if (handler != null)
+        {
+            CompleteAsWorker(new EndPointEventArgs(handler, this, endpoint));
+        }
+    }
+
+    ///
+    /// Raised when server indicates a maintenance event is going to happen.
+ /// + public event EventHandler? ServerMaintenanceEvent; + internal void OnServerMaintenanceEvent(ServerMaintenanceEvent e) => + ServerMaintenanceEvent?.Invoke(this, e); + + /// + /// Raised when a hash-slot has been relocated. + /// + public event EventHandler? HashSlotMoved; + internal void OnHashSlotMoved(int hashSlot, EndPoint? old, EndPoint @new) + { + var handler = HashSlotMoved; + if (handler != null) + { + CompleteAsWorker(new HashSlotMovedEventArgs(handler, this, hashSlot, old, @new)); + } + } + + /// + /// Raised when a server replied with an error message. + /// + public event EventHandler? ErrorMessage; + internal void OnErrorMessage(EndPoint endpoint, string message) + { + if (_isDisposed) return; + var handler = ErrorMessage; + if (handler != null) + { + CompleteAsWorker(new RedisErrorEventArgs(handler, this, endpoint, message)); + } + } +} diff --git a/src/StackExchange.Redis/ConnectionMultiplexer.ExportConfiguration.cs b/src/StackExchange.Redis/ConnectionMultiplexer.ExportConfiguration.cs new file mode 100644 index 000000000..c095ffd53 --- /dev/null +++ b/src/StackExchange.Redis/ConnectionMultiplexer.ExportConfiguration.cs @@ -0,0 +1,142 @@ +using System; +using System.Collections.Generic; +using System.IO; +using System.IO.Compression; +using System.Linq; +using System.Threading.Tasks; + +namespace StackExchange.Redis; + +public partial class ConnectionMultiplexer +{ + private const string NoContent = "(no content)"; + + /// + /// Write the configuration of all servers to an output stream. + /// + /// The destination stream to write the export to. + /// The options to use for this export. + public void ExportConfiguration(Stream destination, ExportOptions options = ExportOptions.All) + { + if (destination == null) throw new ArgumentNullException(nameof(destination)); + + // What is possible, given the command map? 
+ ExportOptions mask = 0; + if (CommandMap.IsAvailable(RedisCommand.INFO)) mask |= ExportOptions.Info; + if (CommandMap.IsAvailable(RedisCommand.CONFIG)) mask |= ExportOptions.Config; + if (CommandMap.IsAvailable(RedisCommand.CLIENT)) mask |= ExportOptions.Client; + if (CommandMap.IsAvailable(RedisCommand.CLUSTER)) mask |= ExportOptions.Cluster; + options &= mask; + + using (var zip = new ZipArchive(destination, ZipArchiveMode.Create, true)) + { + var arr = GetServerSnapshot(); + foreach (var server in arr) + { + const CommandFlags flags = CommandFlags.None; + if (!server.IsConnected) continue; + var api = GetServer(server.EndPoint); + + List tasks = new List(); + if ((options & ExportOptions.Info) != 0) + { + tasks.Add(api.InfoRawAsync(flags: flags)); + } + if ((options & ExportOptions.Config) != 0) + { + tasks.Add(api.ConfigGetAsync(flags: flags)); + } + if ((options & ExportOptions.Client) != 0) + { + tasks.Add(api.ClientListAsync(flags: flags)); + } + if ((options & ExportOptions.Cluster) != 0) + { + tasks.Add(api.ClusterNodesRawAsync(flags: flags)); + } + + WaitAllIgnoreErrors(tasks.ToArray()); + + int index = 0; + var prefix = Format.ToString(server.EndPoint); + if ((options & ExportOptions.Info) != 0) + { + Write(zip, prefix + "/info.txt", tasks[index++], WriteNormalizingLineEndings); + } + if ((options & ExportOptions.Config) != 0) + { + Write[]>(zip, prefix + "/config.txt", tasks[index++], (settings, writer) => + { + foreach (var setting in settings) + { + writer.WriteLine("{0}={1}", setting.Key, setting.Value); + } + }); + } + if ((options & ExportOptions.Client) != 0) + { + Write(zip, prefix + "/clients.txt", tasks[index++], (clients, writer) => + { + if (clients == null) + { + writer.WriteLine(NoContent); + } + else + { + foreach (var client in clients) + { + writer.WriteLine(client.Raw); + } + } + }); + } + if ((options & ExportOptions.Cluster) != 0) + { + Write(zip, prefix + "/nodes.txt", tasks[index++], WriteNormalizingLineEndings); + } + } + } + } + 
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Usage", "CA2202:Do not dispose objects multiple times", Justification = "We're not double disposing.")] + private static void Write(ZipArchive zip, string name, Task task, Action callback) + { + var entry = zip.CreateEntry(name, CompressionLevel.Optimal); + using (var stream = entry.Open()) + using (var writer = new StreamWriter(stream)) + { + TaskStatus status = task.Status; + switch (status) + { + case TaskStatus.RanToCompletion: + T val = ((Task)task).Result; + callback(val, writer); + break; + case TaskStatus.Faulted: + writer.WriteLine(string.Join(", ", task.Exception!.InnerExceptions.Select(x => x.Message))); + break; + default: + writer.WriteLine(status.ToString()); + break; + } + } + } + + private static void WriteNormalizingLineEndings(string source, StreamWriter writer) + { + if (source == null) + { + writer.WriteLine(NoContent); + } + else + { + using (var reader = new StringReader(source)) + { + while (reader.ReadLine() is string line) + { + writer.WriteLine(line); // normalize line endings + } + } + } + } +} diff --git a/src/StackExchange.Redis/ConnectionMultiplexer.FeatureFlags.cs b/src/StackExchange.Redis/ConnectionMultiplexer.FeatureFlags.cs new file mode 100644 index 000000000..975da5de1 --- /dev/null +++ b/src/StackExchange.Redis/ConnectionMultiplexer.FeatureFlags.cs @@ -0,0 +1,57 @@ +using System; +using System.ComponentModel; +using System.Threading; + +namespace StackExchange.Redis; + +public partial class ConnectionMultiplexer +{ + private static FeatureFlags s_featureFlags; + + [Flags] + private enum FeatureFlags + { + None, + PreventThreadTheft = 1, + } + + private static void SetAutodetectFeatureFlags() + { + bool value = false; + try + { + // attempt to detect a known problem scenario + value = SynchronizationContext.Current?.GetType()?.Name + == "LegacyAspNetSynchronizationContext"; + } + catch { } + SetFeatureFlag(nameof(FeatureFlags.PreventThreadTheft), value); + } + + /// + 
/// Enables or disables a feature flag. + /// This should only be used under support guidance, and should not be rapidly toggled. + /// + [EditorBrowsable(EditorBrowsableState.Never)] + [Browsable(false)] + public static void SetFeatureFlag(string flag, bool enabled) + { + if (Enum.TryParse(flag, true, out var flags)) + { + if (enabled) s_featureFlags |= flags; + else s_featureFlags &= ~flags; + } + } + + /// + /// Returns the state of a feature flag. + /// This should only be used under support guidance. + /// + [EditorBrowsable(EditorBrowsableState.Never)] + [Browsable(false)] + public static bool GetFeatureFlag(string flag) + => Enum.TryParse(flag, true, out var flags) + && (s_featureFlags & flags) == flags; + + internal static bool PreventThreadTheft => (s_featureFlags & FeatureFlags.PreventThreadTheft) != 0; +} diff --git a/src/StackExchange.Redis/ConnectionMultiplexer.LibraryName.cs b/src/StackExchange.Redis/ConnectionMultiplexer.LibraryName.cs new file mode 100644 index 000000000..2c79f80c5 --- /dev/null +++ b/src/StackExchange.Redis/ConnectionMultiplexer.LibraryName.cs @@ -0,0 +1,76 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Linq; +using System.Threading; + +namespace StackExchange.Redis; + +public partial class ConnectionMultiplexer +{ + private readonly HashSet _libraryNameSuffixHash = new(); + private string _libraryNameSuffixCombined = ""; + + /// + public void AddLibraryNameSuffix(string suffix) + { + if (string.IsNullOrWhiteSpace(suffix)) return; // trivial + + // sanitize and re-check + suffix = ServerEndPoint.ClientInfoSanitize(suffix ?? 
"").Trim(); + if (string.IsNullOrWhiteSpace(suffix)) return; // trivial + + lock (_libraryNameSuffixHash) + { + if (!_libraryNameSuffixHash.Add(suffix)) return; // already cited; nothing to do + + _libraryNameSuffixCombined = "-" + string.Join("-", _libraryNameSuffixHash.OrderBy(_ => _)); + } + + // if we get here, we *actually changed something*; we can retroactively fixup the connections + var libName = GetFullLibraryName(); // note this also checks SetClientLibrary + if (string.IsNullOrWhiteSpace(libName) || !CommandMap.IsAvailable(RedisCommand.CLIENT)) return; // disabled on no lib name + + // note that during initial handshake we use raw Message; this is low frequency - no + // concern over overhead of Execute here + var args = new object[] { RedisLiterals.SETINFO, RedisLiterals.lib_name, libName }; + foreach (var server in GetServers()) + { + try + { + // note we can only fixup the *interactive* channel; that's tolerable here + if (server.IsConnected) + { + // best effort only + server.Execute("CLIENT", args, CommandFlags.FireAndForget); + } + } + catch (Exception ex) + { + // if an individual server trips, that's fine - best effort; note we're using + // F+F here anyway, so we don't *expect* any failures + Debug.WriteLine(ex.Message); + } + } + } + + internal string GetFullLibraryName() + { + var config = RawConfig; + if (!config.SetClientLibrary) return ""; // disabled + + var libName = config.LibraryName; + if (string.IsNullOrWhiteSpace(libName)) + { + // defer to provider if missing (note re null vs blank; if caller wants to disable + // it, they should set SetClientLibrary to false, not set the name to empty string) + libName = config.Defaults.LibraryName; + } + + libName = ServerEndPoint.ClientInfoSanitize(libName); + // if no primary name, return nothing, even if suffixes exist + if (string.IsNullOrWhiteSpace(libName)) return ""; + + return libName + Volatile.Read(ref _libraryNameSuffixCombined); + } +} diff --git 
a/src/StackExchange.Redis/ConnectionMultiplexer.Profiling.cs b/src/StackExchange.Redis/ConnectionMultiplexer.Profiling.cs index 29a1d0246..c60966234 100644 --- a/src/StackExchange.Redis/ConnectionMultiplexer.Profiling.cs +++ b/src/StackExchange.Redis/ConnectionMultiplexer.Profiling.cs @@ -1,18 +1,17 @@ using System; using StackExchange.Redis.Profiling; -namespace StackExchange.Redis +namespace StackExchange.Redis; + +public partial class ConnectionMultiplexer { - public partial class ConnectionMultiplexer - { - private Func _profilingSessionProvider; + private Func? _profilingSessionProvider; - /// - /// Register a callback to provide an on-demand ambient session provider based on the - /// calling context; the implementing code is responsible for reliably resolving the same provider - /// based on ambient context, or returning null to not profile - /// - /// The session provider to register. - public void RegisterProfiler(Func profilingSessionProvider) => _profilingSessionProvider = profilingSessionProvider; - } + /// + /// Register a callback to provide an on-demand ambient session provider based on the + /// calling context; the implementing code is responsible for reliably resolving the same provider + /// based on ambient context, or returning null to not profile. + /// + /// The session provider to register. 
+ public void RegisterProfiler(Func profilingSessionProvider) => _profilingSessionProvider = profilingSessionProvider; } diff --git a/src/StackExchange.Redis/ConnectionMultiplexer.ReaderWriter.cs b/src/StackExchange.Redis/ConnectionMultiplexer.ReaderWriter.cs index f0e9fa6b1..a30da7865 100644 --- a/src/StackExchange.Redis/ConnectionMultiplexer.ReaderWriter.cs +++ b/src/StackExchange.Redis/ConnectionMultiplexer.ReaderWriter.cs @@ -1,18 +1,19 @@ -namespace StackExchange.Redis +using System.Diagnostics.CodeAnalysis; + +namespace StackExchange.Redis; + +public partial class ConnectionMultiplexer { - public partial class ConnectionMultiplexer - { - internal SocketManager SocketManager { get; private set; } + internal SocketManager? SocketManager { get; private set; } - partial void OnCreateReaderWriter(ConfigurationOptions configuration) - { - SocketManager = configuration.SocketManager ?? SocketManager.Shared; - } + [MemberNotNull(nameof(SocketManager))] + private void OnCreateReaderWriter(ConfigurationOptions configuration) + { + SocketManager = configuration.SocketManager ?? SocketManager.Shared; + } - partial void OnCloseReaderWriter() - { - SocketManager = null; - } - partial void OnWriterCreated(); + private void OnCloseReaderWriter() + { + SocketManager = null; } } diff --git a/src/StackExchange.Redis/ConnectionMultiplexer.Sentinel.cs b/src/StackExchange.Redis/ConnectionMultiplexer.Sentinel.cs new file mode 100644 index 000000000..61b36b014 --- /dev/null +++ b/src/StackExchange.Redis/ConnectionMultiplexer.Sentinel.cs @@ -0,0 +1,467 @@ +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Net; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.Extensions.Logging; +using Pipelines.Sockets.Unofficial; + +namespace StackExchange.Redis; + +public partial class ConnectionMultiplexer +{ + internal EndPoint? currentSentinelPrimaryEndPoint; + internal Timer? 
sentinelPrimaryReconnectTimer; + internal readonly Dictionary sentinelConnectionChildren = new(); + internal ConnectionMultiplexer? sentinelConnection; + + /// + /// Initializes the connection as a Sentinel connection and adds the necessary event handlers to track changes to the managed primaries. + /// + /// The to log to, if any. + internal void InitializeSentinel(ILogger? log) + { + if (ServerSelectionStrategy.ServerType != ServerType.Sentinel) + { + return; + } + + // Subscribe to sentinel change events + ISubscriber sub = GetSubscriber(); + + if (sub.SubscribedEndpoint(RedisChannel.Literal("+switch-master")) == null) + { + sub.Subscribe( + RedisChannel.Literal("+switch-master"), + (__, message) => + { + string[] messageParts = ((string)message!).Split(new[] { ' ' }, StringSplitOptions.RemoveEmptyEntries); + // We don't care about the result of this - we're just trying + _ = Format.TryParseEndPoint(string.Format("{0}:{1}", messageParts[1], messageParts[2]), out var switchBlame); + + lock (sentinelConnectionChildren) + { + // Switch the primary if we have connections for that service + if (sentinelConnectionChildren.TryGetValue(messageParts[0], out ConnectionMultiplexer? child)) + { + // Is the connection still valid? 
+ if (child.IsDisposed) + { + child.ConnectionFailed -= OnManagedConnectionFailed; + child.ConnectionRestored -= OnManagedConnectionRestored; + sentinelConnectionChildren.Remove(messageParts[0]); + } + else + { + SwitchPrimary(switchBlame, child); + } + } + } + }, + CommandFlags.FireAndForget); + } + + // If we lose connection to a sentinel server, + // we need to reconfigure to make sure we still have a subscription to the +switch-master channel + ConnectionFailed += (sender, e) => + // Reconfigure to get subscriptions back online + ReconfigureAsync(first: false, reconfigureAll: true, log, e.EndPoint, "Lost sentinel connection", false).Wait(); + + // Subscribe to new sentinels being added + if (sub.SubscribedEndpoint(RedisChannel.Literal("+sentinel")) == null) + { + sub.Subscribe( + RedisChannel.Literal("+sentinel"), + (_, message) => + { + string[] messageParts = ((string)message!).Split(new[] { ' ' }, StringSplitOptions.RemoveEmptyEntries); + UpdateSentinelAddressList(messageParts[0]); + }, + CommandFlags.FireAndForget); + } + } + + /// + /// Create a new instance that connects to a Sentinel server. + /// + /// The string configuration to use for this multiplexer. + /// The to log to. + public static ConnectionMultiplexer SentinelConnect(string configuration, TextWriter? log = null) => + SentinelConnect(ConfigurationOptions.Parse(configuration), log); + + /// + /// Create a new instance that connects to a Sentinel server. + /// + /// The string configuration to use for this multiplexer. + /// The to log to. + public static Task SentinelConnectAsync(string configuration, TextWriter? log = null) => + SentinelConnectAsync(ConfigurationOptions.Parse(configuration), log); + + /// + /// Create a new instance that connects to a Sentinel server. + /// + /// The configuration options to use for this multiplexer. + /// The to log to. + public static ConnectionMultiplexer SentinelConnect(ConfigurationOptions configuration, TextWriter? 
log = null) + { + SocketConnection.AssertDependencies(); + Validate(configuration); + + return ConnectImpl(configuration, log, ServerType.Sentinel); + } + + /// + /// Create a new instance that connects to a Sentinel server. + /// + /// The configuration options to use for this multiplexer. + /// The to log to. + public static Task SentinelConnectAsync(ConfigurationOptions configuration, TextWriter? log = null) + { + SocketConnection.AssertDependencies(); + Validate(configuration); + + return ConnectImplAsync(configuration, log, ServerType.Sentinel); + } + + /// + /// Create a new instance that connects to a sentinel server, discovers the current primary server + /// for the specified in the config and returns a managed connection to the current primary server. + /// + /// The configuration options to use for this multiplexer. + /// The to log to. + private static ConnectionMultiplexer SentinelPrimaryConnect(ConfigurationOptions configuration, TextWriter? log = null) + { + var sentinelConnection = SentinelConnect(configuration, log); + + var muxer = sentinelConnection.GetSentinelMasterConnection(configuration, log); + // Set reference to sentinel connection so that we can dispose it + muxer.sentinelConnection = sentinelConnection; + + return muxer; + } + + /// + /// Create a new instance that connects to a sentinel server, discovers the current primary server + /// for the specified in the config and returns a managed connection to the current primary server. + /// + /// The configuration options to use for this multiplexer. + /// The to log to. + private static async Task SentinelPrimaryConnectAsync(ConfigurationOptions configuration, TextWriter? 
writer = null) + { + var sentinelConnection = await SentinelConnectAsync(configuration, writer).ForAwait(); + + var muxer = sentinelConnection.GetSentinelMasterConnection(configuration, writer); + // Set reference to sentinel connection so that we can dispose it + muxer.sentinelConnection = sentinelConnection; + + return muxer; + } + + /// + /// Returns a managed connection to the primary server indicated by the in the config. + /// + /// The configuration to be used when connecting to the primary. + /// The writer to log to, if any. + public ConnectionMultiplexer GetSentinelMasterConnection(ConfigurationOptions config, TextWriter? log = null) + { + if (ServerSelectionStrategy.ServerType != ServerType.Sentinel) + { + throw new RedisConnectionException( + ConnectionFailureType.UnableToConnect, + "Sentinel: The ConnectionMultiplexer is not a Sentinel connection. Detected as: " + ServerSelectionStrategy.ServerType); + } + + var serviceName = config.ServiceName; + if (serviceName.IsNullOrEmpty()) + { + throw new ArgumentException("A ServiceName must be specified."); + } + + lock (sentinelConnectionChildren) + { + if (sentinelConnectionChildren.TryGetValue(serviceName, out var sentinelConnectionChild) && !sentinelConnectionChild.IsDisposed) + return sentinelConnectionChild; + } + + bool success = false; + ConnectionMultiplexer? connection = null; + EndPointCollection? endpoints = null; + + var sw = ValueStopwatch.StartNew(); + do + { + // Sentinel has some fun race behavior internally - give things a few shots for a quicker overall connect. + const int queryAttempts = 2; + + EndPoint? 
newPrimaryEndPoint = null; + for (int i = 0; i < queryAttempts && newPrimaryEndPoint is null; i++) + { + newPrimaryEndPoint = GetConfiguredPrimaryForService(serviceName); + } + + if (newPrimaryEndPoint is null) + { + throw new RedisConnectionException( + ConnectionFailureType.UnableToConnect, + $"Sentinel: Failed connecting to configured primary for service: {config.ServiceName}"); + } + + EndPoint[]? replicaEndPoints = null; + for (int i = 0; i < queryAttempts && replicaEndPoints is null; i++) + { + replicaEndPoints = GetReplicasForService(serviceName); + } + + endpoints = config.EndPoints.Clone(); + + // Replace the primary endpoint, if we found another one + // If not, assume the last state is the best we have and minimize the race + if (endpoints.Count == 1) + { + endpoints[0] = newPrimaryEndPoint; + } + else + { + endpoints.Clear(); + endpoints.TryAdd(newPrimaryEndPoint); + } + + if (replicaEndPoints is not null) + { + foreach (var replicaEndPoint in replicaEndPoints) + { + endpoints.TryAdd(replicaEndPoint); + } + } + + connection = ConnectImpl(config, log, endpoints: endpoints); + + // verify role is primary according to: + // https://redis.io/topics/sentinel-clients + if (connection.GetServer(newPrimaryEndPoint)?.Role()?.Value == RedisLiterals.master) + { + success = true; + break; + } + + Thread.Sleep(100); + } + while (sw.ElapsedMilliseconds < config.ConnectTimeout); + + if (!success) + { + throw new RedisConnectionException( + ConnectionFailureType.UnableToConnect, + $"Sentinel: Failed connecting to configured primary for service: {config.ServiceName}"); + } + + // Attach to reconnect event to ensure proper connection to the new primary + connection.ConnectionRestored += OnManagedConnectionRestored; + + // If we lost the connection, run a switch to a least try and get updated info about the primary + connection.ConnectionFailed += OnManagedConnectionFailed; + + lock (sentinelConnectionChildren) + { + sentinelConnectionChildren[serviceName] = connection; + 
} + + // Perform the initial switchover + SwitchPrimary(endpoints[0], connection, log); + + return connection; + } + + [System.Diagnostics.CodeAnalysis.SuppressMessage("Roslynator", "RCS1075:Avoid empty catch clause that catches System.Exception.", Justification = "We don't care.")] + internal void OnManagedConnectionRestored(object? sender, ConnectionFailedEventArgs e) + { + if (sender is not ConnectionMultiplexer connection) + { + return; // This should never happen - called from non-nullable ConnectionFailedEventArgs + } + + var oldTimer = Interlocked.Exchange(ref connection.sentinelPrimaryReconnectTimer, null); + oldTimer?.Dispose(); + + try + { + // Run a switch to make sure we have update-to-date + // information about which primary we should connect to + SwitchPrimary(e.EndPoint, connection); + + try + { + // Verify that the reconnected endpoint is a primary, + // and the correct one otherwise we should reconnect + if (connection.GetServer(e.EndPoint).IsReplica || e.EndPoint != connection.currentSentinelPrimaryEndPoint) + { + // This isn't a primary, so try connecting again + SwitchPrimary(e.EndPoint, connection); + } + } + catch (Exception) + { + // If we get here it means that we tried to reconnect to a server that is no longer + // considered a primary by Sentinel and was removed from the list of endpoints. + + // If we caught an exception, we may have gotten a stale endpoint + // we are not aware of, so retry + SwitchPrimary(e.EndPoint, connection); + } + } + catch (Exception) + { + // Log, but don't throw in an event handler + // TODO: Log via new event handler? a la ConnectionFailed? + } + } + + [System.Diagnostics.CodeAnalysis.SuppressMessage("Roslynator", "RCS1075:Avoid empty catch clause that catches System.Exception.", Justification = "We don't care.")] + internal void OnManagedConnectionFailed(object? 
sender, ConnectionFailedEventArgs e) + { + if (sender is not ConnectionMultiplexer connection) + { + return; // This should never happen - called from non-nullable ConnectionFailedEventArgs + } + + // Periodically check to see if we can reconnect to the proper primary. + // This is here in case we lost our subscription to a good sentinel instance + // or if we miss the published primary change. + if (connection.sentinelPrimaryReconnectTimer == null) + { + connection.sentinelPrimaryReconnectTimer = new Timer( + _ => + { + try + { + // Attempt, but do not fail here + SwitchPrimary(e.EndPoint, connection); + } + catch (Exception) + { + } + finally + { + try + { + connection.sentinelPrimaryReconnectTimer?.Change(TimeSpan.FromSeconds(1), Timeout.InfiniteTimeSpan); + } + catch (ObjectDisposedException) + { + // If we get here the managed connection was restored and the timer was + // disposed by another thread, so there's no need to run the timer again. + } + } + }, + null, + TimeSpan.Zero, + Timeout.InfiniteTimeSpan); + } + } + + internal EndPoint? GetConfiguredPrimaryForService(string serviceName) => + _serverSnapshot // same as GetServerSnapshot, but without forcing span + .Where(static s => s.ServerType == ServerType.Sentinel) + .AsParallel() + .Select(s => + { + try { return GetServer(s.EndPoint).SentinelGetMasterAddressByName(serviceName); } + catch { return null; } + }) + .FirstOrDefault(r => r != null); + + internal EndPoint[]? GetReplicasForService(string serviceName) => + _serverSnapshot // same as GetServerSnapshot, but without forcing span + .Where(static s => s.ServerType == ServerType.Sentinel) + .AsParallel() + .Select(s => + { + try { return GetServer(s.EndPoint).SentinelGetReplicaAddresses(serviceName); } + catch { return null; } + }) + .FirstOrDefault(r => r != null); + + /// + /// Switches the SentinelMasterConnection over to a new primary. + /// + /// The endpoint responsible for the switch. 
+ /// The connection that should be switched over to a new primary endpoint. + /// The writer to log to, if any. + internal void SwitchPrimary(EndPoint? switchBlame, ConnectionMultiplexer connection, TextWriter? writer = null) + { + var logger = Logger.With(writer); + if (connection.RawConfig.ServiceName is not string serviceName) + { + logger?.LogInformationServiceNameNotDefined(); + return; + } + + // Get new primary - try twice + EndPoint newPrimaryEndPoint = GetConfiguredPrimaryForService(serviceName) + ?? GetConfiguredPrimaryForService(serviceName) + ?? throw new RedisConnectionException(ConnectionFailureType.UnableToConnect, $"Sentinel: Failed connecting to switch primary for service: {serviceName}"); + + connection.currentSentinelPrimaryEndPoint = newPrimaryEndPoint; + + if (!connection.servers.Contains(newPrimaryEndPoint)) + { + EndPoint[]? replicaEndPoints = GetReplicasForService(serviceName) + ?? GetReplicasForService(serviceName); + + connection.servers.Clear(); + connection.EndPoints.Clear(); + connection.EndPoints.TryAdd(newPrimaryEndPoint); + if (replicaEndPoints is not null) + { + foreach (var replicaEndPoint in replicaEndPoints) + { + connection.EndPoints.TryAdd(replicaEndPoint); + } + } + Trace($"Switching primary to {newPrimaryEndPoint}"); + // Trigger a reconfigure + connection.ReconfigureAsync( + first: false, + reconfigureAll: false, + log: logger, + blame: switchBlame, + cause: $"Primary switch {serviceName}", + publishReconfigure: false, + publishReconfigureFlags: CommandFlags.PreferMaster).Wait(); + + UpdateSentinelAddressList(serviceName); + } + } + + internal void UpdateSentinelAddressList(string serviceName) + { + var firstCompleteRequest = _serverSnapshot // same as GetServerSnapshot, but without forcing span + .Where(static s => s.ServerType == ServerType.Sentinel) + .AsParallel() + .Select(s => + { + try { return GetServer(s.EndPoint).SentinelGetSentinelAddresses(serviceName); } + catch { return null; } + }) + .FirstOrDefault(r => r != 
null); + + // Ignore errors, as having an updated sentinel list is not essential + if (firstCompleteRequest == null) + return; + + bool hasNew = false; + foreach (EndPoint newSentinel in firstCompleteRequest.Where(x => !EndPoints.Contains(x))) + { + hasNew = true; + EndPoints.TryAdd(newSentinel); + } + + if (hasNew) + { + // Reconfigure the sentinel multiplexer if we added new endpoints + ReconfigureAsync(first: false, reconfigureAll: true, Logger, EndPoints[0], "Updating Sentinel List", false).Wait(); + } + } +} diff --git a/src/StackExchange.Redis/ConnectionMultiplexer.StormLog.cs b/src/StackExchange.Redis/ConnectionMultiplexer.StormLog.cs new file mode 100644 index 000000000..51c62d00e --- /dev/null +++ b/src/StackExchange.Redis/ConnectionMultiplexer.StormLog.cs @@ -0,0 +1,29 @@ +using System.Threading; + +namespace StackExchange.Redis; + +public partial class ConnectionMultiplexer +{ + internal int haveStormLog = 0; + internal string? stormLogSnapshot; + + /// + /// Limit at which to start recording unusual busy patterns (only one log will be retained at a time). + /// Set to a negative value to disable this feature. + /// + public int StormLogThreshold { get; set; } = 15; + + /// + /// Obtains the log of unusual busy patterns. + /// + public string? GetStormLog() => Volatile.Read(ref stormLogSnapshot); + + /// + /// Resets the log of unusual busy patterns. 
+ /// + public void ResetStormLog() + { + Interlocked.Exchange(ref stormLogSnapshot, null); + Interlocked.Exchange(ref haveStormLog, 0); + } +} diff --git a/src/StackExchange.Redis/ConnectionMultiplexer.Threading.cs b/src/StackExchange.Redis/ConnectionMultiplexer.Threading.cs new file mode 100644 index 000000000..a4ad1a025 --- /dev/null +++ b/src/StackExchange.Redis/ConnectionMultiplexer.Threading.cs @@ -0,0 +1,49 @@ +using System; +using System.Threading; +using Pipelines.Sockets.Unofficial; + +namespace StackExchange.Redis; + +public partial class ConnectionMultiplexer +{ + private static readonly WaitCallback s_CompleteAsWorker = s => ((ICompletable)s!).TryComplete(true); + internal static void CompleteAsWorker(ICompletable completable) + { + if (completable is not null) + { + ThreadPool.QueueUserWorkItem(s_CompleteAsWorker, completable); + } + } + + internal static bool TryCompleteHandler(EventHandler? handler, object sender, T args, bool isAsync) where T : EventArgs, ICompletable + { + if (handler is null) return true; + if (isAsync) + { + if (handler.IsSingle()) + { + try + { + handler(sender, args); + } + catch { } + } + else + { + foreach (EventHandler sub in handler.AsEnumerable()) + { + try + { + sub(sender, args); + } + catch { } + } + } + return true; + } + else + { + return false; + } + } +} diff --git a/src/StackExchange.Redis/ConnectionMultiplexer.Verbose.cs b/src/StackExchange.Redis/ConnectionMultiplexer.Verbose.cs new file mode 100644 index 000000000..e4746963b --- /dev/null +++ b/src/StackExchange.Redis/ConnectionMultiplexer.Verbose.cs @@ -0,0 +1,58 @@ +using System; +using System.Diagnostics; +using System.Net; +using System.Runtime.CompilerServices; + +namespace StackExchange.Redis; + +public partial class ConnectionMultiplexer +{ + internal event Action? MessageFaulted; + internal event Action? Closing; + internal event Action? PreTransactionExec, TransactionLog, InfoMessage; + internal event Action? Connecting; + internal event Action? 
Resurrecting; + + partial void OnTrace(string message, string? category); + static partial void OnTraceWithoutContext(string message, string? category); + + [Conditional("VERBOSE")] + internal void Trace(string message, [CallerMemberName] string? category = null) => OnTrace(message, category); + + [Conditional("VERBOSE")] + internal void Trace(bool condition, string message, [CallerMemberName] string? category = null) + { + if (condition) OnTrace(message, category); + } + + [Conditional("VERBOSE")] + internal static void TraceWithoutContext(string message, [CallerMemberName] string? category = null) => OnTraceWithoutContext(message, category); + + [Conditional("VERBOSE")] + internal static void TraceWithoutContext(bool condition, string message, [CallerMemberName] string? category = null) + { + if (condition) OnTraceWithoutContext(message, category); + } + + [Conditional("VERBOSE")] + internal void OnMessageFaulted(Message? msg, Exception? fault, [CallerMemberName] string? origin = default, [CallerFilePath] string? 
path = default, [CallerLineNumber] int lineNumber = default) => + MessageFaulted?.Invoke(msg?.CommandAndKey, fault, $"{origin} ({path}#{lineNumber})"); + + [Conditional("VERBOSE")] + internal void OnInfoMessage(string message) => InfoMessage?.Invoke(message); + + [Conditional("VERBOSE")] + internal void OnClosing(bool complete) => Closing?.Invoke(complete); + + [Conditional("VERBOSE")] + internal void OnConnecting(EndPoint endpoint, ConnectionType connectionType) => Connecting?.Invoke(endpoint, connectionType); + + [Conditional("VERBOSE")] + internal void OnResurrecting(EndPoint endpoint, ConnectionType connectionType) => Resurrecting?.Invoke(endpoint, connectionType); + + [Conditional("VERBOSE")] + internal void OnPreTransactionExec(Message message) => PreTransactionExec?.Invoke(message.CommandAndKey); + + [Conditional("VERBOSE")] + internal void OnTransactionLog(string message) => TransactionLog?.Invoke(message); +} diff --git a/src/StackExchange.Redis/ConnectionMultiplexer.cs b/src/StackExchange.Redis/ConnectionMultiplexer.cs old mode 100755 new mode 100644 index 2ddb20e41..a5995046e --- a/src/StackExchange.Redis/ConnectionMultiplexer.cs +++ b/src/StackExchange.Redis/ConnectionMultiplexer.cs @@ -1,610 +1,390 @@ using System; using System.Collections; using System.Collections.Generic; -using System.Diagnostics; +using System.ComponentModel; +using System.Diagnostics.CodeAnalysis; using System.IO; using System.Linq; using System.Net; +using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; using System.Text; using System.Threading; using System.Threading.Tasks; -using System.Reflection; -using System.IO.Compression; -using System.Runtime.CompilerServices; -using StackExchange.Redis.Profiling; +using Microsoft.Extensions.Logging; using Pipelines.Sockets.Unofficial; -using System.ComponentModel; +using StackExchange.Redis.Profiling; namespace StackExchange.Redis { /// - /// Represents an inter-related group of connections to redis servers + /// 
Represents an inter-related group of connections to redis servers. + /// A reference to this should be held and re-used. /// + /// public sealed partial class ConnectionMultiplexer : IInternalConnectionMultiplexer // implies : IConnectionMultiplexer and : IDisposable { - [Flags] - private enum FeatureFlags - { - None, - PreventThreadTheft = 1, - } - - private static FeatureFlags s_featureFlags; + // This gets accessed for every received event; let's make sure we can process it "raw" + internal readonly byte[]? ConfigurationChangedChannel; + // Unique identifier used when tracing + internal readonly byte[] UniqueId = Guid.NewGuid().ToByteArray(); /// - /// Enables or disables a feature flag; this should only be used under support guidance, and should not be rapidly toggled + /// Tracks overall connection multiplexer counts. /// - [EditorBrowsable(EditorBrowsableState.Never)] - [Browsable(false)] - public static void SetFeatureFlag(string flag, bool enabled) - { - if (Enum.TryParse(flag, true, out var flags)) - { - if (enabled) s_featureFlags |= flags; - else s_featureFlags &= ~flags; - } - } + internal int _connectAttemptCount = 0, _connectCompletedCount = 0, _connectionCloseCount = 0; + internal long syncOps, asyncOps; + private long syncTimeouts, fireAndForgets, asyncTimeouts; + private string? failureMessage, activeConfigCause; + private TimerToken? 
pulse; - static ConnectionMultiplexer() - { - bool value = false; - try - { // attempt to detect a known problem scenario - value = SynchronizationContext.Current?.GetType()?.Name - == "LegacyAspNetSynchronizationContext"; - } - catch { } - SetFeatureFlag(nameof(FeatureFlags.PreventThreadTheft), value); - } + private readonly Hashtable servers = new Hashtable(); + private volatile ServerSnapshot _serverSnapshot = ServerSnapshot.Empty; - /// - /// Returns the state of a feature flag; this should only be used under support guidance - /// - [EditorBrowsable(EditorBrowsableState.Never)] - [Browsable(false)] - public static bool GetFeatureFlag(string flag) - => Enum.TryParse(flag, true, out var flags) - && (s_featureFlags & flags) == flags; + private volatile bool _isDisposed; + internal bool IsDisposed => _isDisposed; + internal ILogger? Logger { get; } - internal static bool PreventThreadTheft => (s_featureFlags & FeatureFlags.PreventThreadTheft) != 0; + internal CommandMap CommandMap { get; } + internal EndPointCollection EndPoints { get; } + internal ConfigurationOptions RawConfig { get; } + internal ServerSelectionStrategy ServerSelectionStrategy { get; } + ServerSelectionStrategy IInternalConnectionMultiplexer.ServerSelectionStrategy => ServerSelectionStrategy; + ConnectionMultiplexer IInternalConnectionMultiplexer.UnderlyingMultiplexer => this; + internal Exception? 
LastException { get; set; } - private static TaskFactory _factory = null; + ConfigurationOptions IInternalConnectionMultiplexer.RawConfig => RawConfig; -#if DEBUG - private static int _collectedWithoutDispose; - internal static int CollectedWithoutDispose => Thread.VolatileRead(ref _collectedWithoutDispose); - /// - /// Invoked by the garbage collector - /// - ~ConnectionMultiplexer() - { - Interlocked.Increment(ref _collectedWithoutDispose); - } -#endif + private int lastReconfigiureTicks = Environment.TickCount; + internal long LastReconfigureSecondsAgo => + unchecked(Environment.TickCount - Volatile.Read(ref lastReconfigiureTicks)) / 1000; + + private int _activeHeartbeatErrors, lastHeartbeatTicks; + internal long LastHeartbeatSecondsAgo => + pulse is null + ? -1 + : unchecked(Environment.TickCount - Volatile.Read(ref lastHeartbeatTicks)) / 1000; + + private static int lastGlobalHeartbeatTicks = Environment.TickCount; + internal static long LastGlobalHeartbeatSecondsAgo => + unchecked(Environment.TickCount - Volatile.Read(ref lastGlobalHeartbeatTicks)) / 1000; - bool IInternalConnectionMultiplexer.AllowConnect + /// + [Obsolete($"Please use {nameof(ConfigurationOptions)}.{nameof(ConfigurationOptions.IncludeDetailInExceptions)} instead - this will be removed in 3.0.")] + [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] + public bool IncludeDetailInExceptions { - get => AllowConnect; - set => AllowConnect = value; + get => RawConfig.IncludeDetailInExceptions; + set => RawConfig.IncludeDetailInExceptions = value; } - bool IInternalConnectionMultiplexer.IgnoreConnect + /// + [Obsolete($"Please use {nameof(ConfigurationOptions)}.{nameof(ConfigurationOptions.IncludePerformanceCountersInExceptions)} instead - this will be removed in 3.0.")] + [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] + public bool IncludePerformanceCountersInExceptions { - get => IgnoreConnect; - set => IgnoreConnect = value; + get => 
RawConfig.IncludePerformanceCountersInExceptions; + set => RawConfig.IncludePerformanceCountersInExceptions = value; } /// - /// For debugging: when not enabled, servers cannot connect + /// Gets the synchronous timeout associated with the connections. /// - internal volatile bool AllowConnect = true; + public int TimeoutMilliseconds => RawConfig.SyncTimeout; /// - /// For debugging: when not enabled, end-connect is silently ignored (to simulate a long-running connect) + /// Gets the asynchronous timeout associated with the connections. /// - internal volatile bool IgnoreConnect; + internal int AsyncTimeoutMilliseconds => RawConfig.AsyncTimeout; /// - /// Tracks overall connection multiplexer counts + /// Gets the client-name that will be used on all new connections. /// - internal int _connectAttemptCount = 0, _connectCompletedCount = 0, _connectionCloseCount = 0; + /// + /// We null coalesce here instead of in Options so that we don't populate it everywhere (e.g. .ToString()), given it's a default. + /// + public string ClientName => RawConfig.ClientName ?? RawConfig.Defaults.ClientName; /// - /// Provides a way of overriding the default Task Factory. If not set, it will use the default Task.Factory. - /// Useful when top level code sets it's own factory which may interfere with Redis queries. + /// Gets the configuration of the connection. /// - public static TaskFactory Factory - { - get => _factory ?? Task.Factory; - set => _factory = value; - } + public string Configuration => RawConfig.ToString(); /// - /// Get summary statistics associates with this server + /// Indicates whether any servers are connected. 
/// - public ServerCounters GetCounters() - { - var snapshot = GetServerSnapshot(); - - var counters = new ServerCounters(null); - for (int i = 0; i < snapshot.Length; i++) - { - counters.Add(snapshot[i].GetCounters()); - } - return counters; - } + public bool IsConnected => _serverSnapshot.Any(static s => s.IsConnected); /// - /// Gets the client-name that will be used on all new connections + /// Indicates whether any servers are currently trying to connect. /// - public string ClientName => RawConfig.ClientName ?? GetDefaultClientName(); + public bool IsConnecting => _serverSnapshot.Any(static s => s.IsConnecting); - private static string defaultClientName; - private static string GetDefaultClientName() + static ConnectionMultiplexer() { - return defaultClientName ??= TryGetAzureRoleInstanceIdNoThrow() - ?? Environment.MachineName - ?? Environment.GetEnvironmentVariable("ComputerName") - ?? "StackExchange.Redis"; + SetAutodetectFeatureFlags(); } - /// - /// Tries to get the Roleinstance Id if Microsoft.WindowsAzure.ServiceRuntime is loaded. - /// In case of any failure, swallows the exception and returns null - /// - internal static string TryGetAzureRoleInstanceIdNoThrow() + private ConnectionMultiplexer(ConfigurationOptions configuration, ServerType? serverType = null, EndPointCollection? 
endpoints = null) { - string roleInstanceId; - // TODO: CoreCLR port pending https://github.com/dotnet/coreclr/issues/919 - try - { - Assembly asm = null; - foreach (var asmb in AppDomain.CurrentDomain.GetAssemblies()) - { - if (asmb.GetName().Name.Equals("Microsoft.WindowsAzure.ServiceRuntime")) - { - asm = asmb; - break; - } - } - if (asm == null) - return null; - - var type = asm.GetType("Microsoft.WindowsAzure.ServiceRuntime.RoleEnvironment"); - - // https://msdn.microsoft.com/en-us/library/microsoft.windowsazure.serviceruntime.roleenvironment.isavailable.aspx - if (!(bool)type.GetProperty("IsAvailable").GetValue(null, null)) - return null; - - var currentRoleInstanceProp = type.GetProperty("CurrentRoleInstance"); - var currentRoleInstanceId = currentRoleInstanceProp.GetValue(null, null); - roleInstanceId = currentRoleInstanceId.GetType().GetProperty("Id").GetValue(currentRoleInstanceId, null).ToString(); - - if (string.IsNullOrEmpty(roleInstanceId)) - { - roleInstanceId = null; - } - } - catch (Exception) - { - //silently ignores the exception - roleInstanceId = null; - } - return roleInstanceId; - } + Interlocked.Increment(ref s_MuxerCreateCount); - /// - /// Gets the configuration of the connection - /// - public string Configuration => RawConfig.ToString(); + RawConfig = configuration ?? throw new ArgumentNullException(nameof(configuration)); + EndPoints = endpoints ?? 
RawConfig.EndPoints.Clone(); + EndPoints.SetDefaultPorts(serverType, ssl: RawConfig.Ssl); + Logger = configuration.LoggerFactory?.CreateLogger(); - internal void OnConnectionFailed(EndPoint endpoint, ConnectionType connectionType, ConnectionFailureType failureType, Exception exception, bool reconfigure, string physicalName) - { - if (_isDisposed) return; - var handler = ConnectionFailed; - if (handler != null) + var map = CommandMap = configuration.GetCommandMap(serverType); + if (!string.IsNullOrWhiteSpace(configuration.Password) && !configuration.TryResp3()) // RESP3 doesn't need AUTH (can issue as part of HELLO) { - ConnectionMultiplexer.CompleteAsWorker( - new ConnectionFailedEventArgs(handler, this, endpoint, connectionType, failureType, exception, physicalName)); + map.AssertAvailable(RedisCommand.AUTH); } - if (reconfigure) + if (!map.IsAvailable(RedisCommand.ECHO) && !map.IsAvailable(RedisCommand.PING) && !map.IsAvailable(RedisCommand.TIME)) { - ReconfigureIfNeeded(endpoint, false, "connection failed"); + // I mean really, give me a CHANCE! I need *something* to check the server is available to me... + // see also: SendTracer (matching logic) + map.AssertAvailable(RedisCommand.EXISTS); } - } - internal void OnInternalError(Exception exception, EndPoint endpoint = null, ConnectionType connectionType = ConnectionType.None, [CallerMemberName] string origin = null) - { - try - { - if (_isDisposed) return; - Trace("Internal error: " + origin + ", " + exception == null ? "unknown" : exception.Message); - var handler = InternalError; - if (handler != null) - { - ConnectionMultiplexer.CompleteAsWorker( - new InternalErrorEventArgs(handler, this, endpoint, connectionType, exception, origin)); - } - } - catch - { // our internal error event failed; whatcha gonna do, exactly? 
- } - } + OnCreateReaderWriter(configuration); + ServerSelectionStrategy = new ServerSelectionStrategy(this); - internal void OnConnectionRestored(EndPoint endpoint, ConnectionType connectionType, string physicalName) - { - if (_isDisposed) return; - var handler = ConnectionRestored; - if (handler != null) + var configChannel = configuration.ConfigurationChannel; + if (!string.IsNullOrWhiteSpace(configChannel)) { - ConnectionMultiplexer.CompleteAsWorker( - new ConnectionFailedEventArgs(handler, this, endpoint, connectionType, ConnectionFailureType.None, null, physicalName)); + ConfigurationChangedChannel = Encoding.UTF8.GetBytes(configChannel); } - ReconfigureIfNeeded(endpoint, false, "connection restored"); + lastHeartbeatTicks = Environment.TickCount; } - private void OnEndpointChanged(EndPoint endpoint, EventHandler handler) + private static ConnectionMultiplexer CreateMultiplexer(ConfigurationOptions configuration, ILogger? log, ServerType? serverType, out EventHandler? connectHandler, EndPointCollection? endpoints = null) { - if (_isDisposed) return; - if (handler != null) + var muxer = new ConnectionMultiplexer(configuration, serverType, endpoints); + connectHandler = null; + if (log is not null) { - ConnectionMultiplexer.CompleteAsWorker(new EndPointEventArgs(handler, this, endpoint)); + // Create a detachable event-handler to log detailed errors if something happens during connect/handshake + connectHandler = (_, a) => + { + try + { + lock (log) // Keep the outer and any inner errors contiguous + { + var ex = a.Exception; + log?.LogErrorConnectionFailed(ex, new(a.EndPoint), a.ConnectionType, a.FailureType, ex?.Message ?? 
"(unknown)"); + while ((ex = ex?.InnerException) != null) + { + log?.LogErrorInnerException(ex, ex.Message); + } + } + } + catch { } + }; + muxer.ConnectionFailed += connectHandler; } + return muxer; } - internal void OnConfigurationChanged(EndPoint endpoint) => OnEndpointChanged(endpoint, ConfigurationChanged); - internal void OnConfigurationChangedBroadcast(EndPoint endpoint) => OnEndpointChanged(endpoint, ConfigurationChangedBroadcast); - /// - /// A server replied with an error message; + /// Get summary statistics associated with all servers in this multiplexer. /// - public event EventHandler ErrorMessage; - internal void OnErrorMessage(EndPoint endpoint, string message) + public ServerCounters GetCounters() { - if (_isDisposed) return; - var handler = ErrorMessage; - if (handler != null) + var counters = new ServerCounters(null); + var snapshot = GetServerSnapshot(); + for (int i = 0; i < snapshot.Length; i++) { - ConnectionMultiplexer.CompleteAsWorker( - new RedisErrorEventArgs(handler, this, endpoint, message) - ); + counters.Add(snapshot[i].GetCounters()); } + return counters; } - [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Usage", "CA2202:Do not dispose objects multiple times")] - private static void Write(ZipArchive zip, string name, Task task, Action callback) - { - var entry = zip.CreateEntry(name, CompressionLevel.Optimal); - using (var stream = entry.Open()) - using (var writer = new StreamWriter(stream)) - { - TaskStatus status = task.Status; - switch (status) - { - case TaskStatus.RanToCompletion: - T val = ((Task)task).Result; - callback(val, writer); - break; - case TaskStatus.Faulted: - writer.WriteLine(string.Join(", ", task.Exception.InnerExceptions.Select(x => x.Message))); - break; - default: - writer.WriteLine(status.ToString()); - break; - } - } - } - /// - /// Write the configuration of all servers to an output stream - /// - /// The destination stream to write the export to. - /// The options to use for this export. 
- public void ExportConfiguration(Stream destination, ExportOptions options = ExportOptions.All) + internal async Task MakePrimaryAsync(ServerEndPoint server, ReplicationChangeOptions options, TextWriter? writer) { - if (destination == null) throw new ArgumentNullException(nameof(destination)); - - // what is possible, given the command map? - ExportOptions mask = 0; - if (CommandMap.IsAvailable(RedisCommand.INFO)) mask |= ExportOptions.Info; - if (CommandMap.IsAvailable(RedisCommand.CONFIG)) mask |= ExportOptions.Config; - if (CommandMap.IsAvailable(RedisCommand.CLIENT)) mask |= ExportOptions.Client; - if (CommandMap.IsAvailable(RedisCommand.CLUSTER)) mask |= ExportOptions.Cluster; - options &= mask; - - using (var zip = new ZipArchive(destination, ZipArchiveMode.Create, true)) - { - var arr = GetServerSnapshot(); - foreach (var server in arr) - { - const CommandFlags flags = CommandFlags.None; - if (!server.IsConnected) continue; - var api = GetServer(server.EndPoint); - - List tasks = new List(); - if ((options & ExportOptions.Info) != 0) - { - tasks.Add(api.InfoRawAsync(flags: flags)); - } - if ((options & ExportOptions.Config) != 0) - { - tasks.Add(api.ConfigGetAsync(flags: flags)); - } - if ((options & ExportOptions.Client) != 0) - { - tasks.Add(api.ClientListAsync(flags: flags)); - } - if ((options & ExportOptions.Cluster) != 0) - { - tasks.Add(api.ClusterNodesRawAsync(flags: flags)); - } - - WaitAllIgnoreErrors(tasks.ToArray()); - - int index = 0; - var prefix = Format.ToString(server.EndPoint); - if ((options & ExportOptions.Info) != 0) - { - Write(zip, prefix + "/info.txt", tasks[index++], WriteNormalizingLineEndings); - } - if ((options & ExportOptions.Config) != 0) - { - Write[]>(zip, prefix + "/config.txt", tasks[index++], (settings, writer) => - { - foreach (var setting in settings) - { - writer.WriteLine("{0}={1}", setting.Key, setting.Value); - } - }); - } - if ((options & ExportOptions.Client) != 0) - { - Write(zip, prefix + "/clients.txt", 
tasks[index++], (clients, writer) => - { - if (clients == null) - { - writer.WriteLine(NoContent); - } - else - { - foreach (var client in clients) - { - writer.WriteLine(client.Raw); - } - } - }); - } - if ((options & ExportOptions.Cluster) != 0) - { - Write(zip, prefix + "/nodes.txt", tasks[index++], WriteNormalizingLineEndings); - } - } - } - } + _ = server ?? throw new ArgumentNullException(nameof(server)); + var log = Logger.With(writer); - internal void MakeMaster(ServerEndPoint server, ReplicationChangeOptions options, LogProxy log) - { var cmd = server.GetFeatures().ReplicaCommands ? RedisCommand.REPLICAOF : RedisCommand.SLAVEOF; CommandMap.AssertAvailable(cmd); - if (!RawConfig.AllowAdmin) throw ExceptionFactory.AdminModeNotEnabled(IncludeDetailInExceptions, cmd, null, server); - - if (server == null) throw new ArgumentNullException(nameof(server)); - var srv = new RedisServer(this, server, null); - if (!srv.IsConnected) throw ExceptionFactory.NoConnectionAvailable(this, null, server, GetServerSnapshot(), command: cmd); + if (!RawConfig.AllowAdmin) + { + throw ExceptionFactory.AdminModeNotEnabled(RawConfig.IncludeDetailInExceptions, cmd, null, server); + } + var srv = server.GetRedisServer(null); + if (!srv.IsConnected) + { + throw ExceptionFactory.NoConnectionAvailable(this, null, server, GetServerSnapshot(), command: cmd); + } -#pragma warning disable CS0618 - const CommandFlags flags = CommandFlags.NoRedirect | CommandFlags.HighPriority; -#pragma warning restore CS0618 + const CommandFlags flags = CommandFlags.NoRedirect; Message msg; - log?.WriteLine($"Checking {Format.ToString(srv.EndPoint)} is available..."); + log?.LogInformationCheckingServerAvailable(new(srv.EndPoint)); try { - srv.Ping(flags); // if it isn't happy, we're not happy + await srv.PingAsync(flags).ForAwait(); // if it isn't happy, we're not happy } catch (Exception ex) { - log?.WriteLine($"Operation failed on {Format.ToString(srv.EndPoint)}, aborting: {ex.Message}"); + 
log?.LogErrorOperationFailedOnServer(ex, new(srv.EndPoint), ex.Message); throw; } - var nodes = GetServerSnapshot(); - RedisValue newMaster = Format.ToString(server.EndPoint); + var nodes = _serverSnapshot; // same as GetServerSnapshot(), but doesn't force span + RedisValue newPrimary = Format.ToString(server.EndPoint); - RedisKey tieBreakerKey = default(RedisKey); // try and write this everywhere; don't worry if some folks reject our advances - if ((options & ReplicationChangeOptions.SetTiebreaker) != 0 && !string.IsNullOrWhiteSpace(RawConfig.TieBreaker) + if (RawConfig.TryGetTieBreaker(out var tieBreakerKey) + && options.HasFlag(ReplicationChangeOptions.SetTiebreaker) && CommandMap.IsAvailable(RedisCommand.SET)) { - tieBreakerKey = RawConfig.TieBreaker; - foreach (var node in nodes) { - if (!node.IsConnected) continue; - log?.WriteLine($"Attempting to set tie-breaker on {Format.ToString(node.EndPoint)}..."); - msg = Message.Create(0, flags, RedisCommand.SET, tieBreakerKey, newMaster); -#pragma warning disable CS0618 - node.WriteDirectFireAndForgetSync(msg, ResultProcessor.DemandOK); -#pragma warning restore CS0618 + if (!node.IsConnected || node.IsReplica) continue; + log?.LogInformationAttemptingToSetTieBreaker(new(node.EndPoint)); + msg = Message.Create(0, flags | CommandFlags.FireAndForget, RedisCommand.SET, tieBreakerKey, newPrimary); + try + { + await node.WriteDirectAsync(msg, ResultProcessor.DemandOK).ForAwait(); + } + catch { } } } // stop replicating, promote to a standalone primary - log?.WriteLine($"Making {Format.ToString(srv.EndPoint)} a master..."); + log?.LogInformationMakingServerPrimary(new(srv.EndPoint)); try { - srv.ReplicaOf(null, flags); + await srv.ReplicaOfAsync(null, flags).ForAwait(); } catch (Exception ex) { - log?.WriteLine($"Operation failed on {Format.ToString(srv.EndPoint)}, aborting: {ex.Message}"); + log?.LogErrorOperationFailedOnServer(ex, new(srv.EndPoint), ex.Message); throw; } // also, in case it was a replica a moment ago, and 
hasn't got the tie-breaker yet, we re-send the tie-breaker to this one - if (!tieBreakerKey.IsNull) + if (!tieBreakerKey.IsNull && !server.IsReplica) { - log?.WriteLine($"Resending tie-breaker to {Format.ToString(server.EndPoint)}..."); - msg = Message.Create(0, flags, RedisCommand.SET, tieBreakerKey, newMaster); -#pragma warning disable CS0618 - server.WriteDirectFireAndForgetSync(msg, ResultProcessor.DemandOK); -#pragma warning restore CS0618 + log?.LogInformationResendingTieBreaker(new(server.EndPoint)); + msg = Message.Create(0, flags | CommandFlags.FireAndForget, RedisCommand.SET, tieBreakerKey, newPrimary); + try + { + await server.WriteDirectAsync(msg, ResultProcessor.DemandOK).ForAwait(); + } + catch { } } // There's an inherent race here in zero-latency environments (e.g. when Redis is on localhost) when a broadcast is specified // The broadcast can get back from redis and trigger a reconfigure before we get a chance to get to ReconfigureAsync() below - // This results in running an outdated reconfig and the .CompareExchange() (due to already running a reconfig) failing...making our needed reconfig a no-op. + // This results in running an outdated reconfiguration and the .CompareExchange() (due to already running a reconfiguration) + // failing...making our needed reconfiguration a no-op. // If we don't block *that* run, then *our* run (at low latency) gets blocked. Then we're waiting on the // ConfigurationOptions.ConfigCheckSeconds interval to identify the current (created by this method call) topology correctly. - var blockingReconfig = Interlocked.CompareExchange(ref activeConfigCause, "Block: Pending Master Reconfig", null) == null; + var blockingReconfig = Interlocked.CompareExchange(ref activeConfigCause, "Block: Pending Primary Reconfig", null) == null; // Try and broadcast the fact a change happened to all members // We want everyone possible to pick it up. 
// We broadcast before *and after* the change to remote members, so that they don't go without detecting a change happened. - // This eliminates the race of pub/sub *then* re-slaving happening, since a method both preceeds and follows. - void Broadcast(ReadOnlySpan serverNodes) + // This eliminates the race of pub/sub *then* re-slaving happening, since a method both precedes and follows. + async Task BroadcastAsync(ServerSnapshot serverNodes) { - if ((options & ReplicationChangeOptions.Broadcast) != 0 && ConfigurationChangedChannel != null + if (options.HasFlag(ReplicationChangeOptions.Broadcast) + && ConfigurationChangedChannel != null && CommandMap.IsAvailable(RedisCommand.PUBLISH)) { RedisValue channel = ConfigurationChangedChannel; foreach (var node in serverNodes) { if (!node.IsConnected) continue; - log?.WriteLine($"Broadcasting via {Format.ToString(node.EndPoint)}..."); - msg = Message.Create(-1, flags, RedisCommand.PUBLISH, channel, newMaster); -#pragma warning disable CS0618 - node.WriteDirectFireAndForgetSync(msg, ResultProcessor.Int64); -#pragma warning restore CS0618 + log?.LogInformationBroadcastingViaNode(new(node.EndPoint)); + msg = Message.Create(-1, flags | CommandFlags.FireAndForget, RedisCommand.PUBLISH, channel, newPrimary); + await node.WriteDirectAsync(msg, ResultProcessor.Int64).ForAwait(); } } } // Send a message before it happens - because afterwards a new replica may be unresponsive - Broadcast(nodes); + await BroadcastAsync(nodes).ForAwait(); - if ((options & ReplicationChangeOptions.ReplicateToOtherEndpoints) != 0) + if (options.HasFlag(ReplicationChangeOptions.ReplicateToOtherEndpoints)) { foreach (var node in nodes) { if (node == server || node.ServerType != ServerType.Standalone) continue; - log?.WriteLine($"Replicating to {Format.ToString(node.EndPoint)}..."); + log?.LogInformationReplicatingToNode(new(node.EndPoint)); msg = RedisServer.CreateReplicaOfMessage(node, server.EndPoint, flags); -#pragma warning disable CS0618 - 
node.WriteDirectFireAndForgetSync(msg, ResultProcessor.DemandOK); -#pragma warning restore CS0618 + await node.WriteDirectAsync(msg, ResultProcessor.DemandOK).ForAwait(); } } // ...and send one after it happens - because the first broadcast may have landed on a secondary client // and it can reconfigure before any topology change actually happened. This is most likely to happen // in low-latency environments. - Broadcast(nodes); + await BroadcastAsync(nodes).ForAwait(); // and reconfigure the muxer - log?.WriteLine("Reconfiguring all endpoints..."); + log?.LogInformationReconfiguringAllEndpoints(); // Yes, there is a tiny latency race possible between this code and the next call, but it's far more minute than before. // The effective gap between 0 and > 0 (likely off-box) latency is something that may never get hit here by anyone. if (blockingReconfig) { Interlocked.Exchange(ref activeConfigCause, null); } - if (!ReconfigureAsync(first: false, reconfigureAll: true, log, srv.EndPoint, "make master").ObserveErrors().Wait(5000)) + if (!await ReconfigureAsync(first: false, reconfigureAll: true, log, srv.EndPoint, cause: nameof(MakePrimaryAsync)).ForAwait()) { - log?.WriteLine("Verifying the configuration was incomplete; please verify"); + log?.LogInformationVerifyingConfigurationIncomplete(); } } internal void CheckMessage(Message message) { if (!RawConfig.AllowAdmin && message.IsAdmin) - throw ExceptionFactory.AdminModeNotEnabled(IncludeDetailInExceptions, message.Command, message, null); - if (message.Command != RedisCommand.UNKNOWN) CommandMap.AssertAvailable(message.Command); - - // using >= here because we will be adding 1 for the command itself (which is an arg for the purposes of the multi-bulk protocol) - if (message.ArgCount >= PhysicalConnection.REDIS_MAX_ARGS) throw ExceptionFactory.TooManyArgs(message.CommandAndKey, message.ArgCount); - } - private const string NoContent = "(no content)"; - private static void WriteNormalizingLineEndings(string source, 
StreamWriter writer) - { - if (source == null) { - writer.WriteLine(NoContent); + throw ExceptionFactory.AdminModeNotEnabled(RawConfig.IncludeDetailInExceptions, message.Command, message, null); } - else + if (message.Command != RedisCommand.UNKNOWN) { - using (var reader = new StringReader(source)) - { - string line; - while ((line = reader.ReadLine()) != null) - writer.WriteLine(line); // normalize line endings - } + CommandMap.AssertAvailable(message.Command); } - } - - /// - /// Raised whenever a physical connection fails - /// - public event EventHandler ConnectionFailed; - - /// - /// Raised whenever an internal error occurs (this is primarily for debugging) - /// - public event EventHandler InternalError; - - /// - /// Raised whenever a physical connection is established - /// - public event EventHandler ConnectionRestored; - - /// - /// Raised when configuration changes are detected - /// - public event EventHandler ConfigurationChanged; - - /// - /// Raised when nodes are explicitly requested to reconfigure via broadcast; - /// this usually means master/replica changes - /// - public event EventHandler ConfigurationChangedBroadcast; - - /// - /// Gets the synchronous timeout associated with the connections - /// - public int TimeoutMilliseconds { get; } - /// - /// Gets the asynchronous timeout associated with the connections - /// - internal int AsyncTimeoutMilliseconds { get; } - - /// - /// Gets all endpoints defined on the server - /// - /// Whether to get only the endpoints specified explicitly in the config. 
- public EndPoint[] GetEndPoints(bool configuredOnly = false) - { - if (configuredOnly) return RawConfig.EndPoints.ToArray(); - return _serverSnapshot.GetEndPoints(); + // using >= here because we will be adding 1 for the command itself (which is an argument for the purposes of the multi-bulk protocol) + if (message.ArgCount >= PhysicalConnection.REDIS_MAX_ARGS) + { + throw ExceptionFactory.TooManyArgs(message.CommandAndKey, message.ArgCount); + } } - internal bool TryResend(int hashSlot, Message message, EndPoint endpoint, bool isMoved) + internal bool TryResend(int hashSlot, Message message, EndPoint endpoint, bool isMoved, bool isSelf) { - return ServerSelectionStrategy.TryResend(hashSlot, message, endpoint, isMoved); + // If we're being told to re-send something because the hash slot moved, that means our topology is out of date + // ...and we should re-evaluate what's what. + // Allow for a 5-second back-off so we don't hammer this in a loop though + if (isMoved && LastReconfigureSecondsAgo > 5) + { + // Async kickoff a reconfigure + ReconfigureIfNeeded(endpoint, false, "MOVED encountered"); + } + + return ServerSelectionStrategy.TryResend(hashSlot, message, endpoint, isMoved, isSelf); } /// - /// Wait for a given asynchronous operation to complete (or timeout) + /// Wait for a given asynchronous operation to complete (or timeout). /// /// The task to wait on. public void Wait(Task task) { - if (task == null) throw new ArgumentNullException(nameof(task)); + _ = task ?? throw new ArgumentNullException(nameof(task)); try { - if (!task.Wait(TimeoutMilliseconds)) throw new TimeoutException(); + if (!task.Wait(TimeoutMilliseconds)) + { + throw new TimeoutException(); + } } catch (AggregateException aex) when (IsSingle(aex)) { @@ -613,16 +393,19 @@ public void Wait(Task task) } /// - /// Wait for a given asynchronous operation to complete (or timeout) + /// Wait for a given asynchronous operation to complete (or timeout). 
/// /// The type contains in the task to wait on. /// The task to wait on. public T Wait(Task task) { - if (task == null) throw new ArgumentNullException(nameof(task)); + _ = task ?? throw new ArgumentNullException(nameof(task)); try { - if (!task.Wait(TimeoutMilliseconds)) throw new TimeoutException(); + if (!task.Wait(TimeoutMilliseconds)) + { + throw new TimeoutException(); + } } catch (AggregateException aex) when (IsSingle(aex)) { @@ -633,43 +416,49 @@ public T Wait(Task task) private static bool IsSingle(AggregateException aex) { - try { return aex != null && aex.InnerExceptions.Count == 1; } - catch { return false; } + try + { + return aex?.InnerExceptions.Count == 1; + } + catch + { + return false; + } } /// - /// Wait for the given asynchronous operations to complete (or timeout) + /// Wait for the given asynchronous operations to complete (or timeout). /// /// The tasks to wait on. public void WaitAll(params Task[] tasks) { - if (tasks == null) throw new ArgumentNullException(nameof(tasks)); + _ = tasks ?? throw new ArgumentNullException(nameof(tasks)); if (tasks.Length == 0) return; - if (!Task.WaitAll(tasks, TimeoutMilliseconds)) throw new TimeoutException(); + if (!Task.WaitAll(tasks, TimeoutMilliseconds)) + { + throw new TimeoutException(); + } } - private bool WaitAllIgnoreErrors(Task[] tasks) => WaitAllIgnoreErrors(tasks, TimeoutMilliseconds); - - private static bool WaitAllIgnoreErrors(Task[] tasks, int timeout) + private bool WaitAllIgnoreErrors(Task[] tasks) { - if (tasks == null) throw new ArgumentNullException(nameof(tasks)); + _ = tasks ?? 
throw new ArgumentNullException(nameof(tasks)); if (tasks.Length == 0) return true; - var watch = Stopwatch.StartNew(); + var watch = ValueStopwatch.StartNew(); try { - // if none error, great - if (Task.WaitAll(tasks, timeout)) return true; + // If no error, great + if (Task.WaitAll(tasks, TimeoutMilliseconds)) return true; } catch { } - // if we get problems, need to give the non-failing ones time to finish - // to be fair and reasonable + // If we get problems, need to give the non-failing ones time to be fair and reasonable for (int i = 0; i < tasks.Length; i++) { var task = tasks[i]; if (!task.IsCanceled && !task.IsCompleted && !task.IsFaulted) { - var remaining = timeout - checked((int)watch.ElapsedMilliseconds); + var remaining = TimeoutMilliseconds - watch.ElapsedMilliseconds; if (remaining <= 0) return false; try { @@ -682,63 +471,35 @@ private static bool WaitAllIgnoreErrors(Task[] tasks, int timeout) return false; } - internal bool AuthSuspect { get; private set; } - internal void SetAuthSuspect() => AuthSuspect = true; - - private static void LogWithThreadPoolStats(LogProxy log, string message, out int busyWorkerCount) + private static async Task WaitAllIgnoreErrorsAsync(string name, Task[] tasks, int timeoutMilliseconds, ILogger? log, [CallerMemberName] string? caller = null, [CallerLineNumber] int callerLineNumber = 0) { - busyWorkerCount = 0; - if (log != null) + _ = tasks ?? 
throw new ArgumentNullException(nameof(tasks)); + if (tasks.Length == 0) { - var sb = new StringBuilder(); - sb.Append(message); - busyWorkerCount = PerfCounterHelper.GetThreadPoolStats(out string iocp, out string worker); - sb.Append(", IOCP: ").Append(iocp).Append(", WORKER: ").Append(worker); - log?.WriteLine(sb.ToString()); + log?.LogInformationNoTasksToAwait(); + return true; } - } - - private static bool AllComplete(Task[] tasks) - { - for (int i = 0; i < tasks.Length; i++) + if (AllComplete(tasks)) { - var task = tasks[i]; - if (!task.IsCanceled && !task.IsCompleted && !task.IsFaulted) - return false; - } - return true; - } - - private async Task WaitAllIgnoreErrorsAsync(string name, Task[] tasks, int timeoutMilliseconds, LogProxy log, [CallerMemberName] string caller = null, [CallerLineNumber] int callerLineNumber = 0) - { - if (tasks == null) throw new ArgumentNullException(nameof(tasks)); - if (tasks.Length == 0) - { - log?.WriteLine("No tasks to await"); + log?.LogInformationAllTasksComplete(); return true; } - if (AllComplete(tasks)) - { - log?.WriteLine("All tasks are already complete"); - return true; - } - - var watch = Stopwatch.StartNew(); - LogWithThreadPoolStats(log, $"Awaiting {tasks.Length} {name} task completion(s) for {timeoutMilliseconds}ms", out _); + var watch = ValueStopwatch.StartNew(); + log?.LogWithThreadPoolStats($"Awaiting {tasks.Length} {name} task completion(s) for {timeoutMilliseconds}ms"); try { // if none error, great - var remaining = timeoutMilliseconds - checked((int)watch.ElapsedMilliseconds); + var remaining = timeoutMilliseconds - watch.ElapsedMilliseconds; if (remaining <= 0) { - LogWithThreadPoolStats(log, "Timeout before awaiting for tasks", out _); + log.LogWithThreadPoolStats("Timeout before awaiting for tasks"); return false; } var allTasks = Task.WhenAll(tasks).ObserveErrors(); bool all = await allTasks.TimeoutAfter(timeoutMs: remaining).ObserveErrors().ForAwait(); - LogWithThreadPoolStats(log, all ? 
$"All {tasks.Length} {name} tasks completed cleanly" : $"Not all {name} tasks completed cleanly (from {caller}#{callerLineNumber}, timeout {timeoutMilliseconds}ms)", out _); + log?.LogWithThreadPoolStats(all ? $"All {tasks.Length} {name} tasks completed cleanly" : $"Not all {name} tasks completed cleanly (from {caller}#{callerLineNumber}, timeout {timeoutMilliseconds}ms)"); return all; } catch @@ -751,471 +512,262 @@ private async Task WaitAllIgnoreErrorsAsync(string name, Task[] tasks, int var task = tasks[i]; if (!task.IsCanceled && !task.IsCompleted && !task.IsFaulted) { - var remaining = timeoutMilliseconds - checked((int)watch.ElapsedMilliseconds); + var remaining = timeoutMilliseconds - watch.ElapsedMilliseconds; if (remaining <= 0) { - LogWithThreadPoolStats(log, "Timeout awaiting tasks", out _); + log.LogWithThreadPoolStats("Timeout awaiting tasks"); return false; } try { - await Task.WhenAny(task, Task.Delay(remaining)).ObserveErrors().ForAwait(); + await task.TimeoutAfter(remaining).ObserveErrors().ForAwait(); } catch { } } } - LogWithThreadPoolStats(log, "Finished awaiting tasks", out _); + log.LogWithThreadPoolStats("Finished awaiting tasks"); return false; } - /// - /// Raised when a hash-slot has been relocated - /// - public event EventHandler HashSlotMoved; - - internal void OnHashSlotMoved(int hashSlot, EndPoint old, EndPoint @new) + private static bool AllComplete(Task[] tasks) { - var handler = HashSlotMoved; - if (handler != null) + for (int i = 0; i < tasks.Length; i++) { - ConnectionMultiplexer.CompleteAsWorker( - new HashSlotMovedEventArgs(handler, this, hashSlot, old, @new)); + var task = tasks[i]; + if (!task.IsCanceled && !task.IsCompleted && !task.IsFaulted) + return false; } + return true; } + internal Exception? AuthException { get; private set; } + internal void SetAuthSuspect(Exception authException) => AuthException ??= authException; + /// - /// Compute the hash-slot of a specified key + /// Creates a new instance. 
/// - /// The key to get a hash slot ID for. - public int HashSlot(RedisKey key) => ServerSelectionStrategy.HashSlot(key); - - internal ServerEndPoint AnyConnected(ServerType serverType, uint startOffset, RedisCommand command, CommandFlags flags) - { - var tmp = GetServerSnapshot(); - int len = tmp.Length; - ServerEndPoint fallback = null; - for (int i = 0; i < len; i++) - { - var server = tmp[(int)(((uint)i + startOffset) % len)]; - if (server != null && server.ServerType == serverType && server.IsSelectable(command)) - { - if (server.IsReplica) - { - switch (flags) - { - case CommandFlags.DemandReplica: - case CommandFlags.PreferReplica: - return server; - case CommandFlags.PreferMaster: - fallback = server; - break; - } - } - else - { - switch (flags) - { - case CommandFlags.DemandMaster: - case CommandFlags.PreferMaster: - return server; - case CommandFlags.PreferReplica: - fallback = server; - break; - } - } - } - } - return fallback; - } - - private volatile bool _isDisposed; - internal bool IsDisposed => _isDisposed; + /// The string configuration to use for this multiplexer. + /// The to log to. + public static Task ConnectAsync(string configuration, TextWriter? log = null) => + ConnectAsync(ConfigurationOptions.Parse(configuration), log); /// - /// Create a new ConnectionMultiplexer instance + /// Creates a new instance. /// /// The string configuration to use for this multiplexer. + /// Action to further modify the parsed configuration options. /// The to log to. 
- public static Task ConnectAsync(string configuration, TextWriter log = null) - { - SocketConnection.AssertDependencies(); - return ConnectAsync(ConfigurationOptions.Parse(configuration), log); - } - - private static async Task ConnectImplAsync(ConfigurationOptions configuration, TextWriter log = null) - { - IDisposable killMe = null; - EventHandler connectHandler = null; - ConnectionMultiplexer muxer = null; - using (var logProxy = LogProxy.TryCreate(log)) - { - try - { - muxer = CreateMultiplexer(configuration, logProxy, out connectHandler); - killMe = muxer; - Interlocked.Increment(ref muxer._connectAttemptCount); - bool configured = await muxer.ReconfigureAsync(first: true, reconfigureAll: false, logProxy, null, "connect").ObserveErrors().ForAwait(); - if (!configured) - { - throw ExceptionFactory.UnableToConnect(muxer, muxer.failureMessage); - } - killMe = null; - Interlocked.Increment(ref muxer._connectCompletedCount); - - if (muxer.ServerSelectionStrategy.ServerType == ServerType.Sentinel) - { - // Initialize the Sentinel handlers - muxer.InitializeSentinel(logProxy); - } - return muxer; - } - finally - { - if (connectHandler != null) muxer.ConnectionFailed -= connectHandler; - if (killMe != null) try { killMe.Dispose(); } catch { } - } - } - } + public static Task ConnectAsync(string configuration, Action configure, TextWriter? log = null) => + ConnectAsync(ConfigurationOptions.Parse(configuration).Apply(configure), log); /// - /// Create a new ConnectionMultiplexer instance + /// Creates a new instance. /// /// The configuration options to use for this multiplexer. /// The to log to. - public static Task ConnectAsync(ConfigurationOptions configuration, TextWriter log = null) + /// Note: For Sentinel, do not specify a - this is handled automatically. + public static Task ConnectAsync(ConfigurationOptions configuration, TextWriter? 
log = null) { SocketConnection.AssertDependencies(); + Validate(configuration); - if (IsSentinel(configuration)) - return SentinelMasterConnectAsync(configuration, log); - - return ConnectImplAsync(PrepareConfig(configuration), log); - } - - private static bool IsSentinel(ConfigurationOptions configuration) - { - return !string.IsNullOrEmpty(configuration?.ServiceName); + return configuration.IsSentinel + ? SentinelPrimaryConnectAsync(configuration, log) + : ConnectImplAsync(configuration, log); } - internal static ConfigurationOptions PrepareConfig(object configuration, bool sentinel = false) + private static async Task ConnectImplAsync(ConfigurationOptions configuration, TextWriter? writer = null, ServerType? serverType = null) { - if (configuration == null) throw new ArgumentNullException(nameof(configuration)); - ConfigurationOptions config; - if (configuration is string s) - { - config = ConfigurationOptions.Parse(s); - } - else if (configuration is ConfigurationOptions configurationOptions) - { - config = (configurationOptions).Clone(); - } - else - { - throw new ArgumentException("Invalid configuration object", nameof(configuration)); - } - if (config.EndPoints.Count == 0) throw new ArgumentException("No endpoints specified", nameof(configuration)); - - if (sentinel) + IDisposable? killMe = null; + EventHandler? connectHandler = null; + ConnectionMultiplexer? muxer = null; + var configLogger = configuration.LoggerFactory?.CreateLogger(); + var log = configLogger.With(writer); + try { - config.SetSentinelDefaults(); - - return config; - } - - config.SetDefaultPorts(); - - return config; - } - - internal class LogProxy : IDisposable - { - public static LogProxy TryCreate(TextWriter writer) - => writer == null ? 
null : new LogProxy(writer); + var sw = ValueStopwatch.StartNew(); + log?.LogInformationConnectingAsync(RuntimeInformation.FrameworkDescription, Utils.GetLibVersion()); - public override string ToString() - { - string s = null; - if (_log != null) + muxer = CreateMultiplexer(configuration, log, serverType, out connectHandler); + killMe = muxer; + Interlocked.Increment(ref muxer._connectAttemptCount); + bool configured = await muxer.ReconfigureAsync(first: true, reconfigureAll: false, log, null, "connect").ObserveErrors().ForAwait(); + if (!configured) { - lock (SyncLock) - { - s = _log?.ToString(); - } + throw ExceptionFactory.UnableToConnect(muxer, muxer.failureMessage); } - return s ?? base.ToString(); - } - private TextWriter _log; + killMe = null; + Interlocked.Increment(ref muxer._connectCompletedCount); - public object SyncLock => this; - private LogProxy(TextWriter log) => _log = log; - public void WriteLine() - { - if (_log != null) // note: double-checked - { - lock (SyncLock) - { - _log?.WriteLine(); - } - } - } - public void WriteLine(string message = null) - { - if (_log != null) // note: double-checked + if (muxer.ServerSelectionStrategy.ServerType == ServerType.Sentinel) { - lock (SyncLock) - { - _log?.WriteLine(message); - } + // Initialize the Sentinel handlers + muxer.InitializeSentinel(log); } + + await configuration.AfterConnectAsync(muxer, s => log?.LogInformationAfterConnect(s)).ForAwait(); + + log?.LogInformationTotalConnectTime(sw.ElapsedMilliseconds); + + return muxer; } - public void Dispose() + finally { - if (_log != null) // note: double-checked - { - lock (SyncLock) { _log = null; } - } + if (connectHandler != null && muxer != null) muxer.ConnectionFailed -= connectHandler; + if (killMe != null) try { killMe.Dispose(); } catch { } + if (log is TextWriterLogger twLogger) twLogger.Release(); } } - private static ConnectionMultiplexer CreateMultiplexer(ConfigurationOptions configuration, LogProxy log, out EventHandler connectHandler) + + 
private static void Validate([NotNull] ConfigurationOptions? config) { - var muxer = new ConnectionMultiplexer(configuration); - connectHandler = null; - if (log != null) + if (config is null) { - // create a detachable event-handler to log detailed errors if something happens during connect/handshake - connectHandler = (_, a) => - { - try - { - lock (log.SyncLock) // keep the outer and any inner errors contiguous - { - var ex = a.Exception; - log?.WriteLine($"connection failed: {Format.ToString(a.EndPoint)} ({a.ConnectionType}, {a.FailureType}): {ex?.Message ?? "(unknown)"}"); - while ((ex = ex.InnerException) != null) - { - log?.WriteLine($"> {ex.Message}"); - } - } - } - catch { } - }; - muxer.ConnectionFailed += connectHandler; + throw new ArgumentNullException(nameof(config)); } - return muxer; - } - - /// - /// Create a new ConnectionMultiplexer instance - /// - /// The string configuration to use for this multiplexer. - /// The to log to. - public static ConnectionMultiplexer Connect(string configuration, TextWriter log = null) - { - return Connect(ConfigurationOptions.Parse(configuration), log); - } - - /// - /// Create a new ConnectionMultiplexer instance - /// - /// The configuration options to use for this multiplexer. - /// The to log to. - public static ConnectionMultiplexer Connect(ConfigurationOptions configuration, TextWriter log = null) - { - SocketConnection.AssertDependencies(); - - if (IsSentinel(configuration)) + if (config.EndPoints.Count == 0) { - return SentinelMasterConnect(configuration, log); + throw new ArgumentException("No endpoints specified", nameof(config)); } - - return ConnectImpl(PrepareConfig(configuration), log); } /// - /// Create a new ConnectionMultiplexer instance that connects to a sentinel server + /// Creates a new instance. /// /// The string configuration to use for this multiplexer. /// The to log to. 
- public static ConnectionMultiplexer SentinelConnect(string configuration, TextWriter log = null) - { - SocketConnection.AssertDependencies(); - return ConnectImpl(PrepareConfig(configuration, sentinel: true), log); - } + public static ConnectionMultiplexer Connect(string configuration, TextWriter? log = null) => + Connect(ConfigurationOptions.Parse(configuration), log); /// - /// Create a new ConnectionMultiplexer instance that connects to a sentinel server + /// Creates a new instance. /// /// The string configuration to use for this multiplexer. + /// Action to further modify the parsed configuration options. /// The to log to. - public static Task SentinelConnectAsync(string configuration, TextWriter log = null) - { - SocketConnection.AssertDependencies(); - return ConnectImplAsync(PrepareConfig(configuration, sentinel: true), log); - } - - /// - /// Create a new ConnectionMultiplexer instance that connects to a sentinel server - /// - /// The configuration options to use for this multiplexer. - /// The to log to. - public static ConnectionMultiplexer SentinelConnect(ConfigurationOptions configuration, TextWriter log = null) - { - SocketConnection.AssertDependencies(); - return ConnectImpl(PrepareConfig(configuration, sentinel: true), log); - } + public static ConnectionMultiplexer Connect(string configuration, Action configure, TextWriter? log = null) => + Connect(ConfigurationOptions.Parse(configuration).Apply(configure), log); /// - /// Create a new ConnectionMultiplexer instance that connects to a sentinel server + /// Creates a new instance. /// /// The configuration options to use for this multiplexer. /// The to log to. - public static Task SentinelConnectAsync(ConfigurationOptions configuration, TextWriter log = null) + /// Note: For Sentinel, do not specify a - this is handled automatically. + public static ConnectionMultiplexer Connect(ConfigurationOptions configuration, TextWriter? 
log = null) { SocketConnection.AssertDependencies(); - return ConnectImplAsync(PrepareConfig(configuration, sentinel: true), log); - } - - /// - /// Create a new ConnectionMultiplexer instance that connects to a sentinel server, discovers the current master server - /// for the specified ServiceName in the config and returns a managed connection to the current master server - /// - /// The string configuration to use for this multiplexer. - /// The to log to. - private static ConnectionMultiplexer SentinelMasterConnect(string configuration, TextWriter log = null) - { - return SentinelMasterConnect(PrepareConfig(configuration, sentinel: true), log); - } - - /// - /// Create a new ConnectionMultiplexer instance that connects to a sentinel server, discovers the current master server - /// for the specified ServiceName in the config and returns a managed connection to the current master server - /// - /// The configuration options to use for this multiplexer. - /// The to log to. - private static ConnectionMultiplexer SentinelMasterConnect(ConfigurationOptions configuration, TextWriter log = null) - { - var sentinelConnection = SentinelConnect(configuration, log); - - var muxer = sentinelConnection.GetSentinelMasterConnection(configuration, log); - // set reference to sentinel connection so that we can dispose it - muxer.sentinelConnection = sentinelConnection; - - return muxer; - } + Validate(configuration); - /// - /// Create a new ConnectionMultiplexer instance that connects to a sentinel server, discovers the current master server - /// for the specified ServiceName in the config and returns a managed connection to the current master server - /// - /// The string configuration to use for this multiplexer. - /// The to log to. - private static Task SentinelMasterConnectAsync(string configuration, TextWriter log = null) - { - return SentinelMasterConnectAsync(PrepareConfig(configuration, sentinel: true), log); + return configuration.IsSentinel + ? 
SentinelPrimaryConnect(configuration, log) + : ConnectImpl(configuration, log); } - /// - /// Create a new ConnectionMultiplexer instance that connects to a sentinel server, discovers the current master server - /// for the specified ServiceName in the config and returns a managed connection to the current master server - /// - /// The configuration options to use for this multiplexer. - /// The to log to. - private static async Task SentinelMasterConnectAsync(ConfigurationOptions configuration, TextWriter log = null) + private static ConnectionMultiplexer ConnectImpl(ConfigurationOptions configuration, TextWriter? writer, ServerType? serverType = null, EndPointCollection? endpoints = null) { - var sentinelConnection = await SentinelConnectAsync(configuration, log).ForAwait(); - - var muxer = sentinelConnection.GetSentinelMasterConnection(configuration, log); - // set reference to sentinel connection so that we can dispose it - muxer.sentinelConnection = sentinelConnection; + IDisposable? killMe = null; + EventHandler? connectHandler = null; + ConnectionMultiplexer? 
muxer = null; + var configLogger = configuration.LoggerFactory?.CreateLogger(); + var log = configLogger.With(writer); + try + { + var sw = ValueStopwatch.StartNew(); + log?.LogInformationConnectingSync(RuntimeInformation.FrameworkDescription, Utils.GetLibVersion()); - return muxer; - } + muxer = CreateMultiplexer(configuration, log, serverType, out connectHandler, endpoints); + killMe = muxer; + Interlocked.Increment(ref muxer._connectAttemptCount); + // note that task has timeouts internally, so it might take *just over* the regular timeout + var task = muxer.ReconfigureAsync(first: true, reconfigureAll: false, log, null, "connect"); - private static ConnectionMultiplexer ConnectImpl(ConfigurationOptions configuration, TextWriter log) - { - IDisposable killMe = null; - EventHandler connectHandler = null; - ConnectionMultiplexer muxer = null; - using (var logProxy = LogProxy.TryCreate(log)) - { - try + if (task.Wait(muxer.SyncConnectTimeout(true))) { - muxer = CreateMultiplexer(configuration, logProxy, out connectHandler); - killMe = muxer; - Interlocked.Increment(ref muxer._connectAttemptCount); - // note that task has timeouts internally, so it might take *just over* the regular timeout - var task = muxer.ReconfigureAsync(first: true, reconfigureAll: false, logProxy, null, "connect"); - - if (!task.Wait(muxer.SyncConnectTimeout(true))) + // completed promptly - we can check the outcome; hard failures + // (such as password problems) should be reported promptly - it + // won't magically start working + if (!task.Result) throw ExceptionFactory.UnableToConnect(muxer, muxer.failureMessage); + } + else + { + // incomplete - most likely slow initial connection; optionally + // allow a soft failure mode + task.ObserveErrors(); + if (muxer.RawConfig.AbortOnConnectFail) { - task.ObserveErrors(); - if (muxer.RawConfig.AbortOnConnectFail) - { - throw ExceptionFactory.UnableToConnect(muxer, "ConnectTimeout"); - } - else - { - muxer.LastException = 
ExceptionFactory.UnableToConnect(muxer, "ConnectTimeout"); - } + throw ExceptionFactory.UnableToConnect(muxer, "ConnectTimeout"); } - - if (!task.Result) throw ExceptionFactory.UnableToConnect(muxer, muxer.failureMessage); - killMe = null; - Interlocked.Increment(ref muxer._connectCompletedCount); - - if (muxer.ServerSelectionStrategy.ServerType == ServerType.Sentinel) + else { - // Initialize the Sentinel handlers - muxer.InitializeSentinel(logProxy); + var ex = ExceptionFactory.UnableToConnect(muxer, "ConnectTimeout"); + muxer.LastException = ex; + muxer.Logger?.LogErrorSyncConnectTimeout(ex, ex.Message); } - return muxer; } - finally + + killMe = null; + Interlocked.Increment(ref muxer._connectCompletedCount); + + if (muxer.ServerSelectionStrategy.ServerType == ServerType.Sentinel) { - if (connectHandler != null && muxer != null) muxer.ConnectionFailed -= connectHandler; - if (killMe != null) try { killMe.Dispose(); } catch { } + // Initialize the Sentinel handlers + muxer.InitializeSentinel(log); } + + configuration.AfterConnectAsync(muxer, s => log?.LogInformationAfterConnect(s)).Wait(muxer.SyncConnectTimeout(true)); + + log?.LogInformationTotalConnectTime(sw.ElapsedMilliseconds); + + return muxer; + } + finally + { + if (connectHandler != null && muxer != null) muxer.ConnectionFailed -= connectHandler; + if (killMe != null) try { killMe.Dispose(); } catch { } + if (log is TextWriterLogger twLogger) twLogger.Release(); } } - private string failureMessage; - private readonly Hashtable servers = new Hashtable(); - private volatile ServerSnapshot _serverSnapshot = ServerSnapshot.Empty; - - ReadOnlySpan IInternalConnectionMultiplexer.GetServerSnapshot() => GetServerSnapshot(); - internal ReadOnlySpan GetServerSnapshot() => _serverSnapshot.Span; - private sealed class ServerSnapshot + ReadOnlySpan IInternalConnectionMultiplexer.GetServerSnapshot() => _serverSnapshot.AsSpan(); + internal ReadOnlySpan GetServerSnapshot() => _serverSnapshot.AsSpan(); + internal 
ReadOnlyMemory GetServerSnaphotMemory() => _serverSnapshot.AsMemory(); + internal sealed class ServerSnapshot : IEnumerable { public static ServerSnapshot Empty { get; } = new ServerSnapshot(Array.Empty(), 0); - private ServerSnapshot(ServerEndPoint[] arr, int count) + private ServerSnapshot(ServerEndPoint[] endpoints, int count) { - _arr = arr; + _endpoints = endpoints; _count = count; } - private readonly ServerEndPoint[] _arr; + private readonly ServerEndPoint[] _endpoints; private readonly int _count; - public ReadOnlySpan Span => new ReadOnlySpan(_arr, 0, _count); + public ReadOnlySpan AsSpan() => new ReadOnlySpan(_endpoints, 0, _count); + public ReadOnlyMemory AsMemory() => new ReadOnlyMemory(_endpoints, 0, _count); internal ServerSnapshot Add(ServerEndPoint value) { - if (value == null) return this; + if (value == null) + { + return this; + } - ServerEndPoint[] arr; - if (_arr.Length > _count) + ServerEndPoint[] nextEndpoints; + if (_endpoints.Length > _count) { - arr = _arr; + nextEndpoints = _endpoints; } else { // no more room; need a new array - int newLen = _arr.Length << 1; + int newLen = _endpoints.Length << 1; if (newLen == 0) newLen = 4; - arr = new ServerEndPoint[newLen]; - _arr.CopyTo(arr, 0); + nextEndpoints = new ServerEndPoint[newLen]; + _endpoints.CopyTo(nextEndpoints, 0); } - arr[_count] = value; - return new ServerSnapshot(arr, _count + 1); + nextEndpoints[_count] = value; + return new ServerSnapshot(nextEndpoints, _count + 1); } internal EndPoint[] GetEndPoints() @@ -1225,110 +777,267 @@ internal EndPoint[] GetEndPoints() var arr = new EndPoint[_count]; for (int i = 0; i < _count; i++) { - arr[i] = _arr[i].EndPoint; + arr[i] = _endpoints[i].EndPoint; } return arr; } - } - internal ServerEndPoint GetServerEndPoint(EndPoint endpoint, LogProxy log = null, bool activate = true) - { - if (endpoint == null) return null; - var server = (ServerEndPoint)servers[endpoint]; - if (server == null) + public Enumerator GetEnumerator() => new(_endpoints, 
_count); + IEnumerator IEnumerable.GetEnumerator() => GetEnumerator(); + IEnumerator IEnumerable.GetEnumerator() => GetEnumerator(); + + public struct Enumerator : IEnumerator { - bool isNew = false; - lock (servers) + private readonly ServerEndPoint[] _endpoints; + private readonly Func? _predicate; + private readonly int _count; + private int _index; + + public ServerEndPoint Current { get; private set; } + + object IEnumerator.Current => Current; + + public bool MoveNext() { - server = (ServerEndPoint)servers[endpoint]; - if (server == null) + while (_index < _count && ++_index < _count) { - if (_isDisposed) throw new ObjectDisposedException(ToString()); - - server = new ServerEndPoint(this, endpoint); - servers.Add(endpoint, server); - isNew = true; - _serverSnapshot = _serverSnapshot.Add(server); + Current = _endpoints[_index]; + if (_predicate is null || _predicate(Current)) + { + return true; + } } + Current = default!; + return false; + } + void IDisposable.Dispose() { } + void IEnumerator.Reset() + { + _index = -1; + Current = default!; } - // spin up the connection if this is new - if (isNew && activate) server.Activate(ConnectionType.Interactive, log); - } - return server; - } - - internal readonly CommandMap CommandMap; - private ConnectionMultiplexer(ConfigurationOptions configuration) - { - IncludeDetailInExceptions = true; - IncludePerformanceCountersInExceptions = false; + public Enumerator(ServerEndPoint[] endpoints, int count, Func? predicate = null) + { + _index = -1; + _endpoints = endpoints; + _count = count; + _predicate = predicate; + Current = default!; + } + } - RawConfig = configuration ?? throw new ArgumentNullException(nameof(configuration)); + public int Count => _count; - var map = CommandMap = configuration.CommandMap; - if (!string.IsNullOrWhiteSpace(configuration.Password)) map.AssertAvailable(RedisCommand.AUTH); + public bool Any(Func? 
predicate = null) + { + if (_count > 0) + { + if (predicate is null) return true; + foreach (var item in AsSpan()) // span for bounds elision + { + if (predicate(item)) return true; + } + } + return false; + } - if (!map.IsAvailable(RedisCommand.ECHO) && !map.IsAvailable(RedisCommand.PING) && !map.IsAvailable(RedisCommand.TIME)) - { // I mean really, give me a CHANCE! I need *something* to check the server is available to me... - // see also: SendTracer (matching logic) - map.AssertAvailable(RedisCommand.EXISTS); + public ServerSnapshotFiltered Where(CommandFlags flags) + { + var effectiveFlags = flags & (CommandFlags.DemandMaster | CommandFlags.DemandReplica); + return effectiveFlags switch + { + CommandFlags.DemandMaster => Where(static s => !s.IsReplica), + CommandFlags.DemandReplica => Where(static s => s.IsReplica), + _ => Where(null!), + // note we don't need to consider "both", since the composition of the flags-enum precludes that + }; } - TimeoutMilliseconds = configuration.SyncTimeout; - AsyncTimeoutMilliseconds = configuration.AsyncTimeout; + public ServerSnapshotFiltered Where(Func predicate) + => new ServerSnapshotFiltered(_endpoints, _count, predicate); - OnCreateReaderWriter(configuration); - ServerSelectionStrategy = new ServerSelectionStrategy(this); + public readonly struct ServerSnapshotFiltered : IEnumerable + { + private readonly ServerEndPoint[] _endpoints; + private readonly Func? _predicate; + private readonly int _count; - var configChannel = configuration.ConfigurationChannel; - if (!string.IsNullOrWhiteSpace(configChannel)) + public ServerSnapshotFiltered(ServerEndPoint[] endpoints, int count, Func? 
predicate) + { + _endpoints = endpoints; + _count = count; + _predicate = predicate; + } + + public Enumerator GetEnumerator() => new(_endpoints, _count, _predicate); + IEnumerator IEnumerable.GetEnumerator() => GetEnumerator(); + IEnumerator IEnumerable.GetEnumerator() => GetEnumerator(); + } + } + + ServerEndPoint IInternalConnectionMultiplexer.GetServerEndPoint(EndPoint endpoint) => GetServerEndPoint(endpoint); + + [return: NotNullIfNotNull(nameof(endpoint))] + internal ServerEndPoint? GetServerEndPoint(EndPoint? endpoint, ILogger? log = null, bool activate = true) + { + if (endpoint == null) return null; + var server = (ServerEndPoint?)servers[endpoint]; + if (server == null) { - ConfigurationChangedChannel = Encoding.UTF8.GetBytes(configChannel); + bool isNew = false; + lock (servers) + { + server = (ServerEndPoint?)servers[endpoint]; + if (server == null) + { + if (_isDisposed) throw new ObjectDisposedException(ToString()); + + server = new ServerEndPoint(this, endpoint); + servers.Add(endpoint, server); + isNew = true; + _serverSnapshot = _serverSnapshot.Add(server); + } + } + // spin up the connection if this is new + if (isNew && activate) + { + server.Activate(ConnectionType.Interactive, log); + if (server.SupportsSubscriptions && !server.KnowOrAssumeResp3()) + { + // Intentionally not logging the sub connection + server.Activate(ConnectionType.Subscription, null); + } + } } - lastHeartbeatTicks = Environment.TickCount; + return server; } - partial void OnCreateReaderWriter(ConfigurationOptions configuration); + internal void Root() => pulse?.Root(this); - internal const int MillisecondsPerHeartbeat = 1000; - private sealed class TimerToken + // note that this also acts (conditionally) as the GC root for the multiplexer + // when there are in-flight messages; the timer can then acts as the heartbeat + // to make sure that everything *eventually* completes + private sealed class TimerToken : IDisposable { - public TimerToken(ConnectionMultiplexer muxer) + 
private TimerToken(ConnectionMultiplexer muxer) { - _ref = new WeakReference(muxer); + _weakRef = new(muxer); } - private Timer _timer; + private Timer? _timer; public void SetTimer(Timer timer) => _timer = timer; - private readonly WeakReference _ref; + + private readonly WeakReference _weakRef; + + private object StrongRefSyncLock => _weakRef; // private and readonly? it'll do + private ConnectionMultiplexer? _strongRef; + private int _strongRefToken; private static readonly TimerCallback Heartbeat = state => { - var token = (TimerToken)state; - var muxer = (ConnectionMultiplexer)(token._ref?.Target); - if (muxer != null) + var token = (TimerToken)state!; + if (token._weakRef.TryGetTarget(out var muxer)) { muxer.OnHeartbeat(); } else { // the muxer got disposed from out of us; kill the timer - var tmp = token._timer; - token._timer = null; - if (tmp != null) try { tmp.Dispose(); } catch { } + token.Dispose(); } }; - internal static IDisposable Create(ConnectionMultiplexer connection) + internal static TimerToken Create(ConnectionMultiplexer connection) { var token = new TimerToken(connection); - var timer = new Timer(Heartbeat, token, MillisecondsPerHeartbeat, MillisecondsPerHeartbeat); + var heartbeatMilliseconds = (int)connection.RawConfig.HeartbeatInterval.TotalMilliseconds; + var timer = new Timer(Heartbeat, token, heartbeatMilliseconds, heartbeatMilliseconds); token.SetTimer(timer); - return timer; + return token; + } + + public void Dispose() + { + var tmp = _timer; + _timer = null; + if (tmp is not null) try { tmp.Dispose(); } catch { } + + _strongRef = null; // note that this shouldn't be relevant since we've unrooted the TimerToken + } + + // explanation of rooting model: + // + // the timer has a reference to the TimerToken; this *always* has a weak-ref, + // and *may* sometimes have a strong-ref; this is so that if a consumer + // drops a multiplexer, it can be garbage collected, i.e. 
the heartbeat timer + // doesn't keep the entire thing alive forever; instead, if the heartbeat detects + // the weak-ref has been collected, it can cancel the timer and *itself* go away; + // however: this leaves a problem where there is *in flight work* when the consumer + // drops the multiplexer; in particular, if that happens when disconnected, there + // could be consumer-visible pending TCS items *in the backlog queue*; we don't want + // to leave those incomplete, as that fails the contractual expectations of async/await; + // instead we need to root ourselves. The natural place to do this is by rooting the + // multiplexer, allowing the heartbeat to keep poking things, so that the usual + // message-processing and timeout rules apply. This is why we *sometimes* also keep + // a strong-ref to the same multiplexer. + // + // The TimerToken is rooted by the timer callback; this then roots the multiplexer, + // which keeps our bridges and connections in scope - until we're sure we're done + // with them. + // + // 1) any bridge or connection will trigger rooting by calling Root when + // they change from "empty" to "non-empty" i.e. whenever there + // in-flight items; this always changes the token; this includes both the + // backlog and awaiting-reply queues. + // + // 2) the heartbeat is responsible for unrooting, after processing timeouts + // etc; first it checks whether it is needed (IsRooted), which also gives + // it the current token. + // + // 3) if so, the heartbeat will (outside of the lock) query all sources to + // see if they still have outstanding work; if everyone reports negatively, + // then the heartbeat calls UnRoot passing in the old token; if this still + // matches (i.e. 
no new work came in while we were looking away), then the + // strong reference is removed; note that "has outstanding work" ignores + // internal-call messages; we are only interested in consumer-facing items + // (but we need to check this *here* rather than when adding, as otherwise + // the definition of "is empty, should root" becomes more complicated, which + // impacts the write path, rather than the heartbeat path. + // + // This means that the multiplexer (via the timer) lasts as long as there are + // outstanding messages; if the consumer has dropped the multiplexer, then + // there will be no new incoming messages, and after timeouts: everything + // should drop. + public void Root(ConnectionMultiplexer multiplexer) + { + lock (StrongRefSyncLock) + { + _strongRef = multiplexer; + _strongRefToken++; + } + } + + public bool IsRooted(out int token) + { + lock (StrongRefSyncLock) + { + token = _strongRefToken; + return _strongRef is not null; + } + } + + public void UnRoot(int token) + { + lock (StrongRefSyncLock) + { + if (token == _strongRefToken) + { + _strongRef = null; + } + } } } - private int _activeHeartbeatErrors; private void OnHeartbeat() { try @@ -1339,8 +1048,22 @@ private void OnHeartbeat() Trace("heartbeat"); var tmp = GetServerSnapshot(); + int token = 0; + bool isRooted = pulse?.IsRooted(out token) ?? 
false, hasPendingCallerFacingItems = false; + for (int i = 0; i < tmp.Length; i++) + { tmp[i].OnHeartbeat(); + if (isRooted && !hasPendingCallerFacingItems) + { + hasPendingCallerFacingItems = tmp[i].HasPendingCallerFacingItems(); + } + } + if (isRooted && !hasPendingCallerFacingItems) + { + // release the GC root on the heartbeat *if* the token still matches + pulse?.UnRoot(token); + } } catch (Exception ex) { @@ -1358,32 +1081,22 @@ private void OnHeartbeat() } } - private int lastHeartbeatTicks; - private static int lastGlobalHeartbeatTicks = Environment.TickCount; - internal long LastHeartbeatSecondsAgo - { - get - { - if (pulse == null) return -1; - return unchecked(Environment.TickCount - Thread.VolatileRead(ref lastHeartbeatTicks)) / 1000; - } - } - - internal Exception LastException { get; set; } - - internal static long LastGlobalHeartbeatSecondsAgo => unchecked(Environment.TickCount - Thread.VolatileRead(ref lastGlobalHeartbeatTicks)) / 1000; - /// - /// Obtain a pub/sub subscriber connection to the specified server + /// Obtain a pub/sub subscriber connection to the specified server. /// /// The async state object to pass to the created . - public ISubscriber GetSubscriber(object asyncState = null) + public ISubscriber GetSubscriber(object? asyncState = null) { - if (RawConfig.Proxy == Proxy.Twemproxy) throw new NotSupportedException("The pub/sub API is not available via twemproxy"); + if (!RawConfig.Proxy.SupportsPubSub()) + { + throw new NotSupportedException($"The pub/sub API is not available via {RawConfig.Proxy}"); + } return new RedisSubscriber(this, asyncState); } - // applies common db number defaults and rules + /// + /// Applies common DB number defaults and rules. 
+ /// internal int ApplyDefaultDatabase(int db) { if (db == -1) @@ -1395,20 +1108,20 @@ internal int ApplyDefaultDatabase(int db) throw new ArgumentOutOfRangeException(nameof(db)); } - if (db != 0 && RawConfig.Proxy == Proxy.Twemproxy) + if (db != 0 && !RawConfig.Proxy.SupportsDatabases()) { - throw new NotSupportedException("Twemproxy only supports database 0"); + throw new NotSupportedException($"{RawConfig.Proxy} only supports database 0"); } return db; } /// - /// Obtain an interactive connection to a database inside redis + /// Obtain an interactive connection to a database inside redis. /// /// The ID to get a database for. /// The async state to pass into the resulting . - public IDatabase GetDatabase(int db = -1, object asyncState = null) + public IDatabase GetDatabase(int db = -1, object? asyncState = null) { db = ApplyDefaultDatabase(db); @@ -1419,87 +1132,146 @@ public IDatabase GetDatabase(int db = -1, object asyncState = null) // DB zero is stored separately, since 0-only is a massively common use-case private const int MaxCachedDatabaseInstance = 16; // 17 items - [0,16] - // side note: "databases 16" is the default in redis.conf; happy to store one extra to get nice alignment etc - private IDatabase dbCacheZero; - private IDatabase[] dbCacheLow; + // Side note: "databases 16" is the default in redis.conf; happy to store one extra to get nice alignment etc + private IDatabase? dbCacheZero; + private IDatabase[]? dbCacheLow; private IDatabase GetCachedDatabaseInstance(int db) // note that we already trust db here; only caller checks range { - // note we don't need to worry about *always* returning the same instance - // - if two threads ask for db 3 at the same time, it is OK for them to get - // different instances, one of which (arbitrarily) ends up cached for later use + // Note: we don't need to worry about *always* returning the same instance. 
+ // If two threads ask for db 3 at the same time, it is OK for them to get + // different instances, one of which (arbitrarily) ends up cached for later use. if (db == 0) { return dbCacheZero ??= new RedisDatabase(this, 0, null); } var arr = dbCacheLow ??= new IDatabase[MaxCachedDatabaseInstance]; - return arr[db - 1] ?? (arr[db - 1] = new RedisDatabase(this, db, null)); + return arr[db - 1] ??= new RedisDatabase(this, db, null); + } + + /// + /// Compute the hash-slot of a specified key. + /// + /// The key to get a hash slot ID for. + public int HashSlot(RedisKey key) => ServerSelectionStrategy.HashSlot(key); + + internal ServerEndPoint? AnyServer(ServerType serverType, uint startOffset, RedisCommand command, CommandFlags flags, bool allowDisconnected) + { + var tmp = GetServerSnapshot(); + int len = tmp.Length; + ServerEndPoint? fallback = null; + for (int i = 0; i < len; i++) + { + var server = tmp[(int)(((uint)i + startOffset) % len)]; + if (server != null && server.ServerType == serverType && server.IsSelectable(command, allowDisconnected)) + { + if (server.IsReplica) + { + switch (flags) + { + case CommandFlags.DemandReplica: + case CommandFlags.PreferReplica: + return server; + case CommandFlags.PreferMaster: + fallback = server; + break; + } + } + else + { + switch (flags) + { + case CommandFlags.DemandMaster: + case CommandFlags.PreferMaster: + return server; + case CommandFlags.PreferReplica: + fallback = server; + break; + } + } + } + } + return fallback; } /// - /// Obtain a configuration API for an individual server + /// Obtain a configuration API for an individual server. /// /// The host to get a server for. /// The port for to get a server for. /// The async state to pass into the resulting . - public IServer GetServer(string host, int port, object asyncState = null) => GetServer(Format.ParseEndPoint(host, port), asyncState); + public IServer GetServer(string host, int port, object? 
asyncState = null) => + GetServer(Format.ParseEndPoint(host, port), asyncState); /// - /// Obtain a configuration API for an individual server + /// Obtain a configuration API for an individual server. /// /// The "host:port" string to get a server for. /// The async state to pass into the resulting . - public IServer GetServer(string hostAndPort, object asyncState = null) => GetServer(Format.TryParseEndPoint(hostAndPort), asyncState); + public IServer GetServer(string hostAndPort, object? asyncState = null) => + Format.TryParseEndPoint(hostAndPort, out var ep) + ? GetServer(ep, asyncState) + : throw new ArgumentException($"The specified host and port could not be parsed: {hostAndPort}", nameof(hostAndPort)); /// - /// Obtain a configuration API for an individual server + /// Obtain a configuration API for an individual server. /// /// The host to get a server for. /// The port for to get a server for. public IServer GetServer(IPAddress host, int port) => GetServer(new IPEndPoint(host, port)); /// - /// Obtain a configuration API for an individual server + /// Obtain a configuration API for an individual server. /// /// The endpoint to get a server for. /// The async state to pass into the resulting . - public IServer GetServer(EndPoint endpoint, object asyncState = null) + public IServer GetServer(EndPoint? endpoint, object? asyncState = null) { - if (endpoint == null) throw new ArgumentNullException(nameof(endpoint)); - if (RawConfig.Proxy == Proxy.Twemproxy) throw new NotSupportedException("The server API is not available via twemproxy"); - var server = (ServerEndPoint)servers[endpoint]; - if (server == null) throw new ArgumentException("The specified endpoint is not defined", nameof(endpoint)); - return new RedisServer(this, server, asyncState); + _ = endpoint ?? 
throw new ArgumentNullException(nameof(endpoint)); + if (!RawConfig.Proxy.SupportsServerApi()) + { + throw new NotSupportedException($"The server API is not available via {RawConfig.Proxy}"); + } + var server = servers[endpoint] as ServerEndPoint ?? throw new ArgumentException("The specified endpoint is not defined", nameof(endpoint)); + return server.GetRedisServer(asyncState); } - [Conditional("VERBOSE")] - internal void Trace(string message, [CallerMemberName] string category = null) + /// +#pragma warning disable RS0026 + public IServer GetServer(RedisKey key, object? asyncState = null, CommandFlags flags = CommandFlags.None) +#pragma warning restore RS0026 { - OnTrace(message, category); - } + // We'll spoof the GET command for this; we're not supporting ad-hoc access to the pub/sub channel, because: bad things. + // Any read-only-replica vs writable-primary concerns should be managed by the caller via "flags"; the default is PreferPrimary. + // Note that ServerSelectionStrategy treats "null" (default) keys as NoSlot, aka Any. + return (SelectServer(RedisCommand.GET, flags, key) ?? Throw()).GetRedisServer(asyncState); - [Conditional("VERBOSE")] - internal void Trace(bool condition, string message, [CallerMemberName] string category = null) - { - if (condition) OnTrace(message, category); + [DoesNotReturn] + static ServerEndPoint Throw() => throw new InvalidOperationException("It was not possible to resolve a connection to the server owning the specified key"); } - partial void OnTrace(string message, string category); - static partial void OnTraceWithoutContext(string message, string category); - - [Conditional("VERBOSE")] - internal static void TraceWithoutContext(string message, [CallerMemberName] string category = null) + /// + /// Obtain configuration APIs for all servers in this multiplexer. 
+ /// + public IServer[] GetServers() { - OnTraceWithoutContext(message, category); + var snapshot = GetServerSnapshot(); + var result = new IServer[snapshot.Length]; + for (var i = 0; i < snapshot.Length; i++) + { + result[i] = snapshot[i].GetRedisServer(null); + } + return result; } - [Conditional("VERBOSE")] - internal static void TraceWithoutContext(bool condition, string message, [CallerMemberName] string category = null) - { - if (condition) OnTraceWithoutContext(message, category); - } + /// + /// Get the hash-slot associated with a given key, if applicable. + /// This can be useful for grouping operations. + /// + /// The to determine the hash slot for. + public int GetHashSlot(RedisKey key) => ServerSelectionStrategy.HashSlot(key); /// - /// The number of operations that have been performed on all connections + /// The number of operations that have been performed on all connections. /// public long OperationCount { @@ -1512,67 +1284,42 @@ public long OperationCount } } - private string activeConfigCause; - - internal bool ReconfigureIfNeeded(EndPoint blame, bool fromBroadcast, string cause, bool publishReconfigure = false, CommandFlags flags = CommandFlags.None) - { - if (fromBroadcast) - { - OnConfigurationChangedBroadcast(blame); - } - string activeCause = Volatile.Read(ref activeConfigCause); - if (activeCause == null) - { - bool reconfigureAll = fromBroadcast || publishReconfigure; - Trace("Configuration change detected; checking nodes", "Configuration"); - ReconfigureAsync(first: false, reconfigureAll, null, blame, cause, publishReconfigure, flags).ObserveErrors(); - return true; - } - else - { - Trace("Configuration change skipped; already in progress via " + activeCause, "Configuration"); - return false; - } - } + // note that the RedisChannel->byte[] converter is always direct, so this is not an alloc + // (we deal with channels far less frequently, so pay the encoding cost up-front) + internal byte[] ChannelPrefix => 
((byte[]?)RawConfig.ChannelPrefix) ?? []; /// - /// Reconfigure the current connections based on the existing configuration + /// Reconfigure the current connections based on the existing configuration. /// /// The to log to. - public async Task ConfigureAsync(TextWriter log = null) + public bool Configure(TextWriter? log = null) { - using (var logProxy = LogProxy.TryCreate(log)) + // Note we expect ReconfigureAsync to internally allow [n] duration, + // so to avoid near misses, here we wait 2*[n]. + var task = ReconfigureAsync(first: false, reconfigureAll: true, Logger.With(log), null, "configure"); + if (!task.Wait(SyncConnectTimeout(false))) { - return await ReconfigureAsync(first: false, reconfigureAll: true, logProxy, null, "configure").ObserveErrors(); + task.ObserveErrors(); + if (RawConfig.AbortOnConnectFail) + { + throw new TimeoutException(); + } + else + { + LastException = new TimeoutException("ConnectTimeout"); + } + return false; } + return task.Result; } /// - /// Reconfigure the current connections based on the existing configuration + /// Reconfigure the current connections based on the existing configuration. /// /// The to log to. - public bool Configure(TextWriter log = null) + public async Task ConfigureAsync(TextWriter? 
log = null) { - // note we expect ReconfigureAsync to internally allow [n] duration, - // so to avoid near misses, here we wait 2*[n] - using (var logProxy = LogProxy.TryCreate(log)) - { - var task = ReconfigureAsync(first: false, reconfigureAll: true, logProxy, null, "configure"); - if (!task.Wait(SyncConnectTimeout(false))) - { - task.ObserveErrors(); - if (RawConfig.AbortOnConnectFail) - { - throw new TimeoutException(); - } - else - { - LastException = new TimeoutException("ConnectTimeout"); - } - return false; - } - return task.Result; - } + return await ReconfigureAsync(first: false, reconfigureAll: true, Logger.With(log), null, "configure").ObserveErrors().ForAwait(); } internal int SyncConnectTimeout(bool forConnect) @@ -1589,57 +1336,83 @@ internal int SyncConnectTimeout(bool forConnect) } /// - /// Provides a text overview of the status of all connections + /// Provides a text overview of the status of all connections. /// public string GetStatus() { - using (var sw = new StringWriter()) - { - GetStatus(sw); - return sw.ToString(); - } + using var sw = new StringWriter(); + GetStatus(sw); + return sw.ToString(); } /// - /// Provides a text overview of the status of all connections + /// Provides a text overview of the status of all connections. /// /// The to log to. - public void GetStatus(TextWriter log) - { - using (var proxy = LogProxy.TryCreate(log)) - { - GetStatus(proxy); - } - } - internal void GetStatus(LogProxy log) + public void GetStatus(TextWriter log) => GetStatus(new TextWriterLogger(log, null)); + + internal void GetStatus(ILogger? 
log) { if (log == null) return; var tmp = GetServerSnapshot(); + log.LogInformationEndpointSummaryHeader(); foreach (var server in tmp) { - log?.WriteLine(server.Summary()); - log?.WriteLine(server.GetCounters().ToString()); - log?.WriteLine(server.GetProfile()); + log.LogInformationServerSummary(server.Summary(), server.GetCounters(), server.GetProfile()); } - log?.WriteLine($"Sync timeouts: {Interlocked.Read(ref syncTimeouts)}; async timeouts: {Interlocked.Read(ref asyncTimeouts)}; fire and forget: {Interlocked.Read(ref fireAndForgets)}; last heartbeat: {LastHeartbeatSecondsAgo}s ago"); + log.LogInformationTimeoutsSummary( + Interlocked.Read(ref syncTimeouts), + Interlocked.Read(ref asyncTimeouts), + Interlocked.Read(ref fireAndForgets), + LastHeartbeatSecondsAgo); } - private void ActivateAllServers(LogProxy log) + private void ActivateAllServers(ILogger? log) { foreach (var server in GetServerSnapshot()) { server.Activate(ConnectionType.Interactive, log); - if (CommandMap.IsAvailable(RedisCommand.SUBSCRIBE)) + if (server.SupportsSubscriptions && !server.KnowOrAssumeResp3()) { - server.Activate(ConnectionType.Subscription, null); // no need to log the SUB stuff + // Intentionally not logging the sub connection + server.Activate(ConnectionType.Subscription, null); } } } - internal async Task ReconfigureAsync(bool first, bool reconfigureAll, LogProxy log, EndPoint blame, string cause, bool publishReconfigure = false, CommandFlags publishReconfigureFlags = CommandFlags.None) + + internal bool ReconfigureIfNeeded(EndPoint? blame, bool fromBroadcast, string cause, bool publishReconfigure = false, CommandFlags flags = CommandFlags.None) + { + if (fromBroadcast) + { + OnConfigurationChangedBroadcast(blame!); + } + string? 
activeCause = Volatile.Read(ref activeConfigCause); + if (activeCause is null) + { + bool reconfigureAll = fromBroadcast || publishReconfigure; + Trace("Configuration change detected; checking nodes", "Configuration"); + ReconfigureAsync(first: false, reconfigureAll, Logger, blame, cause, publishReconfigure, flags).ObserveErrors(); + return true; + } + else + { + Trace("Configuration change skipped; already in progress via " + activeCause, "Configuration"); + return false; + } + } + + /// + /// Triggers a reconfigure of this multiplexer. + /// This re-assessment of all server endpoints to get the current topology and adjust, the same as if we had first connected. + /// + public Task ReconfigureAsync(string reason) => + ReconfigureAsync(first: false, reconfigureAll: false, log: Logger, blame: null, cause: reason); + + internal async Task ReconfigureAsync(bool first, bool reconfigureAll, ILogger? log, EndPoint? blame, string cause, bool publishReconfigure = false, CommandFlags publishReconfigureFlags = CommandFlags.None) { if (_isDisposed) throw new ObjectDisposedException(ToString()); - bool showStats = log is object; + bool showStats = log is not null; bool ranThisCall = false; try @@ -1649,26 +1422,26 @@ internal async Task ReconfigureAsync(bool first, bool reconfigureAll, LogP if (!ranThisCall) { - log?.WriteLine($"Reconfiguration was already in progress due to: {activeConfigCause}, attempted to run for: {cause}"); + log?.LogInformationReconfigurationInProgress(activeConfigCause, cause); return false; } Trace("Starting reconfiguration..."); Trace(blame != null, "Blaming: " + Format.ToString(blame)); + Interlocked.Exchange(ref lastReconfigiureTicks, Environment.TickCount); - log?.WriteLine(RawConfig.ToString(includePassword: false)); - log?.WriteLine(); + log?.LogInformationConfiguration(new(RawConfig)); if (first) { - if (RawConfig.ResolveDns && RawConfig.HasDnsEndPoints()) + if (RawConfig.ResolveDns && EndPoints.HasDnsEndPoints()) { - var dns = 
RawConfig.ResolveEndPointsAsync(this, log).ObserveErrors(); + var dns = EndPoints.ResolveEndPointsAsync(this, log).ObserveErrors(); if (!await dns.TimeoutAfter(TimeoutMilliseconds).ForAwait()) { throw new TimeoutException("Timeout resolving endpoints"); } } - foreach (var endpoint in RawConfig.EndPoints) + foreach (var endpoint in EndPoints) { GetServerEndPoint(endpoint, log, false); } @@ -1684,57 +1457,51 @@ internal async Task ReconfigureAsync(bool first, bool reconfigureAll, LogP attemptsLeft--; } int standaloneCount = 0, clusterCount = 0, sentinelCount = 0; - var endpoints = RawConfig.EndPoints; - log?.WriteLine($"{endpoints.Count} unique nodes specified"); + var endpoints = EndPoints; + bool useTieBreakers = RawConfig.TryGetTieBreaker(out var tieBreakerKey); + log?.LogInformationUniqueNodesSpecified(endpoints.Count, useTieBreakers ? "with" : "without"); if (endpoints.Count == 0) { throw new InvalidOperationException("No nodes to consider"); } -#pragma warning disable CS0618 - const CommandFlags flags = CommandFlags.NoRedirect | CommandFlags.HighPriority; -#pragma warning restore CS0618 - List masters = new List(endpoints.Count); - bool useTieBreakers = !string.IsNullOrWhiteSpace(RawConfig.TieBreaker); + List primaries = new List(endpoints.Count); - ServerEndPoint[] servers = null; - Task[] tieBreakers = null; + ServerEndPoint[]? servers = null; bool encounteredConnectedClusterServer = false; - Stopwatch watch = null; + ValueStopwatch? watch = null; int iterCount = first ? 2 : 1; - // this is fix for https://github.com/StackExchange/StackExchange.Redis/issues/300 + // This is fix for https://github.com/StackExchange/StackExchange.Redis/issues/300 // auto discoverability of cluster nodes is made synchronous. 
- // we try to connect to endpoints specified inside the user provided configuration - // and when we encounter one such endpoint to which we are able to successfully connect, - // we get the list of cluster nodes from this endpoint and try to proactively connect - // to these nodes instead of relying on auto configure + // We try to connect to endpoints specified inside the user provided configuration + // and when we encounter an endpoint to which we are able to successfully connect, + // we get the list of cluster nodes from that endpoint and try to proactively connect + // to listed nodes instead of relying on auto configure. for (int iter = 0; iter < iterCount; ++iter) { if (endpoints == null) break; var available = new Task[endpoints.Count]; - tieBreakers = useTieBreakers ? new Task[endpoints.Count] : null; servers = new ServerEndPoint[available.Length]; - RedisKey tieBreakerKey = useTieBreakers ? (RedisKey)RawConfig.TieBreaker : default(RedisKey); - for (int i = 0; i < available.Length; i++) { Trace("Testing: " + Format.ToString(endpoints[i])); var server = GetServerEndPoint(endpoints[i]); - //server.ReportNextFailure(); + // server.ReportNextFailure(); servers[i] = server; // This awaits either the endpoint's initial connection, or a tracer if we're already connected - // (which is the reconfigure case) - available[i] = server.OnConnectedAsync(log, sendTracerIfConnected: true, autoConfigureIfConnected: reconfigureAll); + // (which is the reconfigure case, except second iteration which is only for newly discovered cluster members). 
+ var isFirstIteration = iter == 0; + available[i] = server.OnConnectedAsync(log, sendTracerIfConnected: isFirstIteration, autoConfigureIfConnected: reconfigureAll); } - watch ??= Stopwatch.StartNew(); - var remaining = RawConfig.ConnectTimeout - checked((int)watch.ElapsedMilliseconds); - log?.WriteLine($"Allowing {available.Length} endpoint(s) {TimeSpan.FromMilliseconds(remaining)} to respond..."); + watch ??= ValueStopwatch.StartNew(); + var remaining = RawConfig.ConnectTimeout - watch.Value.ElapsedMilliseconds; + log?.LogInformationAllowingEndpointsToRespond(available.Length, TimeSpan.FromMilliseconds(remaining)); Trace("Allowing endpoints " + TimeSpan.FromMilliseconds(remaining) + " to respond..."); var allConnected = await WaitAllIgnoreErrorsAsync("available", available, remaining, log).ForAwait(); @@ -1745,34 +1512,21 @@ internal async Task ReconfigureAsync(bool first, bool reconfigureAll, LogP { var server = servers[i]; var task = available[i]; - server.GetOutstandingCount(RedisCommand.PING, out int inst, out int qs, out long @in, out int qu, out bool aw, out long toRead, out long toWrite, out var bs, out var rs, out var ws); - log?.WriteLine($" Server[{i}] ({Format.ToString(server)}) Status: {task.Status} (inst: {inst}, qs: {qs}, in: {@in}, qu: {qu}, aw: {aw}, in-pipe: {toRead}, out-pipe: {toWrite}, bw: {bs}, rs: {rs}. 
ws: {ws})"); + var bs = server.GetBridgeStatus(ConnectionType.Interactive); + + log?.LogInformationServerStatus(i, new(server), task.Status, bs.MessagesSinceLastHeartbeat, bs.Connection.MessagesSentAwaitingResponse, bs.Connection.BytesAvailableOnSocket, bs.MessagesSinceLastHeartbeat, bs.IsWriterActive, bs.Connection.BytesInReadPipe, bs.Connection.BytesInWritePipe, bs.BacklogStatus, bs.Connection.ReadStatus, bs.Connection.WriteStatus); } } + log?.LogInformationEndpointSummary(); // Log current state after await foreach (var server in servers) { - log?.WriteLine($"{Format.ToString(server.EndPoint)}: Endpoint is {server.ConnectionState}"); - } - - // After we've successfully connected (and authenticated), kickoff tie breakers if needed - if (useTieBreakers) - { - log?.WriteLine($"Election: Gathering tie-breakers..."); - for (int i = 0; i < available.Length; i++) - { - var server = servers[i]; - - log?.WriteLine($"{Format.ToString(server.EndPoint)}: Requesting tie-break (Key=\"{RawConfig.TieBreaker}\")..."); - Message msg = Message.Create(0, flags, RedisCommand.GET, tieBreakerKey); - msg.SetInternalCall(); - msg = LoggingMessage.Create(log, msg); - tieBreakers[i] = server.WriteDirectAsync(msg, ResultProcessor.String); - } + log?.LogInformationEndpointState(new(server.EndPoint), server.InteractiveConnectionState, server.SubscriptionConnectionState); } - EndPointCollection updatedClusterEndpointCollection = null; + log?.LogInformationTaskSummary(); + EndPointCollection? 
updatedClusterEndpointCollection = null; for (int i = 0; i < available.Length; i++) { var task = available[i]; @@ -1781,29 +1535,30 @@ internal async Task ReconfigureAsync(bool first, bool reconfigureAll, LogP if (task.IsFaulted) { server.SetUnselectable(UnselectableFlags.DidNotRespond); - var aex = task.Exception; + var aex = task.Exception!; foreach (var ex in aex.InnerExceptions) { - log?.WriteLine($"{Format.ToString(server)}: Faulted: {ex.Message}"); + log?.LogErrorServerFaulted(ex, new(server), ex.Message); failureMessage = ex.Message; } } else if (task.IsCanceled) { server.SetUnselectable(UnselectableFlags.DidNotRespond); - log?.WriteLine($"{Format.ToString(server)}: Connect task canceled"); + log?.LogInformationConnectTaskCanceled(new(server)); } else if (task.IsCompleted) { if (task.Result != "Disconnected") { server.ClearUnselectable(UnselectableFlags.DidNotRespond); - log?.WriteLine($"{Format.ToString(server)}: Returned with success as {server.ServerType} {(server.IsReplica ? "replica" : "primary")} (Source: {task.Result})"); + log?.LogInformationServerReturnedSuccess(new(server), server.ServerType, server.IsReplica ? "replica" : "primary", task.Result); - // count the server types + // Count the server types switch (server.ServerType) { case ServerType.Twemproxy: + case ServerType.Envoyproxy: case ServerType.Standalone: standaloneCount++; break; @@ -1815,30 +1570,31 @@ internal async Task ReconfigureAsync(bool first, bool reconfigureAll, LogP break; } - if (clusterCount > 0 && !encounteredConnectedClusterServer) + if (clusterCount > 0 && !encounteredConnectedClusterServer && CommandMap.IsAvailable(RedisCommand.CLUSTER)) { - // we have encountered a connected server with clustertype for the first time. + // We have encountered a connected server with a cluster type for the first time. 
// so we will get list of other nodes from this server using "CLUSTER NODES" command // and try to connect to these other nodes in the next iteration encounteredConnectedClusterServer = true; updatedClusterEndpointCollection = await GetEndpointsFromClusterNodes(server, log).ForAwait(); } - // set the server UnselectableFlags and update masters list + // Set the server UnselectableFlags and update primaries list switch (server.ServerType) { case ServerType.Twemproxy: + case ServerType.Envoyproxy: case ServerType.Sentinel: case ServerType.Standalone: case ServerType.Cluster: server.ClearUnselectable(UnselectableFlags.ServerType); if (server.IsReplica) - { - server.ClearUnselectable(UnselectableFlags.RedundantMaster); + { + server.ClearUnselectable(UnselectableFlags.RedundantPrimary); } else { - masters.Add(server); + primaries.Add(server); } break; default: @@ -1849,13 +1605,13 @@ internal async Task ReconfigureAsync(bool first, bool reconfigureAll, LogP else { server.SetUnselectable(UnselectableFlags.DidNotRespond); - log?.WriteLine($"{Format.ToString(server)}: Returned, but incorrectly"); + log?.LogInformationServerReturnedIncorrectly(new(server)); } } else { server.SetUnselectable(UnselectableFlags.DidNotRespond); - log?.WriteLine($"{Format.ToString(server)}: Did not respond"); + log?.LogInformationServerDidNotRespond(new(server), task.Status); } } @@ -1865,17 +1621,21 @@ internal async Task ReconfigureAsync(bool first, bool reconfigureAll, LogP } else { - break; // we do not want to repeat the second iteration + break; // We do not want to repeat the second iteration } } if (clusterCount == 0) { - // set the serverSelectionStrategy + // Set the serverSelectionStrategy if (RawConfig.Proxy == Proxy.Twemproxy) { ServerSelectionStrategy.ServerType = ServerType.Twemproxy; } + else if (RawConfig.Proxy == Proxy.Envoyproxy) + { + ServerSelectionStrategy.ServerType = ServerType.Envoyproxy; + } else if (standaloneCount == 0 && sentinelCount > 0) { 
ServerSelectionStrategy.ServerType = ServerType.Sentinel; @@ -1885,18 +1645,24 @@ internal async Task ReconfigureAsync(bool first, bool reconfigureAll, LogP ServerSelectionStrategy.ServerType = ServerType.Standalone; } - var preferred = await NominatePreferredMaster(log, servers, useTieBreakers, tieBreakers, masters, timeoutMs: RawConfig.ConnectTimeout - checked((int)watch.ElapsedMilliseconds)).ObserveErrors().ForAwait(); - foreach (var master in masters) + // If multiple primaries are detected, nominate the preferred one + // ...but not if the type of server we're connected to supports and expects multiple primaries + // ...for those cases, we want to allow sending to any primary endpoint. + if (ServerSelectionStrategy.ServerType.HasSinglePrimary()) { - if (master == preferred || master.IsReplica) - { - log?.WriteLine($"{Format.ToString(master)}: Clearing as RedundantMaster"); - master.ClearUnselectable(UnselectableFlags.RedundantMaster); - } - else + var preferred = NominatePreferredPrimary(log, servers!, useTieBreakers, primaries); + foreach (var primary in primaries) { - log?.WriteLine($"{Format.ToString(master)}: Setting as RedundantMaster"); - master.SetUnselectable(UnselectableFlags.RedundantMaster); + if (primary == preferred || primary.IsReplica) + { + log?.LogInformationClearingAsRedundantPrimary(new(primary)); + primary.ClearUnselectable(UnselectableFlags.RedundantPrimary); + } + else + { + log?.LogInformationSettingAsRedundantPrimary(new(primary)); + primary.SetUnselectable(UnselectableFlags.RedundantPrimary); + } } } } @@ -1904,18 +1670,19 @@ internal async Task ReconfigureAsync(bool first, bool reconfigureAll, LogP { ServerSelectionStrategy.ServerType = ServerType.Cluster; long coveredSlots = ServerSelectionStrategy.CountCoveredSlots(); - log?.WriteLine($"Cluster: {coveredSlots} of {ServerSelectionStrategy.TotalSlots} slots covered"); + log?.LogInformationClusterSlotsCovered(coveredSlots, ServerSelectionStrategy.TotalSlots); } if (!first) { - long 
subscriptionChanges = ValidateSubscriptions(); + // Calling the sync path here because it's all fire and forget + long subscriptionChanges = EnsureSubscriptions(CommandFlags.FireAndForget); if (subscriptionChanges == 0) { - log?.WriteLine("No subscription changes necessary"); + log?.LogInformationNoSubscriptionChanges(); } else { - log?.WriteLine($"Subscriptions reconfigured: {subscriptionChanges}"); + log?.LogInformationSubscriptionsAttemptingReconnect(subscriptionChanges); } } if (showStats) @@ -1923,21 +1690,21 @@ internal async Task ReconfigureAsync(bool first, bool reconfigureAll, LogP GetStatus(log); } - string stormLog = GetStormLog(); + string? stormLog = GetStormLog(); if (!string.IsNullOrWhiteSpace(stormLog)) { - log?.WriteLine(); - log?.WriteLine(stormLog); + log?.LogInformationStormLog(stormLog!); } healthy = standaloneCount != 0 || clusterCount != 0 || sentinelCount != 0; if (first && !healthy && attemptsLeft > 0) { - log?.WriteLine("resetting failing connections to retry..."); + log?.LogInformationResettingFailingConnections(); ResetAllNonConnected(); - log?.WriteLine($"retrying; attempts left: {attemptsLeft}..."); + log?.LogInformationRetryingAttempts(attemptsLeft); } - //WTF("?: " + attempts); - } while (first && !healthy && attemptsLeft > 0); + // WTF("?: " + attempts); + } + while (first && !healthy && attemptsLeft > 0); if (first && RawConfig.AbortOnConnectFail && !healthy) { @@ -1945,14 +1712,14 @@ internal async Task ReconfigureAsync(bool first, bool reconfigureAll, LogP } if (first) { - log?.WriteLine("Starting heartbeat..."); + log?.LogInformationStartingHeartbeat(); pulse = TimerToken.Create(this); } if (publishReconfigure) { try { - log?.WriteLine("Broadcasting reconfigure..."); + log?.LogInformationBroadcastingReconfigure(); PublishReconfigureImpl(publishReconfigureFlags); } catch @@ -1968,36 +1735,44 @@ internal async Task ReconfigureAsync(bool first, bool reconfigureAll, LogP finally { Trace("Exiting reconfiguration..."); - 
OnTraceLog(log); if (ranThisCall) Interlocked.Exchange(ref activeConfigCause, null); - if (!first) OnConfigurationChanged(blame); + if (!first && blame is not null) OnConfigurationChanged(blame); Trace("Reconfiguration exited"); } } - private async Task GetEndpointsFromClusterNodes(ServerEndPoint server, LogProxy log) + /// + /// Gets all endpoints defined on the multiplexer. + /// + /// Whether to get only the endpoints specified explicitly in the config. + public EndPoint[] GetEndPoints(bool configuredOnly = false) => + configuredOnly + ? EndPoints.ToArray() + : _serverSnapshot.GetEndPoints(); + + private async Task GetEndpointsFromClusterNodes(ServerEndPoint server, ILogger? log) { var message = Message.Create(-1, CommandFlags.None, RedisCommand.CLUSTER, RedisLiterals.NODES); try { var clusterConfig = await ExecuteAsyncImpl(message, ResultProcessor.ClusterNodes, null, server).ForAwait(); - var clusterEndpoints = new EndPointCollection(clusterConfig.Nodes.Select(node => node.EndPoint).ToList()); + if (clusterConfig is null) + { + return null; + } + var clusterEndpoints = new EndPointCollection(clusterConfig.Nodes.Where(node => node.EndPoint is not null).Select(node => node.EndPoint!).ToList()); // Loop through nodes in the cluster and update nodes relations to other nodes - ServerEndPoint serverEndpoint = null; + ServerEndPoint? 
serverEndpoint = null; foreach (EndPoint endpoint in clusterEndpoints) { serverEndpoint = GetServerEndPoint(endpoint); - if (serverEndpoint != null) - { - serverEndpoint.UpdateNodeRelations(clusterConfig); - } - + serverEndpoint?.UpdateNodeRelations(clusterConfig); } return clusterEndpoints; } catch (Exception ex) { - log?.WriteLine($"Encountered error while updating cluster config: {ex.Message}"); + log?.LogErrorEncounteredErrorWhileUpdatingClusterConfig(ex, ex.Message); return null; } } @@ -2011,86 +1786,67 @@ private void ResetAllNonConnected() } } -#pragma warning disable IDE0060 - partial void OnTraceLog(LogProxy log, [CallerMemberName] string caller = null); -#pragma warning restore IDE0060 - - private async Task NominatePreferredMaster(LogProxy log, ServerEndPoint[] servers, bool useTieBreakers, Task[] tieBreakers, List masters, int timeoutMs) + private static ServerEndPoint? NominatePreferredPrimary(ILogger? log, ServerEndPoint[] servers, bool useTieBreakers, List primaries) { - Dictionary uniques = null; + log?.LogInformationElectionSummary(); + + Dictionary? uniques = null; if (useTieBreakers) - { // count the votes + { + // Count the votes uniques = new Dictionary(StringComparer.OrdinalIgnoreCase); - log?.WriteLine("Waiting for tiebreakers..."); - await WaitAllIgnoreErrorsAsync("tiebreaker", tieBreakers, Math.Max(timeoutMs, 200), log).ForAwait(); - for (int i = 0; i < tieBreakers.Length; i++) + for (int i = 0; i < servers.Length; i++) { - var ep = servers[i].EndPoint; - var status = tieBreakers[i].Status; - switch (status) + var server = servers[i]; + string? 
serverResult = server.TieBreakerResult; + + if (serverResult.IsNullOrWhiteSpace()) { - case TaskStatus.RanToCompletion: - string s = tieBreakers[i].Result; - if (string.IsNullOrWhiteSpace(s)) - { - log?.WriteLine($"Election: {Format.ToString(ep)} had no tiebreaker set"); - } - else - { - log?.WriteLine($"Election: {Format.ToString(ep)} nominates: {s}"); - if (!uniques.TryGetValue(s, out int count)) count = 0; - uniques[s] = count + 1; - } - break; - case TaskStatus.Faulted: - log?.WriteLine($"Election: {Format.ToString(ep)} failed to nominate ({status})"); - foreach (var ex in tieBreakers[i].Exception.InnerExceptions) - { - if (ex.Message.StartsWith("MOVED ") || ex.Message.StartsWith("ASK ")) continue; - log?.WriteLine("> " + ex.Message); - } - break; - default: - log?.WriteLine($"Election: {Format.ToString(ep)} failed to nominate ({status})"); - break; + log?.LogInformationElectionNoTiebreaker(new(server)); + } + else + { + log?.LogInformationElectionNominates(new(server), serverResult); + if (!uniques.TryGetValue(serverResult, out int count)) count = 0; + uniques[serverResult] = count + 1; } } } - switch (masters.Count) + switch (primaries.Count) { case 0: - log?.WriteLine("Election: No masters detected"); + log?.LogInformationElectionNoPrimariesDetected(); return null; case 1: - log?.WriteLine($"Election: Single master detected: {Format.ToString(masters[0].EndPoint)}"); - return masters[0]; + log?.LogInformationElectionSinglePrimaryDetected(new(primaries[0].EndPoint)); + return primaries[0]; default: - log?.WriteLine("Election: Multiple masters detected..."); + log?.LogInformationElectionMultiplePrimariesDetected(); if (useTieBreakers && uniques != null) { switch (uniques.Count) { case 0: - log?.WriteLine("Election: No nominations by tie-breaker"); + log?.LogInformationElectionNoNominationsByTieBreaker(); break; case 1: string unanimous = uniques.Keys.Single(); - log?.WriteLine($"Election: Tie-breaker unanimous: {unanimous}"); + 
log?.LogInformationElectionTieBreakerUnanimous(unanimous); var found = SelectServerByElection(servers, unanimous, log); if (found != null) { - log?.WriteLine($"Election: Elected: {Format.ToString(found.EndPoint)}"); + log?.LogInformationElectionElected(new(found.EndPoint)); return found; } break; default: - log?.WriteLine("Election is contested:"); - ServerEndPoint highest = null; + log?.LogInformationElectionContested(); + ServerEndPoint? highest = null; bool arbitrary = false; foreach (var pair in uniques.OrderByDescending(x => x.Value)) { - log?.WriteLine($"Election: {pair.Key} has {pair.Value} votes"); + log?.LogInformationElectionVotes(pair.Key, pair.Value); if (highest == null) { highest = SelectServerByElection(servers, pair.Key, log); @@ -2105,11 +1861,11 @@ private async Task NominatePreferredMaster(LogProxy log, ServerE { if (arbitrary) { - log?.WriteLine($"Election: Choosing master arbitrarily: {Format.ToString(highest.EndPoint)}"); + log?.LogInformationElectionChoosingPrimaryArbitrarily(new(highest.EndPoint)); } else { - log?.WriteLine($"Election: Elected: {Format.ToString(highest.EndPoint)}"); + log?.LogInformationElectionElected(new(highest.EndPoint)); } return highest; } @@ -2119,11 +1875,11 @@ private async Task NominatePreferredMaster(LogProxy log, ServerE break; } - log?.WriteLine($"Election: Choosing master arbitrarily: {Format.ToString(masters[0].EndPoint)}"); - return masters[0]; + log?.LogInformationElectionChoosingPrimaryArbitrarily(new(primaries[0].EndPoint)); + return primaries[0]; } - private ServerEndPoint SelectServerByElection(ServerEndPoint[] servers, string endpoint, LogProxy log) + private static ServerEndPoint? SelectServerByElection(ServerEndPoint[] servers, string endpoint, ILogger? 
log) { if (servers == null || string.IsNullOrWhiteSpace(endpoint)) return null; for (int i = 0; i < servers.Length; i++) @@ -2131,13 +1887,13 @@ private ServerEndPoint SelectServerByElection(ServerEndPoint[] servers, string e if (string.Equals(Format.ToString(servers[i].EndPoint), endpoint, StringComparison.OrdinalIgnoreCase)) return servers[i]; } - log?.WriteLine("...but we couldn't find that"); + log?.LogInformationCouldNotFindThatEndpoint(); var deDottedEndpoint = DeDotifyHost(endpoint); for (int i = 0; i < servers.Length; i++) { if (string.Equals(DeDotifyHost(Format.ToString(servers[i].EndPoint)), deDottedEndpoint, StringComparison.OrdinalIgnoreCase)) { - log?.WriteLine($"...but we did find instead: {deDottedEndpoint}"); + log?.LogInformationFoundAlternativeEndpoint(deDottedEndpoint); return servers[i]; } } @@ -2148,15 +1904,20 @@ private static string DeDotifyHost(string input) { if (string.IsNullOrWhiteSpace(input)) return input; // GIGO - if (!char.IsLetter(input[0])) return input; // need first char to be alpha for this to work + if (!char.IsLetter(input[0])) return input; // Need first char to be alpha for this to work int periodPosition = input.IndexOf('.'); - if (periodPosition <= 0) return input; // no period or starts with a period? nothing useful to split + if (periodPosition <= 0) return input; // No period or starts with a period? 
Then nothing useful to split int colonPosition = input.IndexOf(':'); if (colonPosition > 0) - { // has a port specifier + { + // Has a port specifier +#if NET + return string.Concat(input.AsSpan(0, periodPosition), input.AsSpan(colonPosition)); +#else return input.Substring(0, periodPosition) + input.Substring(colonPosition); +#endif } else { @@ -2166,44 +1927,52 @@ private static string DeDotifyHost(string input) internal void UpdateClusterRange(ClusterConfiguration configuration) { - if (configuration == null) return; + if (configuration is null) + { + return; + } foreach (var node in configuration.Nodes) { if (node.IsReplica || node.Slots.Count == 0) continue; foreach (var slot in node.Slots) { - var server = GetServerEndPoint(node.EndPoint); - if (server != null) ServerSelectionStrategy.UpdateClusterRange(slot.From, slot.To, server); + if (GetServerEndPoint(node.EndPoint) is ServerEndPoint server) + { + ServerSelectionStrategy.UpdateClusterRange(slot.From, slot.To, server); + } } } } - private IDisposable pulse; + internal ServerEndPoint? SelectServer(Message? message) => + message == null ? null : ServerSelectionStrategy.Select(message); - internal ServerEndPoint SelectServer(Message message) - { - if (message == null) return null; - return ServerSelectionStrategy.Select(message); - } + internal ServerEndPoint? SelectServer(RedisCommand command, CommandFlags flags, in RedisKey key) => + ServerSelectionStrategy.Select(command, key, flags); - internal ServerEndPoint SelectServer(RedisCommand command, CommandFlags flags, in RedisKey key) - { - return ServerSelectionStrategy.Select(command, key, flags); - } + internal ServerEndPoint? 
SelectServer(RedisCommand command, CommandFlags flags, in RedisChannel channel) => + ServerSelectionStrategy.Select(command, channel, flags); - private bool PrepareToPushMessageToBridge(Message message, ResultProcessor processor, IResultBox resultBox, ref ServerEndPoint server) + private bool PrepareToPushMessageToBridge(Message message, ResultProcessor? processor, IResultBox? resultBox, [NotNullWhen(true)] ref ServerEndPoint? server) { message.SetSource(processor, resultBox); if (server == null) - { // infer a server automatically + { + // Infer a server automatically server = SelectServer(message); + + // If we didn't find one successfully, and we're allowed, queue for any viable server + if (server == null && RawConfig.BacklogPolicy.QueueWhileDisconnected) + { + server = ServerSelectionStrategy.Select(message, allowDisconnected: true); + } } - else // a server was specified; do we trust their choice, though? + else // A server was specified - do we trust their choice, though? { - if (message.IsMasterOnly() && server.IsReplica) + if (message.IsPrimaryOnly() && server.IsReplica) { - throw ExceptionFactory.MasterOnly(IncludeDetailInExceptions, message.Command, message, server); + throw ExceptionFactory.PrimaryOnly(RawConfig.IncludeDetailInExceptions, message.Command, message, server); } switch (server.ServerType) @@ -2211,13 +1980,15 @@ private bool PrepareToPushMessageToBridge(Message message, ResultProcessor case ServerType.Cluster: if (message.GetHashSlot(ServerSelectionStrategy) == ServerSelectionStrategy.MultipleSlots) { - throw ExceptionFactory.MultiSlot(IncludeDetailInExceptions, message); + throw ExceptionFactory.MultiSlot(RawConfig.IncludeDetailInExceptions, message); } break; } - if (!server.IsConnected) + + // If we're not allowed to queue while disconnected, we'll bomb out below. + if (!server.IsConnected && !RawConfig.BacklogPolicy.QueueWhileDisconnected) { - // well, that's no use! + // Well, that's no use! 
server = null; } } @@ -2235,414 +2006,285 @@ private bool PrepareToPushMessageToBridge(Message message, ResultProcessor int availableDatabases = server.Databases; if (availableDatabases > 0 && message.Db >= availableDatabases) { - throw ExceptionFactory.DatabaseOutfRange(IncludeDetailInExceptions, message.Db, message, server); + throw ExceptionFactory.DatabaseOutfRange(RawConfig.IncludeDetailInExceptions, message.Db, message, server); } } - Trace("Queueing on server: " + message); + Trace("Queuing on server: " + message); return true; } Trace("No server or server unavailable - aborting: " + message); return false; } - private ValueTask TryPushMessageToBridgeAsync(Message message, ResultProcessor processor, IResultBox resultBox, ref ServerEndPoint server) + + private ValueTask TryPushMessageToBridgeAsync(Message message, ResultProcessor? processor, IResultBox? resultBox, [NotNullWhen(true)] ref ServerEndPoint? server) => PrepareToPushMessageToBridge(message, processor, resultBox, ref server) ? server.TryWriteAsync(message) : new ValueTask(WriteResult.NoConnectionAvailable); [Obsolete("prefer async")] -#pragma warning disable CS0618 - private WriteResult TryPushMessageToBridgeSync(Message message, ResultProcessor processor, IResultBox resultBox, ref ServerEndPoint server) + private WriteResult TryPushMessageToBridgeSync(Message message, ResultProcessor? processor, IResultBox? resultBox, [NotNullWhen(true)] ref ServerEndPoint? server) => PrepareToPushMessageToBridge(message, processor, resultBox, ref server) ? server.TryWriteSync(message) : WriteResult.NoConnectionAvailable; -#pragma warning restore CS0618 /// - /// See Object.ToString() + /// Gets the client name for this multiplexer. 
/// - public override string ToString() - { - string s = ClientName; - if (string.IsNullOrWhiteSpace(s)) s = GetType().Name; - return s; - } - - internal readonly byte[] ConfigurationChangedChannel; // this gets accessed for every received event; let's make sure we can process it "raw" - internal readonly byte[] UniqueId = Guid.NewGuid().ToByteArray(); // unique identifier used when tracing + public override string ToString() => string.IsNullOrWhiteSpace(ClientName) ? GetType().Name : ClientName; - /// - /// Gets or sets whether asynchronous operations should be invoked in a way that guarantees their original delivery order - /// - [Obsolete("Not supported; if you require ordered pub/sub, please see " + nameof(ChannelMessageQueue), false)] - public bool PreserveAsyncOrder + internal Exception GetException(WriteResult result, Message message, ServerEndPoint? server, PhysicalBridge? bridge = null) => result switch { - get => false; - set { } - } + WriteResult.Success => throw new ArgumentOutOfRangeException(nameof(result), "Be sure to check result isn't successful before calling GetException."), + WriteResult.NoConnectionAvailable => ExceptionFactory.NoConnectionAvailable(this, message, server), + WriteResult.TimeoutBeforeWrite => ExceptionFactory.Timeout(this, null, message, server, result, bridge), + _ => ExceptionFactory.ConnectionFailure(RawConfig.IncludeDetailInExceptions, ConnectionFailureType.ProtocolFailure, "An unknown error occurred when writing the message", server), + }; - /// - /// Indicates whether any servers are connected - /// - public bool IsConnected + [System.Diagnostics.CodeAnalysis.SuppressMessage("Usage", "CA1816:Dispose methods should call SuppressFinalize", Justification = "Intentional observation")] + internal static void ThrowFailed(TaskCompletionSource? 
source, Exception unthrownException) { - get + try { - var tmp = GetServerSnapshot(); - for (int i = 0; i < tmp.Length; i++) - if (tmp[i].IsConnected) return true; - return false; + throw unthrownException; } - } - - /// - /// Indicates whether any servers are currently trying to connect - /// - public bool IsConnecting - { - get + catch (Exception ex) { - var tmp = GetServerSnapshot(); - for (int i = 0; i < tmp.Length; i++) - if (tmp[i].IsConnecting) return true; - return false; + if (source is not null) + { + source.TrySetException(ex); + GC.KeepAlive(source.Task.Exception); + GC.SuppressFinalize(source.Task); + } } } - internal ConfigurationOptions RawConfig { get; } - - internal ServerSelectionStrategy ServerSelectionStrategy { get; } - - internal Timer sentinelMasterReconnectTimer; - - internal Dictionary sentinelConnectionChildren = new Dictionary(); - internal ConnectionMultiplexer sentinelConnection = null; - - /// - /// Initializes the connection as a Sentinel connection and adds - /// the necessary event handlers to track changes to the managed - /// masters. - /// - /// - internal void InitializeSentinel(LogProxy logProxy) + [return: NotNullIfNotNull(nameof(defaultValue))] + internal T? ExecuteSyncImpl(Message message, ResultProcessor? processor, ServerEndPoint? server, T? 
defaultValue = default) { - if (ServerSelectionStrategy.ServerType != ServerType.Sentinel) + if (_isDisposed) throw new ObjectDisposedException(ToString()); + + if (message is null) // Fire-and forget could involve a no-op, represented by null - for example Increment by 0 { - return; + return defaultValue; } - // Subscribe to sentinel change events - ISubscriber sub = GetSubscriber(); + Interlocked.Increment(ref syncOps); - if (sub.SubscribedEndpoint("+switch-master") == null) + if (message.IsFireAndForget) { - sub.Subscribe("+switch-master", (channel, message) => - { - string[] messageParts = ((string)message).Split(new[] { ' ' }, StringSplitOptions.RemoveEmptyEntries); - EndPoint switchBlame = Format.TryParseEndPoint(string.Format("{0}:{1}", messageParts[1], messageParts[2])); +#pragma warning disable CS0618 // Type or member is obsolete + TryPushMessageToBridgeSync(message, processor, null, ref server); +#pragma warning restore CS0618 + Interlocked.Increment(ref fireAndForgets); + return defaultValue; + } + else + { + var source = SimpleResultBox.Get(); - lock (sentinelConnectionChildren) + bool timeout = false; + WriteResult result; + lock (source) + { +#pragma warning disable CS0618 // Type or member is obsolete + result = TryPushMessageToBridgeSync(message, processor, source, ref server); +#pragma warning restore CS0618 + if (!source.IsFaulted) // if we faulted while writing, we don't need to wait { - // Switch the master if we have connections for that service - if (sentinelConnectionChildren.ContainsKey(messageParts[0])) + if (result != WriteResult.Success) { - ConnectionMultiplexer child = sentinelConnectionChildren[messageParts[0]]; + throw GetException(result, message, server); + } - // Is the connection still valid? 
- if (child.IsDisposed) - { - child.ConnectionFailed -= OnManagedConnectionFailed; - child.ConnectionRestored -= OnManagedConnectionRestored; - sentinelConnectionChildren.Remove(messageParts[0]); - } - else - { - SwitchMaster(switchBlame, sentinelConnectionChildren[messageParts[0]]); - } + if (Monitor.Wait(source, TimeoutMilliseconds)) + { + Trace("Timely response to " + message); + } + else + { + Trace("Timeout performing " + message); + timeout = true; } } - }); - } - - // If we lose connection to a sentinel server, - // We need to reconfigure to make sure we still have - // a subscription to the +switch-master channel. - ConnectionFailed += (sender, e) => - { - // Reconfigure to get subscriptions back online - ReconfigureAsync(first: false, reconfigureAll: true, logProxy, e.EndPoint, "Lost sentinel connection", false).Wait(); - }; + } - // Subscribe to new sentinels being added - if (sub.SubscribedEndpoint("+sentinel") == null) - { - sub.Subscribe("+sentinel", (channel, message) => + if (timeout) // note we throw *outside* of the main lock to avoid deadlock scenarios (#2376) { - string[] messageParts = ((string)message).Split(new[] { ' ' }, StringSplitOptions.RemoveEmptyEntries); - UpdateSentinelAddressList(messageParts[0]); - }); + Interlocked.Increment(ref syncTimeouts); + // Very important not to return "source" to the pool here + // Also note we return "success" when queueing a messages to the backlog, so we need to manually fake it back here when timing out in the backlog + throw ExceptionFactory.Timeout(this, null, message, server, message.IsBacklogged ? WriteResult.TimeoutBeforeWrite : result, server?.GetBridge(message.Command, create: false)); + } + // Snapshot these so that we can recycle the box + var val = source.GetResult(out var ex, canRecycle: true); // now that we aren't locking it... 
+ if (ex != null) throw ex; + Trace(message + " received " + val); + return val; } } - /// - /// Returns a managed connection to the master server indicated by - /// the ServiceName in the config. - /// - /// the configuration to be used when connecting to the master - /// - public ConnectionMultiplexer GetSentinelMasterConnection(ConfigurationOptions config, TextWriter log = null) + internal Task ExecuteAsyncImpl(Message? message, ResultProcessor? processor, object? state, ServerEndPoint? server, T defaultValue) { - if (ServerSelectionStrategy.ServerType != ServerType.Sentinel) - throw new RedisConnectionException(ConnectionFailureType.UnableToConnect, - "Sentinel: The ConnectionMultiplexer is not a Sentinel connection. Detected as: " + ServerSelectionStrategy.ServerType); - - if (string.IsNullOrEmpty(config.ServiceName)) - throw new ArgumentException("A ServiceName must be specified."); - - lock (sentinelConnectionChildren) + static async Task ExecuteAsyncImpl_Awaited(ConnectionMultiplexer @this, ValueTask write, TaskCompletionSource? tcs, Message message, ServerEndPoint? server, T defaultValue) { - if (sentinelConnectionChildren.TryGetValue(config.ServiceName, out var sentinelConnectionChild) && !sentinelConnectionChild.IsDisposed) - return sentinelConnectionChild; + var result = await write.ForAwait(); + if (result != WriteResult.Success) + { + var ex = @this.GetException(result, message, server); + ThrowFailed(tcs, ex); + } + return tcs == null ? defaultValue : await tcs.Task.ForAwait(); } - bool success = false; - ConnectionMultiplexer connection = null; + if (_isDisposed) throw new ObjectDisposedException(ToString()); - var sw = Stopwatch.StartNew(); - do + if (message == null) { - // Get an initial endpoint - try twice - EndPoint newMasterEndPoint = GetConfiguredMasterForService(config.ServiceName) - ?? 
GetConfiguredMasterForService(config.ServiceName); - - if (newMasterEndPoint == null) - { - throw new RedisConnectionException(ConnectionFailureType.UnableToConnect, - $"Sentinel: Failed connecting to configured master for service: {config.ServiceName}"); - } + return CompletedTask.FromDefault(defaultValue, state); + } - EndPoint[] replicaEndPoints = GetReplicasForService(config.ServiceName) - ?? GetReplicasForService(config.ServiceName); + Interlocked.Increment(ref asyncOps); - // Replace the master endpoint, if we found another one - // If not, assume the last state is the best we have and minimize the race - if (config.EndPoints.Count == 1) - { - config.EndPoints[0] = newMasterEndPoint; - } - else - { - config.EndPoints.Clear(); - config.EndPoints.TryAdd(newMasterEndPoint); - } + TaskCompletionSource? tcs = null; + IResultBox? source = null; + if (!message.IsFireAndForget) + { + source = TaskResultBox.Create(out tcs, state); + } + var write = TryPushMessageToBridgeAsync(message, processor, source, ref server); + if (!write.IsCompletedSuccessfully) + { + return ExecuteAsyncImpl_Awaited(this, write, tcs, message, server, defaultValue); + } - foreach (var replicaEndPoint in replicaEndPoints) + if (tcs == null) + { + return CompletedTask.FromDefault(defaultValue, null); // F+F explicitly does not get async-state + } + else + { + var result = write.Result; + if (result != WriteResult.Success) { - config.EndPoints.TryAdd(replicaEndPoint); + var ex = GetException(result, message, server); + ThrowFailed(tcs, ex); } + return tcs.Task; + } + } - connection = ConnectImpl(config, log); - - // verify role is master according to: - // https://redis.io/topics/sentinel-clients - if (connection.GetServer(newMasterEndPoint)?.Role().Value == RedisLiterals.master) + internal Task ExecuteAsyncImpl(Message? message, ResultProcessor? processor, object? state, ServerEndPoint? 
server) + { + [return: NotNullIfNotNull(nameof(tcs))] + static async Task ExecuteAsyncImpl_Awaited(ConnectionMultiplexer @this, ValueTask write, TaskCompletionSource? tcs, Message message, ServerEndPoint? server) + { + var result = await write.ForAwait(); + if (result != WriteResult.Success) { - success = true; - break; + var ex = @this.GetException(result, message, server); + ThrowFailed(tcs, ex); } + return tcs == null ? default : await tcs.Task.ForAwait(); + } - Thread.Sleep(100); - } while (sw.ElapsedMilliseconds < config.ConnectTimeout); + if (_isDisposed) throw new ObjectDisposedException(ToString()); - if (!success) + if (message == null) { - throw new RedisConnectionException(ConnectionFailureType.UnableToConnect, - $"Sentinel: Failed connecting to configured master for service: {config.ServiceName}"); + return CompletedTask.Default(state); } - // Attach to reconnect event to ensure proper connection to the new master - connection.ConnectionRestored += OnManagedConnectionRestored; + Interlocked.Increment(ref asyncOps); - // If we lost the connection, run a switch to a least try and get updated info about the master - connection.ConnectionFailed += OnManagedConnectionFailed; - - lock (sentinelConnectionChildren) + TaskCompletionSource? tcs = null; + IResultBox? 
source = null; + if (!message.IsFireAndForget) { - sentinelConnectionChildren[connection.RawConfig.ServiceName] = connection; + source = TaskResultBox.Create(out tcs, state); + } + var write = TryPushMessageToBridgeAsync(message, processor, source!, ref server); + if (!write.IsCompletedSuccessfully) + { + return ExecuteAsyncImpl_Awaited(this, write, tcs, message, server); } - // Perform the initial switchover - SwitchMaster(RawConfig.EndPoints[0], connection, log); - - return connection; - } - - internal void OnManagedConnectionRestored(object sender, ConnectionFailedEventArgs e) - { - ConnectionMultiplexer connection = (ConnectionMultiplexer)sender; - - if (connection.sentinelMasterReconnectTimer != null) + if (tcs == null) { - connection.sentinelMasterReconnectTimer.Dispose(); - connection.sentinelMasterReconnectTimer = null; + return CompletedTask.Default(null); // F+F explicitly does not get async-state } - try + else { - - // Run a switch to make sure we have update-to-date - // information about which master we should connect to - SwitchMaster(e.EndPoint, connection); - - try - { - // Verify that the reconnected endpoint is a master, - // and the correct one otherwise we should reconnect - if (connection.GetServer(e.EndPoint).IsReplica || e.EndPoint != connection.currentSentinelMasterEndPoint) - { - // This isn't a master, so try connecting again - SwitchMaster(e.EndPoint, connection); - } - } - catch (Exception) + var result = write.Result; + if (result != WriteResult.Success) { - // If we get here it means that we tried to reconnect to a server that is no longer - // considered a master by Sentinel and was removed from the list of endpoints. 
- - // If we caught an exception, we may have gotten a stale endpoint - // we are not aware of, so retry - SwitchMaster(e.EndPoint, connection); + var ex = GetException(result, message, server); + ThrowFailed(tcs, ex); } - } - catch (Exception) - { - // Log, but don't throw in an event handler - // TODO: Log via new event handler? a la ConnectionFailed? + return tcs.Task; } } - internal void OnManagedConnectionFailed(object sender, ConnectionFailedEventArgs e) + internal void OnAsyncTimeout() => Interlocked.Increment(ref asyncTimeouts); + + /// + /// Sends request to all compatible clients to reconfigure or reconnect. + /// + /// The command flags to use. + /// The number of instances known to have received the message (however, the actual number can be higher; returns -1 if the operation is pending). + public long PublishReconfigure(CommandFlags flags = CommandFlags.None) { - ConnectionMultiplexer connection = (ConnectionMultiplexer)sender; - // Periodically check to see if we can reconnect to the proper master. - // This is here in case we lost our subscription to a good sentinel instance - // or if we miss the published master change - if (connection.sentinelMasterReconnectTimer == null) + if (ConfigurationChangedChannel is not null) { - connection.sentinelMasterReconnectTimer = new Timer((_) => - { - try - { - // Attempt, but do not fail here - SwitchMaster(e.EndPoint, connection); - } - catch (Exception) - { - - } - }, null, TimeSpan.FromSeconds(0), TimeSpan.FromSeconds(1)); + return ReconfigureIfNeeded(null, false, "PublishReconfigure", true, flags) + ? 
-1 + : PublishReconfigureImpl(flags); } + return 0; } - internal EndPoint GetConfiguredMasterForService(string serviceName) => - GetServerSnapshot() - .ToArray() - .Where(s => s.ServerType == ServerType.Sentinel) - .AsParallel() - .Select(s => - { - try { return GetServer(s.EndPoint).SentinelGetMasterAddressByName(serviceName); } - catch { return null; } - }) - .FirstOrDefault(r => r != null); - - internal EndPoint currentSentinelMasterEndPoint; + private long PublishReconfigureImpl(CommandFlags flags) => + ConfigurationChangedChannel is byte[] channel + ? GetSubscriber().Publish(RedisChannel.Literal(channel), RedisLiterals.Wildcard, flags) + : 0; - internal EndPoint[] GetReplicasForService(string serviceName) => - GetServerSnapshot() - .ToArray() - .Where(s => s.ServerType == ServerType.Sentinel) - .AsParallel() - .Select(s => - { - try { return GetServer(s.EndPoint).SentinelGetReplicaAddresses(serviceName); } - catch { return null; } - }) - .FirstOrDefault(r => r != null); + /// + /// Sends request to all compatible clients to reconfigure or reconnect. + /// + /// The command flags to use. + /// The number of instances known to have received the message (however, the actual number can be higher). + public Task PublishReconfigureAsync(CommandFlags flags = CommandFlags.None) => + ConfigurationChangedChannel is byte[] channel + ? GetSubscriber().PublishAsync(RedisChannel.Literal(channel), RedisLiterals.Wildcard, flags) + : CompletedTask.Default(null); /// - /// Switches the SentinelMasterConnection over to a new master. + /// Release all resources associated with this object. 
/// - /// The endpoint responsible for the switch - /// The connection that should be switched over to a new master endpoint - /// Log to write to, if any - internal void SwitchMaster(EndPoint switchBlame, ConnectionMultiplexer connection, TextWriter log = null) + public void Dispose() { - if (log == null) log = TextWriter.Null; - - using (var logProxy = LogProxy.TryCreate(log)) - { - string serviceName = connection.RawConfig.ServiceName; - - // Get new master - try twice - EndPoint newMasterEndPoint = GetConfiguredMasterForService(serviceName) - ?? GetConfiguredMasterForService(serviceName) - ?? throw new RedisConnectionException(ConnectionFailureType.UnableToConnect, - $"Sentinel: Failed connecting to switch master for service: {serviceName}"); - - connection.currentSentinelMasterEndPoint = newMasterEndPoint; - - if (!connection.servers.Contains(newMasterEndPoint)) - { - EndPoint[] replicaEndPoints = GetReplicasForService(serviceName) - ?? GetReplicasForService(serviceName); - - connection.servers.Clear(); - connection.RawConfig.EndPoints.Clear(); - connection.RawConfig.EndPoints.TryAdd(newMasterEndPoint); - foreach (var replicaEndPoint in replicaEndPoints) - { - connection.RawConfig.EndPoints.TryAdd(replicaEndPoint); - } - Trace(string.Format("Switching master to {0}", newMasterEndPoint)); - // Trigger a reconfigure - connection.ReconfigureAsync(first: false, reconfigureAll: false, logProxy, switchBlame, - string.Format("master switch {0}", serviceName), false, CommandFlags.PreferMaster).Wait(); - - UpdateSentinelAddressList(serviceName); - } - } + GC.SuppressFinalize(this); + if (!_isDisposed) Interlocked.Increment(ref s_DisposedCount); + Close(!_isDisposed); // marks disposed + sentinelConnection?.Dispose(); + var oldTimer = Interlocked.Exchange(ref sentinelPrimaryReconnectTimer, null); + oldTimer?.Dispose(); } - internal void UpdateSentinelAddressList(string serviceName) + /// + /// Release all resources associated with this object. 
+ /// + public async ValueTask DisposeAsync() { - var firstCompleteRequest = GetServerSnapshot() - .ToArray() - .Where(s => s.ServerType == ServerType.Sentinel) - .AsParallel() - .Select(s => - { - try { return GetServer(s.EndPoint).SentinelGetSentinelAddresses(serviceName); } - catch { return null; } - }) - .FirstOrDefault(r => r != null); - - // Ignore errors, as having an updated sentinel list is - // not essential - if (firstCompleteRequest == null) - return; - - bool hasNew = false; - foreach (EndPoint newSentinel in firstCompleteRequest.Where(x => !RawConfig.EndPoints.Contains(x))) - { - hasNew = true; - RawConfig.EndPoints.TryAdd(newSentinel); - } - - if (hasNew) + GC.SuppressFinalize(this); + if (!_isDisposed) Interlocked.Increment(ref s_DisposedCount); + await CloseAsync(!_isDisposed).ForAwait(); // marks disposed + if (sentinelConnection is ConnectionMultiplexer sentinel) { - // Reconfigure the sentinel multiplexer if we added new endpoints - ReconfigureAsync(first: false, reconfigureAll: true, null, RawConfig.EndPoints[0], "Updating Sentinel List", false).Wait(); + await sentinel.DisposeAsync().ForAwait(); } + var oldTimer = Interlocked.Exchange(ref sentinelPrimaryReconnectTimer, null); + oldTimer?.Dispose(); } /// - /// Close all connections and release all resources associated with this object + /// Close all connections and release all resources associated with this object. /// /// Whether to allow all in-queue commands to complete first. 
public void Close(bool allowCommandsToComplete = true) @@ -2668,41 +2310,8 @@ public void Close(bool allowCommandsToComplete = true) Interlocked.Increment(ref _connectionCloseCount); } - partial void OnCloseReaderWriter(); - - private void DisposeAndClearServers() - { - lock (servers) - { - var iter = servers.GetEnumerator(); - while (iter.MoveNext()) - { - var server = (ServerEndPoint)iter.Value; - server.Dispose(); - } - servers.Clear(); - } - } - - private Task[] QuitAllServers() - { - var quits = new Task[2 * servers.Count]; - lock (servers) - { - var iter = servers.GetEnumerator(); - int index = 0; - while (iter.MoveNext()) - { - var server = (ServerEndPoint)iter.Value; - quits[index++] = server.Close(ConnectionType.Interactive); - quits[index++] = server.Close(ConnectionType.Subscription); - } - } - return quits; - } - /// - /// Close all connections and release all resources associated with this object + /// Close all connections and release all resources associated with this object. /// /// Whether to allow all in-queue commands to complete first. 
public async Task CloseAsync(bool allowCommandsToComplete = true) @@ -2722,230 +2331,37 @@ public async Task CloseAsync(bool allowCommandsToComplete = true) DisposeAndClearServers(); } - /// - /// Release all resources associated with this object - /// - public void Dispose() - { - GC.SuppressFinalize(this); - Close(!_isDisposed); - sentinelConnection?.Dispose(); - } - - internal Task ExecuteAsyncImpl(Message message, ResultProcessor processor, object state, ServerEndPoint server) + private void DisposeAndClearServers() { - if (_isDisposed) throw new ObjectDisposedException(ToString()); - - if (message == null) - { - return CompletedTask.Default(state); - } - - TaskCompletionSource tcs = null; - IResultBox source = null; - if (!message.IsFireAndForget) - { - source = TaskResultBox.Create(out tcs, state); - } - var write = TryPushMessageToBridgeAsync(message, processor, source, ref server); - if (!write.IsCompletedSuccessfully) return ExecuteAsyncImpl_Awaited(this, write, tcs, message, server); - - if (tcs == null) - { - return CompletedTask.Default(null); // F+F explicitly does not get async-state - } - else + lock (servers) { - var result = write.Result; - if (result != WriteResult.Success) + var iter = servers.GetEnumerator(); + while (iter.MoveNext()) { - var ex = GetException(result, message, server); - ThrowFailed(tcs, ex); + (iter.Value as ServerEndPoint)?.Dispose(); } - return tcs.Task; - } - } - - private static async Task ExecuteAsyncImpl_Awaited(ConnectionMultiplexer @this, ValueTask write, TaskCompletionSource tcs, Message message, ServerEndPoint server) - { - var result = await write.ForAwait(); - if (result != WriteResult.Success) - { - var ex = @this.GetException(result, message, server); - ThrowFailed(tcs, ex); - } - return tcs == null ? 
default(T) : await tcs.Task.ForAwait(); - } - - internal Exception GetException(WriteResult result, Message message, ServerEndPoint server) - { - switch (result) - { - case WriteResult.Success: return null; - case WriteResult.NoConnectionAvailable: - return ExceptionFactory.NoConnectionAvailable(this, message, server); - case WriteResult.TimeoutBeforeWrite: - return ExceptionFactory.Timeout(this, "The timeout was reached before the message could be written to the output buffer, and it was not sent", message, server, result); - case WriteResult.WriteFailure: - default: - return ExceptionFactory.ConnectionFailure(IncludeDetailInExceptions, ConnectionFailureType.ProtocolFailure, "An unknown error occurred when writing the message", server); - } - } - - internal static void ThrowFailed(TaskCompletionSource source, Exception unthrownException) - { - try - { - throw unthrownException; - } - catch (Exception ex) - { - source.TrySetException(ex); - GC.KeepAlive(source.Task.Exception); - GC.SuppressFinalize(source.Task); + servers.Clear(); } } - internal T ExecuteSyncImpl(Message message, ResultProcessor processor, ServerEndPoint server) + private Task[] QuitAllServers() { - if (_isDisposed) throw new ObjectDisposedException(ToString()); - - if (message == null) // fire-and forget could involve a no-op, represented by null - for example Increment by 0 - { - return default(T); - } - - if (message.IsFireAndForget) - { -#pragma warning disable CS0618 - TryPushMessageToBridgeSync(message, processor, null, ref server); -#pragma warning restore CS0618 - Interlocked.Increment(ref fireAndForgets); - return default(T); - } - else + var quits = new Task[2 * servers.Count]; + lock (servers) { - var source = SimpleResultBox.Get(); - - lock (source) + var iter = servers.GetEnumerator(); + int index = 0; + while (iter.MoveNext()) { -#pragma warning disable CS0618 - var result = TryPushMessageToBridgeSync(message, processor, source, ref server); -#pragma warning restore CS0618 - if 
(result != WriteResult.Success) - { - throw GetException(result, message, server); - } - - if (Monitor.Wait(source, TimeoutMilliseconds)) - { - Trace("Timeley response to " + message); - } - else - { - Trace("Timeout performing " + message); - Interlocked.Increment(ref syncTimeouts); - throw ExceptionFactory.Timeout(this, null, message, server); - // very important not to return "source" to the pool here - } + var server = (ServerEndPoint)iter.Value!; + quits[index++] = server.Close(ConnectionType.Interactive); + quits[index++] = server.Close(ConnectionType.Subscription); } - // snapshot these so that we can recycle the box - var val = source.GetResult(out var ex, canRecycle: true); // now that we aren't locking it... - if (ex != null) throw ex; - Trace(message + " received " + val); - return val; - } - } - - /// - /// Should exceptions include identifiable details? (key names, additional .Data annotations) - /// - public bool IncludeDetailInExceptions { get; set; } - - /// - /// Should exceptions include performance counter details? 
(CPU usage, etc - note that this can be problematic on some platforms) - /// - public bool IncludePerformanceCountersInExceptions { get; set; } - - internal int haveStormLog = 0; - internal string stormLogSnapshot; - /// - /// Limit at which to start recording unusual busy patterns (only one log will be retained at a time; - /// set to a negative value to disable this feature) - /// - public int StormLogThreshold { get; set; } = 15; - - /// - /// Obtains the log of unusual busy patterns - /// - public string GetStormLog() - { - return Volatile.Read(ref stormLogSnapshot); - } - /// - /// Resets the log of unusual busy patterns - /// - public void ResetStormLog() - { - Interlocked.Exchange(ref stormLogSnapshot, null); - Interlocked.Exchange(ref haveStormLog, 0); - } - - private long syncTimeouts, fireAndForgets, asyncTimeouts; - - internal void OnAsyncTimeout() => Interlocked.Increment(ref asyncTimeouts); - - /// - /// Request all compatible clients to reconfigure or reconnect - /// - /// The command flags to use.2 - /// The number of instances known to have received the message (however, the actual number can be higher; returns -1 if the operation is pending) - public long PublishReconfigure(CommandFlags flags = CommandFlags.None) - { - byte[] channel = ConfigurationChangedChannel; - if (channel == null) return 0; - if (ReconfigureIfNeeded(null, false, "PublishReconfigure", true, flags)) - { - return -1; - } - else - { - return PublishReconfigureImpl(flags); } + return quits; } - private long PublishReconfigureImpl(CommandFlags flags) - { - byte[] channel = ConfigurationChangedChannel; - if (channel == null) return 0; - return GetSubscriber().Publish(channel, RedisLiterals.Wildcard, flags); - } - - /// - /// Request all compatible clients to reconfigure or reconnect - /// - /// The command flags to use. 
- /// The number of instances known to have received the message (however, the actual number can be higher) - public Task PublishReconfigureAsync(CommandFlags flags = CommandFlags.None) - { - byte[] channel = ConfigurationChangedChannel; - if (channel == null) return CompletedTask.Default(null); - - return GetSubscriber().PublishAsync(channel, RedisLiterals.Wildcard, flags); - } - - /// - /// Get the hash-slot associated with a given key, if applicable; this can be useful for grouping operations - /// - /// The to determine the hash slot for. - public int GetHashSlot(RedisKey key) => ServerSelectionStrategy.HashSlot(key); - } - - internal enum WriteResult - { - Success, - NoConnectionAvailable, - TimeoutBeforeWrite, - WriteFailure, + long? IInternalConnectionMultiplexer.GetConnectionId(EndPoint endpoint, ConnectionType type) + => GetServerEndPoint(endpoint)?.GetBridge(type)?.ConnectionId; } } diff --git a/src/StackExchange.Redis/CursorEnumerable.cs b/src/StackExchange.Redis/CursorEnumerable.cs index fbe170e51..921d83ce0 100644 --- a/src/StackExchange.Redis/CursorEnumerable.cs +++ b/src/StackExchange.Redis/CursorEnumerable.cs @@ -10,19 +10,20 @@ namespace StackExchange.Redis { /// - /// Provides the ability to iterate over a cursor-based sequence of redis data, synchronously or asynchronously + /// Provides the ability to iterate over a cursor-based sequence of redis data, synchronously or asynchronously. /// + /// The type of the data in the cursor. internal abstract class CursorEnumerable : IEnumerable, IScanningCursor, IAsyncEnumerable { private readonly RedisBase redis; - private readonly ServerEndPoint server; + private readonly ServerEndPoint? server; private protected readonly int db; private protected readonly CommandFlags flags; private protected readonly int pageSize, initialOffset; private protected readonly RedisValue initialCursor; - private volatile IScanningCursor activeCursor; + private volatile IScanningCursor? 
activeCursor; - private protected CursorEnumerable(RedisBase redis, ServerEndPoint server, int db, int pageSize, in RedisValue cursor, int pageOffset, CommandFlags flags) + private protected CursorEnumerable(RedisBase redis, ServerEndPoint? server, int db, int pageSize, in RedisValue cursor, int pageOffset, CommandFlags flags) { if (pageOffset < 0) throw new ArgumentOutOfRangeException(nameof(pageOffset)); this.redis = redis; @@ -35,11 +36,12 @@ private protected CursorEnumerable(RedisBase redis, ServerEndPoint server, int d } /// - /// Gets an enumerator for the sequence + /// Gets an enumerator for the sequence. /// public Enumerator GetEnumerator() => new Enumerator(this, default); + /// - /// Gets an enumerator for the sequence + /// Gets an enumerator for the sequence. /// public Enumerator GetAsyncEnumerator(CancellationToken cancellationToken) => new Enumerator(this, cancellationToken); @@ -50,10 +52,10 @@ private protected CursorEnumerable(RedisBase redis, ServerEndPoint server, int d internal readonly struct ScanResult { public readonly RedisValue Cursor; - public readonly T[] ValuesOversized; + public readonly T[]? ValuesOversized; public readonly int Count; public readonly bool IsPooled; - public ScanResult(RedisValue cursor, T[] valuesOversized, int count, bool isPooled) + public ScanResult(RedisValue cursor, T[]? valuesOversized, int count, bool isPooled) { Cursor = cursor; ValuesOversized = valuesOversized; @@ -62,11 +64,11 @@ public ScanResult(RedisValue cursor, T[] valuesOversized, int count, bool isPool } } - private protected abstract Message CreateMessage(in RedisValue cursor); + private protected abstract Message? CreateMessage(in RedisValue cursor); - private protected abstract ResultProcessor Processor { get; } + private protected abstract ResultProcessor? 
Processor { get; } - private protected virtual Task GetNextPageAsync(IScanningCursor obj, RedisValue cursor, out Message message) + private protected virtual Task GetNextPageAsync(IScanningCursor obj, RedisValue cursor, out Message? message) { activeCursor = obj; message = CreateMessage(cursor); @@ -74,7 +76,7 @@ private protected virtual Task GetNextPageAsync(IScanningCursor obj, } /// - /// Provides the ability to iterate over a cursor-based sequence of redis data, synchronously or asynchronously + /// Provides the ability to iterate over a cursor-based sequence of redis data, synchronously or asynchronously. /// public class Enumerator : IEnumerator, IScanningCursor, IAsyncEnumerator { @@ -88,24 +90,26 @@ internal Enumerator(CursorEnumerable parent, CancellationToken cancellationTo } /// - /// Gets the current value of the enumerator + /// Gets the current value of the enumerator. /// - public T Current { + public T Current + { [MethodImpl(MethodImplOptions.AggressiveInlining)] get { - Debug.Assert(_pageOffset >= 0 & _pageOffset < _pageCount & _pageOversized.Length >= _pageCount); + Debug.Assert(_pageOffset >= 0 & _pageOffset < _pageCount & _pageOversized!.Length >= _pageCount); return _pageOversized[_pageOffset]; } } /// - /// Release all resources associated with this enumerator + /// Release all resources associated with this enumerator. /// public void Dispose() { _state = State.Disposed; SetComplete(); + GC.SuppressFinalize(this); } private void SetComplete() @@ -122,32 +126,33 @@ private void SetComplete() } /// - /// Release all resources associated with this enumerator + /// Release all resources associated with this enumerator. /// public ValueTask DisposeAsync() { Dispose(); + GC.SuppressFinalize(this); return default; } - - object IEnumerator.Current => _pageOversized[_pageOffset]; + object? 
IEnumerator.Current => _pageOversized![_pageOffset]; private bool SimpleNext() { if (_pageOffset + 1 < _pageCount) { + cancellationToken.ThrowIfCancellationRequested(); _pageOffset++; return true; } return false; } - private T[] _pageOversized; + private T[]? _pageOversized; private int _pageCount, _pageOffset, _pageIndex = -1; private bool _isPooled; - private Task _pending; - private Message _pendingMessage; + private Task? _pending; + private Message? _pendingMessage; private RedisValue _currentCursor, _nextCursor; private volatile State _state; @@ -163,13 +168,14 @@ private void ProcessReply(in ScanResult result, bool isInitial) { _currentCursor = _nextCursor; _nextCursor = result.Cursor; - _pageOffset = isInitial ? parent.initialOffset - 1 : -1; + _pageOffset = isInitial ? parent.initialOffset - 1 : -1; Recycle(ref _pageOversized, ref _isPooled); // recycle any existing data _pageOversized = result.ValuesOversized ?? Array.Empty(); _isPooled = result.IsPooled; _pageCount = result.Count; if (_nextCursor == RedisBase.CursorUtils.Origin) - { // eof + { + // EOF _pending = null; _pendingMessage = null; } @@ -181,15 +187,15 @@ private void ProcessReply(in ScanResult result, bool isInitial) } /// - /// Try to move to the next item in the sequence + /// Try to move to the next item in the sequence. /// public bool MoveNext() => SimpleNext() || SlowNextSync(); - bool SlowNextSync() + private bool SlowNextSync() { var pending = SlowNextAsync(); if (pending.IsCompletedSuccessfully) return pending.Result; - return Wait(pending.AsTask(), _pendingMessage); + return Wait(pending.AsTask(), _pendingMessage!); } private protected TResult Wait(Task pending, Message message) @@ -199,11 +205,11 @@ private protected TResult Wait(Task pending, Message message) } /// - /// Try to move to the next item in the sequence + /// Try to move to the next item in the sequence. 
/// public ValueTask MoveNextAsync() { - if(SimpleNext()) return new ValueTask(true); + if (SimpleNext()) return new ValueTask(true); return SlowNextAsync(); } @@ -219,8 +225,8 @@ private ValueTask SlowNextAsync() _state = State.Running; goto case State.Running; case State.Running: - Task pending; - while ((pending = _pending) != null & _state == State.Running) + Task? pending; + while ((pending = _pending) != null && _state == State.Running) { if (!pending.IsCompleted) return AwaitedNextAsync(isInitial); ProcessReply(pending.Result, isInitial); @@ -263,15 +269,15 @@ private void TryAppendExceptionState(Exception ex) private async ValueTask AwaitedNextAsync(bool isInitial) { - Task pending; - while ((pending = _pending) != null & _state == State.Running) + Task? pending; + while ((pending = _pending) != null && _state == State.Running) { ScanResult scanResult; try { - scanResult = await pending.ForAwait(); + scanResult = await pending.WaitAsync(cancellationToken).ForAwait(); } - catch(Exception ex) + catch (Exception ex) { TryAppendExceptionState(ex); throw; @@ -285,21 +291,26 @@ private async ValueTask AwaitedNextAsync(bool isInitial) return false; } - static void Recycle(ref T[] array, ref bool isPooled) + private static void Recycle(ref T[]? array, ref bool isPooled) { var tmp = array; array = null; if (tmp != null && tmp.Length != 0 && isPooled) + { ArrayPool.Shared.Return(tmp); + } isPooled = false; } /// - /// Reset the enumerator + /// Reset the enumerator. 
/// public void Reset() { - if (_state == State.Disposed) throw new ObjectDisposedException(GetType().Name); + if (_state == State.Disposed) + { + throw new ObjectDisposedException(GetType().Name); + } _nextCursor = _currentCursor = parent.initialCursor; _pageOffset = parent.initialOffset; // don't -1 here; this makes it look "right" before incremented _state = State.Initial; @@ -318,31 +329,31 @@ public void Reset() int IScanningCursor.PageOffset => _pageOffset; } - long IScanningCursor.Cursor // this may fail on cluster-proxy; I'm OK with this for now - { - get { var tmp = activeCursor; return tmp?.Cursor ?? (long)initialCursor; } - } + /// + /// The cursor position. + /// + /// + /// This may fail on cluster-proxy - I'm OK with this for now. + /// + long IScanningCursor.Cursor => activeCursor?.Cursor ?? (long)initialCursor; int IScanningCursor.PageSize => pageSize; - int IScanningCursor.PageOffset - { - get { var tmp = activeCursor; return tmp?.PageOffset ?? initialOffset; } - } + int IScanningCursor.PageOffset => activeCursor?.PageOffset ?? initialOffset; - internal static CursorEnumerable From(RedisBase redis, ServerEndPoint server, Task pending, int pageOffset) + internal static CursorEnumerable From(RedisBase redis, ServerEndPoint? server, Task pending, int pageOffset) => new SingleBlockEnumerable(redis, server, pending, pageOffset); - class SingleBlockEnumerable : CursorEnumerable + private sealed class SingleBlockEnumerable : CursorEnumerable { private readonly Task _pending; - public SingleBlockEnumerable(RedisBase redis, ServerEndPoint server, - Task pending, int pageOffset) : base(redis, server, 0, int.MaxValue, 0, pageOffset, default) + public SingleBlockEnumerable(RedisBase redis, ServerEndPoint? 
server, Task pending, int pageOffset) + : base(redis, server, 0, int.MaxValue, 0, pageOffset, default) { _pending = pending; } - private protected override Task GetNextPageAsync(IScanningCursor obj, RedisValue cursor, out Message message) + private protected override Task GetNextPageAsync(IScanningCursor obj, RedisValue cursor, out Message? message) { message = null; return AwaitedGetNextPageAsync(); @@ -352,8 +363,8 @@ private async Task AwaitedGetNextPageAsync() var arr = (await _pending.ForAwait()) ?? Array.Empty(); return new ScanResult(RedisBase.CursorUtils.Origin, arr, arr.Length, false); } - private protected override ResultProcessor Processor => null; - private protected override Message CreateMessage(in RedisValue cursor) => null; + private protected override ResultProcessor? Processor => null; + private protected override Message? CreateMessage(in RedisValue cursor) => null; } } } diff --git a/src/StackExchange.Redis/DebuggingAids.cs b/src/StackExchange.Redis/DebuggingAids.cs index 00f8dde90..46e81611f 100644 --- a/src/StackExchange.Redis/DebuggingAids.cs +++ b/src/StackExchange.Redis/DebuggingAids.cs @@ -1,10 +1,6 @@ -using System; -using System.Diagnostics; - -namespace StackExchange.Redis +namespace StackExchange.Redis { #if VERBOSE - partial class ConnectionMultiplexer { private readonly int epoch = Environment.TickCount; @@ -19,14 +15,6 @@ static partial void OnTraceWithoutContext(string message, string category) { Debug.WriteLine(message, Environment.CurrentManagedThreadId + " ~ " + category); } - - partial void OnTraceLog(LogProxy log, string caller) - { - lock (UniqueId) - { - Trace(log.ToString(), caller); // note that this won't always be useful, but we only do it in debug builds anyway - } - } } #endif diff --git a/src/StackExchange.Redis/EndPointCollection.cs b/src/StackExchange.Redis/EndPointCollection.cs index 99bd065a7..359f6d811 100644 --- a/src/StackExchange.Redis/EndPointCollection.cs +++ b/src/StackExchange.Redis/EndPointCollection.cs 
@@ -3,45 +3,56 @@ using System.Collections.Generic; using System.Collections.ObjectModel; using System.Net; +using System.Threading.Tasks; +using Microsoft.Extensions.Logging; namespace StackExchange.Redis { /// - /// A list of endpoints + /// A list of endpoints. /// public sealed class EndPointCollection : Collection, IEnumerable { + private static class DefaultPorts + { + public static int Standard => 6379; + public static int Ssl => 6380; + public static int Sentinel => 26379; + } + /// - /// Create a new EndPointCollection + /// Create a new . /// - public EndPointCollection() {} + public EndPointCollection() { } /// - /// Create a new EndPointCollection + /// Create a new . /// /// The endpoints to add to the collection. - public EndPointCollection(IList endpoints) : base(endpoints) {} + public EndPointCollection(IList endpoints) : base(endpoints) { } /// - /// Format an endpoint + /// Format an . /// /// The endpoint to get a string representation for. - public static string ToString(EndPoint endpoint) => Format.ToString(endpoint); + public static string ToString(EndPoint? endpoint) => Format.ToString(endpoint); /// - /// Attempt to parse a string into an EndPoint + /// Attempt to parse a string into an . /// /// The endpoint string to parse. - public static EndPoint TryParse(string endpoint) => Format.TryParseEndPoint(endpoint); + public static EndPoint? TryParse(string endpoint) => Format.TryParseEndPoint(endpoint, out var result) ? result : null; /// - /// Adds a new endpoint to the list + /// Adds a new endpoint to the list. /// /// The host:port string to add an endpoint for to the collection. 
public void Add(string hostAndPort) { - var endpoint = Format.TryParseEndPoint(hostAndPort); - if (endpoint == null) throw new ArgumentException(); + if (!Format.TryParseEndPoint(hostAndPort, out var endpoint)) + { + throw new ArgumentException($"Could not parse host and port from '{hostAndPort}'", nameof(hostAndPort)); + } Add(endpoint); } @@ -63,10 +74,13 @@ public void Add(string hostAndPort) /// Try adding a new endpoint to the list. /// /// The endpoint to add. - /// True if the endpoint was added or false if not. + /// if the endpoint was added, if not. public bool TryAdd(EndPoint endpoint) { - if (endpoint == null) throw new ArgumentNullException(nameof(endpoint)); + if (endpoint == null) + { + throw new ArgumentNullException(nameof(endpoint)); + } if (!Contains(endpoint)) { @@ -80,39 +94,60 @@ public bool TryAdd(EndPoint endpoint) } /// - /// See Collection<T>.InsertItem() + /// See . /// /// The index to add into the collection at. /// The item to insert at . protected override void InsertItem(int index, EndPoint item) { - if (item == null) throw new ArgumentNullException(nameof(item)); - if (Contains(item)) throw new ArgumentException("EndPoints must be unique", nameof(item)); + if (item == null) + { + throw new ArgumentNullException(nameof(item)); + } + if (Contains(item)) + { + throw new ArgumentException("EndPoints must be unique", nameof(item)); + } + base.InsertItem(index, item); } + /// - /// See Collection<T>.SetItem() + /// See . /// /// The index to replace an endpoint at. /// The item to replace the existing endpoint at . 
protected override void SetItem(int index, EndPoint item) { - if (item == null) throw new ArgumentNullException(nameof(item)); + if (item == null) + { + throw new ArgumentNullException(nameof(item)); + } int existingIndex; try { existingIndex = IndexOf(item); - } catch(NullReferenceException) + } + catch (NullReferenceException) { // mono has a nasty bug in DnsEndPoint.Equals; if they do bad things here: sorry, I can't help existingIndex = -1; } - if (existingIndex >= 0 && existingIndex != index) throw new ArgumentException("EndPoints must be unique", nameof(item)); + if (existingIndex >= 0 && existingIndex != index) + { + throw new ArgumentException("EndPoints must be unique", nameof(item)); + } base.SetItem(index, item); } - internal void SetDefaultPorts(int defaultPort) + internal void SetDefaultPorts(ServerType? serverType, bool ssl = false) { + int defaultPort = serverType switch + { + ServerType.Sentinel => DefaultPorts.Sentinel, + _ => ssl ? DefaultPorts.Ssl : DefaultPorts.Standard, + }; + for (int i = 0; i < Count; i++) { switch (this[i]) @@ -130,7 +165,7 @@ internal void SetDefaultPorts(int defaultPort) IEnumerator IEnumerable.GetEnumerator() => GetEnumerator(); IEnumerator IEnumerable.GetEnumerator() => GetEnumerator(); - /// + /// public new IEnumerator GetEnumerator() { // this does *not* need to handle all threading scenarios; but we do @@ -142,5 +177,58 @@ internal void SetDefaultPorts(int defaultPort) yield return this[i]; } } + + internal bool HasDnsEndPoints() + { + foreach (var endpoint in this) + { + if (endpoint is DnsEndPoint) + { + return true; + } + } + return false; + } + + internal async Task ResolveEndPointsAsync(ConnectionMultiplexer multiplexer, ILogger? 
log) + { + var cache = new Dictionary(StringComparer.OrdinalIgnoreCase); + for (int i = 0; i < Count; i++) + { + if (this[i] is DnsEndPoint dns) + { + try + { + if (dns.Host == ".") + { + this[i] = new IPEndPoint(IPAddress.Loopback, dns.Port); + } + else if (cache.TryGetValue(dns.Host, out IPAddress? ip)) + { // use cache + this[i] = new IPEndPoint(ip, dns.Port); + } + else + { + log?.LogInformationUsingDnsToResolve(dns.Host); + var ips = await Dns.GetHostAddressesAsync(dns.Host).ObserveErrors().ForAwait(); + if (ips.Length == 1) + { + ip = ips[0]; + log?.LogInformationDnsResolutionResult(dns.Host, ip); + cache[dns.Host] = ip; + this[i] = new IPEndPoint(ip, dns.Port); + } + } + } + catch (Exception ex) + { + multiplexer.OnInternalError(ex); + log?.LogErrorDnsResolution(ex, ex.Message); + } + } + } + } + + internal EndPointCollection Clone() => new EndPointCollection(new List(Items)); } } diff --git a/src/StackExchange.Redis/EndPointEventArgs.cs b/src/StackExchange.Redis/EndPointEventArgs.cs index 69be9bdf3..bef0db9b6 100644 --- a/src/StackExchange.Redis/EndPointEventArgs.cs +++ b/src/StackExchange.Redis/EndPointEventArgs.cs @@ -5,13 +5,14 @@ namespace StackExchange.Redis { /// - /// Event information related to redis endpoints + /// Event information related to redis endpoints. /// public class EndPointEventArgs : EventArgs, ICompletable { - private readonly EventHandler handler; + private readonly EventHandler? handler; private readonly object sender; - internal EndPointEventArgs(EventHandler handler, object sender, EndPoint endpoint) + + internal EndPointEventArgs(EventHandler? handler, object sender, EndPoint endpoint) { this.handler = handler; this.sender = sender; @@ -24,21 +25,17 @@ internal EndPointEventArgs(EventHandler handler, object sende /// The source of the event. /// Redis endpoint. 
public EndPointEventArgs(object sender, EndPoint endpoint) - : this (null, sender, endpoint) + : this(null, sender, endpoint) { } /// - /// The endpoint involved in this event (this can be null) + /// The endpoint involved in this event (this can be null). /// public EndPoint EndPoint { get; } - void ICompletable.AppendStormLog(StringBuilder sb) - { - sb.Append("event, endpoint: "); - if (EndPoint == null) sb.Append("n/a"); - else sb.Append(Format.ToString(EndPoint)); - } + void ICompletable.AppendStormLog(StringBuilder sb) => + sb.Append("event, endpoint: ").Append(EndPoint != null ? Format.ToString(EndPoint) : "n/a"); bool ICompletable.TryComplete(bool isAsync) => ConnectionMultiplexer.TryCompleteHandler(handler, sender, this, isAsync); } diff --git a/src/StackExchange.Redis/Enums/Aggregate.cs b/src/StackExchange.Redis/Enums/Aggregate.cs index 662eca989..41e1d435d 100644 --- a/src/StackExchange.Redis/Enums/Aggregate.cs +++ b/src/StackExchange.Redis/Enums/Aggregate.cs @@ -1,21 +1,23 @@ namespace StackExchange.Redis { /// - /// Specifies how elements should be aggregated when combining sorted sets + /// Specifies how elements should be aggregated when combining sorted sets. /// public enum Aggregate { /// - /// The values of the combined elements are added + /// The values of the combined elements are added. /// Sum, + /// - /// The least value of the combined elements is used + /// The least value of the combined elements is used. /// Min, + /// - /// The greatest value of the combined elements is used + /// The greatest value of the combined elements is used. 
/// - Max + Max, } } diff --git a/src/StackExchange.Redis/Enums/Bitwise.cs b/src/StackExchange.Redis/Enums/Bitwise.cs index ebaaceee9..82e70b38a 100644 --- a/src/StackExchange.Redis/Enums/Bitwise.cs +++ b/src/StackExchange.Redis/Enums/Bitwise.cs @@ -9,17 +9,45 @@ public enum Bitwise /// And /// And, + /// /// Or /// Or, + /// /// Xor /// Xor, + /// /// Not /// - Not + Not, + + /// + /// DIFF operation: members of X that are not members of any of Y1, Y2, ... + /// Equivalent to X ∧ ¬(Y1 ∨ Y2 ∨ ...) + /// + Diff, + + /// + /// DIFF1 operation: members of one or more of Y1, Y2, ... that are not members of X + /// Equivalent to ¬X ∧ (Y1 ∨ Y2 ∨ ...) + /// + Diff1, + + /// + /// ANDOR operation: members of X that are also members of one or more of Y1, Y2, ... + /// Equivalent to X ∧ (Y1 ∨ Y2 ∨ ...) + /// + AndOr, + + /// + /// ONE operation: members of exactly one of X1, X2, ... + /// For two bitmaps this is equivalent to XOR. For more than two bitmaps, + /// this returns bits that are set in exactly one of the input bitmaps. + /// + One, } } diff --git a/src/StackExchange.Redis/Enums/ClientFlags.cs b/src/StackExchange.Redis/Enums/ClientFlags.cs index cf87a2b3a..eb687bba6 100644 --- a/src/StackExchange.Redis/Enums/ClientFlags.cs +++ b/src/StackExchange.Redis/Enums/ClientFlags.cs @@ -5,84 +5,174 @@ namespace StackExchange.Redis { /// /// The client flags can be a combination of: - /// O: the client is a replica in MONITOR mode - /// S: the client is a normal replica server - /// M: the client is a master - /// x: the client is in a MULTI/EXEC context - /// b: the client is waiting in a blocking operation - /// i: the client is waiting for a VM I/O (deprecated) - /// d: a watched keys has been modified - EXEC will fail - /// c: connection to be closed after writing entire reply - /// u: the client is unblocked - /// A: connection to be closed ASAP - /// N: no specific flag set + /// + /// + /// A + /// Connection to be closed ASAP. 
+ /// + /// + /// b + /// The client is waiting in a blocking operation. + /// + /// + /// c + /// Connection to be closed after writing entire reply. + /// + /// + /// d + /// A watched keys has been modified - EXEC will fail. + /// + /// + /// i + /// The client is waiting for a VM I/O (deprecated). + /// + /// + /// M + /// The client is a primary. + /// + /// + /// N + /// No specific flag set. + /// + /// + /// O + /// The client is a replica in MONITOR mode. + /// + /// + /// P + /// The client is a Pub/Sub subscriber. + /// + /// + /// r + /// The client is in readonly mode against a cluster node. + /// + /// + /// S + /// The client is a normal replica server. + /// + /// + /// u + /// The client is unblocked. + /// + /// + /// U + /// The client is unblocked. + /// + /// + /// x + /// The client is in a MULTI/EXEC context. + /// + /// + /// t + /// The client enabled keys tracking in order to perform client side caching. + /// + /// + /// R + /// The client tracking target client is invalid. + /// + /// + /// B + /// The client enabled broadcast tracking mode. + /// + /// /// + /// [Flags] + [System.Diagnostics.CodeAnalysis.SuppressMessage("Design", "CA1069:Enums values should not be duplicated", Justification = "Compatibility")] public enum ClientFlags : long { /// - /// no specific flag set + /// No specific flag set. /// None = 0, + /// - /// the client is a replica in MONITOR mode + /// The client is a replica in MONITOR mode. /// - [Obsolete("Starting with Redis version 5, Redis has moved to 'replica' terminology. Please use " + nameof(ReplicaMonitor) + " instead.")] + [Obsolete("Starting with Redis version 5, Redis has moved to 'replica' terminology. Please use " + nameof(ReplicaMonitor) + " instead, this will be removed in 3.0.")] [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] SlaveMonitor = 1, + /// - /// the client is a replica in MONITOR mode + /// The client is a replica in MONITOR mode. 
/// ReplicaMonitor = 1, // as an implementation detail, note that enum.ToString on [Flags] prefers *later* options when naming Flags + /// - /// the client is a normal replica server + /// The client is a normal replica server. /// - [Obsolete("Starting with Redis version 5, Redis has moved to 'replica' terminology. Please use " + nameof(Replica) + " instead.")] + [Obsolete("Starting with Redis version 5, Redis has moved to 'replica' terminology. Please use " + nameof(Replica) + " instead, this will be removed in 3.0.")] [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] Slave = 2, + /// - /// the client is a normal replica server + /// The client is a normal replica server. /// Replica = 2, // as an implementation detail, note that enum.ToString on [Flags] prefers *later* options when naming Flags + /// - /// the client is a master + /// The client is a primary. /// Master = 4, + /// - /// the client is in a MULTI/EXEC context + /// The client is in a MULTI/EXEC context. /// Transaction = 8, + /// - /// the client is waiting in a blocking operation + /// The client is waiting in a blocking operation. /// Blocked = 16, + /// - /// a watched keys has been modified - EXEC will fail + /// A watched keys has been modified - EXEC will fail. /// TransactionDoomed = 32, + /// - /// connection to be closed after writing entire reply + /// Connection to be closed after writing entire reply. /// Closing = 64, + /// - /// the client is unblocked + /// The client is unblocked. /// Unblocked = 128, + /// - /// connection to be closed ASAP + /// Connection to be closed ASAP. /// CloseASAP = 256, + /// - /// the client is a Pub/Sub subscriber + /// The client is a Pub/Sub subscriber. /// PubSubSubscriber = 512, + /// - /// the client is in readonly mode against a cluster node + /// The client is in readonly mode against a cluster node. 
/// ReadOnlyCluster = 1024, + /// - /// the client is connected via a Unix domain socket + /// The client is connected via a Unix domain socket. /// UnixDomainSocket = 2048, + + /// + /// The client enabled keys tracking in order to perform client side caching. + /// + KeysTracking = 4096, + + /// + /// The client tracking target client is invalid. + /// + TrackingTargetInvalid = 8192, + + /// + /// The client enabled broadcast tracking mode. + /// + BroadcastTracking = 16384, } } diff --git a/src/StackExchange.Redis/Enums/ClientType.cs b/src/StackExchange.Redis/Enums/ClientType.cs index 2bf8a1c1e..c2b003d9a 100644 --- a/src/StackExchange.Redis/Enums/ClientType.cs +++ b/src/StackExchange.Redis/Enums/ClientType.cs @@ -4,26 +4,30 @@ namespace StackExchange.Redis { /// - /// The class of the connection + /// The class of the connection. /// + [System.Diagnostics.CodeAnalysis.SuppressMessage("Design", "CA1069:Enums values should not be duplicated", Justification = "Compatibility")] public enum ClientType { /// - /// Regular connections, including MONITOR connections + /// Regular connections, including MONITOR connections. /// Normal = 0, + /// - /// Replication connections + /// Replication connections. /// - Replica = 1, // / as an implementation detail, note that enum.ToString without [Flags] preferes *earlier* values + Replica = 1, // as an implementation detail, note that enum.ToString without [Flags] prefers *earlier* values + /// - /// Replication connections + /// Replication connections. /// - [Obsolete("Starting with Redis version 5, Redis has moved to 'replica' terminology. Please use " + nameof(Replica) + " instead.")] + [Obsolete("Starting with Redis version 5, Redis has moved to 'replica' terminology. Please use " + nameof(Replica) + " instead, this will be removed in 3.0.")] [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] Slave = 1, + /// - /// Subscription connections + /// Subscription connections. 
/// PubSub = 2, } diff --git a/src/StackExchange.Redis/Enums/CommandFlags.cs b/src/StackExchange.Redis/Enums/CommandFlags.cs index dffbad6f1..83331a3c5 100644 --- a/src/StackExchange.Redis/Enums/CommandFlags.cs +++ b/src/StackExchange.Redis/Enums/CommandFlags.cs @@ -3,10 +3,11 @@ namespace StackExchange.Redis { - /// - /// Behaviour markers associated with a given command + /// + /// Behaviour markers associated with a given command. /// [Flags] + [System.Diagnostics.CodeAnalysis.SuppressMessage("Design", "CA1069:Enums values should not be duplicated", Justification = "Compatibility")] public enum CommandFlags { /// @@ -15,10 +16,12 @@ public enum CommandFlags None = 0, /// - /// From 2.0, this flag is not used + /// From 2.0, this flag is not used. /// - [Obsolete("From 2.0, this flag is not used", false)] + [Obsolete("From 2.0, this flag is not used, this will be removed in 3.0.", false)] + [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] HighPriority = 1, + /// /// The caller is not interested in the result; the caller will immediately receive a default-value /// of the expected return type (this value is not indicative of anything at the server). @@ -26,50 +29,71 @@ public enum CommandFlags FireAndForget = 2, /// - /// This operation should be performed on the master if it is available, but read operations may - /// be performed on a replica if no master is available. This is the default option. + /// This operation should be performed on the primary if it is available, but read operations may + /// be performed on a replica if no primary is available. This is the default option. /// PreferMaster = 0, +#if NET8_0_OR_GREATER /// - /// This operation should only be performed on the master. + /// This operation should be performed on the replica if it is available, but will be performed on + /// a primary if no replicas are available. Suitable for read operations only. 
/// - DemandMaster = 4, - + [Obsolete("Starting with Redis version 5, Redis has moved to 'replica' terminology. Please use " + nameof(PreferReplica) + " instead, this will be removed in 3.0.")] + [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] + PreferSlave = 8, +#endif + /// + /// This operation should only be performed on the primary. + /// + DemandMaster = 4, +#if !NET8_0_OR_GREATER /// /// This operation should be performed on the replica if it is available, but will be performed on - /// a master if no replicas are available. Suitable for read operations only. + /// a primary if no replicas are available. Suitable for read operations only. /// - [Obsolete("Starting with Redis version 5, Redis has moved to 'replica' terminology. Please use " + nameof(PreferReplica) + " instead.")] + [Obsolete("Starting with Redis version 5, Redis has moved to 'replica' terminology. Please use " + nameof(PreferReplica) + " instead, this will be removed in 3.0.")] [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] PreferSlave = 8, +#endif /// /// This operation should be performed on the replica if it is available, but will be performed on - /// a master if no replicas are available. Suitable for read operations only. + /// a primary if no replicas are available. Suitable for read operations only. /// PreferReplica = 8, // note: we're using a 2-bit set here, which [Flags] formatting hates; position is doing the best we can for reasonable outcomes here +#if NET8_0_OR_GREATER + /// + /// This operation should only be performed on a replica. Suitable for read operations only. + /// + [Obsolete("Starting with Redis version 5, Redis has moved to 'replica' terminology. Please use " + nameof(DemandReplica) + " instead, this will be removed in 3.0.")] + [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] + DemandSlave = 12, +#endif + /// /// This operation should only be performed on a replica. Suitable for read operations only. 
/// DemandReplica = 12, // note: we're using a 2-bit set here, which [Flags] formatting hates; position is doing the best we can for reasonable outcomes here +#if !NET8_0_OR_GREATER /// /// This operation should only be performed on a replica. Suitable for read operations only. /// - [Obsolete("Starting with Redis version 5, Redis has moved to 'replica' terminology. Please use " + nameof(DemandReplica) + " instead.")] + [Obsolete("Starting with Redis version 5, Redis has moved to 'replica' terminology. Please use " + nameof(DemandReplica) + " instead, this will be removed in 3.0.")] [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] DemandSlave = 12, +#endif // 16: reserved for additional "demand/prefer" options // 32: used for "asking" flag; never user-specified, so not visible on the public API /// - /// Indicates that this operation should not be forwarded to other servers as a result of an ASK or MOVED response + /// Indicates that this operation should not be forwarded to other servers as a result of an ASK or MOVED response. /// NoRedirect = 64, @@ -78,10 +102,12 @@ public enum CommandFlags // 256: used for "script unavailable"; never user-specified, so not visible on the public API /// - /// Indicates that script-related operations should use EVAL, not SCRIPT LOAD + EVALSHA + /// Indicates that script-related operations should use EVAL, not SCRIPT LOAD + EVALSHA. 
/// NoScriptCache = 512, - // 1024: used for timed-out; never user-specified, so not visible on the public API + // 1024: Removed - was used for async timeout checks; never user-specified, so not visible on the public API + + // 2048: Use subscription connection type; never user-specified, so not visible on the public API } } diff --git a/src/StackExchange.Redis/Enums/CommandStatus.cs b/src/StackExchange.Redis/Enums/CommandStatus.cs index 550e2e8aa..c4de5753d 100644 --- a/src/StackExchange.Redis/Enums/CommandStatus.cs +++ b/src/StackExchange.Redis/Enums/CommandStatus.cs @@ -1,21 +1,28 @@ namespace StackExchange.Redis { /// - /// track status of a command while communicating with Redis + /// Track status of a command while communicating with Redis. /// public enum CommandStatus { /// - /// command status unknown + /// Command status unknown. /// Unknown, + /// - /// ConnectionMultiplexer has not yet started writing this command to redis + /// ConnectionMultiplexer has not yet started writing this command to Redis. /// WaitingToBeSent, + /// - /// command has been sent to Redis + /// Command has been sent to Redis. /// Sent, + + /// + /// Command is in the backlog, waiting to be processed and written to Redis. + /// + WaitingInBacklog, } } diff --git a/src/StackExchange.Redis/Enums/ConnectionFailureType.cs b/src/StackExchange.Redis/Enums/ConnectionFailureType.cs index 9213de8b0..55eeacef6 100644 --- a/src/StackExchange.Redis/Enums/ConnectionFailureType.cs +++ b/src/StackExchange.Redis/Enums/ConnectionFailureType.cs @@ -1,49 +1,63 @@ namespace StackExchange.Redis { /// - /// The known types of connection failure + /// The known types of connection failure. /// public enum ConnectionFailureType { /// - /// This event is not a failure + /// This event is not a failure. /// None, + /// - /// No viable connections were available for this operation + /// No viable connections were available for this operation. 
/// UnableToResolvePhysicalConnection, + /// - /// The socket for this connection failed + /// The socket for this connection failed. /// SocketFailure, + /// - /// Either SSL Stream or Redis authentication failed + /// Either SSL Stream or Redis authentication failed. /// AuthenticationFailure, + /// - /// An unexpected response was received from the server + /// An unexpected response was received from the server. /// ProtocolFailure, + /// - /// An unknown internal error occurred + /// An unknown internal error occurred. /// InternalFailure, + /// - /// The socket was closed + /// The socket was closed. /// SocketClosed, + /// - /// The socket was closed + /// The socket was closed. /// ConnectionDisposed, + /// - /// The database is loading and is not available for use + /// The database is loading and is not available for use. /// Loading, + /// - /// It has not been possible to create an intial connection to the redis server(s) + /// It has not been possible to create an initial connection to the redis server(s). /// - UnableToConnect + UnableToConnect, + + /// + /// High-integrity mode was enabled, and a failure was detected. + /// + ResponseIntegrityFailure, } } diff --git a/src/StackExchange.Redis/Enums/ConnectionType.cs b/src/StackExchange.Redis/Enums/ConnectionType.cs index efba46677..8db655c08 100644 --- a/src/StackExchange.Redis/Enums/ConnectionType.cs +++ b/src/StackExchange.Redis/Enums/ConnectionType.cs @@ -1,21 +1,23 @@ namespace StackExchange.Redis { /// - /// The type of a connection + /// The type of a connection. /// public enum ConnectionType { /// - /// Not connection-type related + /// Not connection-type related. /// None = 0, + /// - /// An interactive connection handles request/response commands for accessing data on demand + /// An interactive connection handles request/response commands for accessing data on demand. 
/// Interactive, + /// - /// A subscriber connection recieves unsolicted messages from the server as pub/sub events occur + /// A subscriber connection receives unsolicited messages from the server as pub/sub events occur. /// - Subscription + Subscription, } } diff --git a/src/StackExchange.Redis/Enums/Exclude.cs b/src/StackExchange.Redis/Enums/Exclude.cs index b8a9db0b2..912a2af95 100644 --- a/src/StackExchange.Redis/Enums/Exclude.cs +++ b/src/StackExchange.Redis/Enums/Exclude.cs @@ -4,26 +4,29 @@ namespace StackExchange.Redis { /// /// When performing a range query, by default the start / stop limits are inclusive; - /// however, both can also be specified separately as exclusive + /// however, both can also be specified separately as exclusive. /// [Flags] public enum Exclude { /// - /// Both start and stop are inclusive + /// Both start and stop are inclusive. /// None = 0, + /// - /// Start is exclusive, stop is inclusive + /// Start is exclusive, stop is inclusive. /// Start = 1, + /// - /// Start is inclusive, stop is exclusive + /// Start is inclusive, stop is exclusive. /// Stop = 2, + /// - /// Both start and stop are exclusive + /// Both start and stop are exclusive. /// - Both = Start | Stop + Both = Start | Stop, } } diff --git a/src/StackExchange.Redis/Enums/ExpireResult.cs b/src/StackExchange.Redis/Enums/ExpireResult.cs new file mode 100644 index 000000000..6211492e6 --- /dev/null +++ b/src/StackExchange.Redis/Enums/ExpireResult.cs @@ -0,0 +1,27 @@ +namespace StackExchange.Redis; + +/// +/// Specifies the result of operation to set expire time. +/// +public enum ExpireResult +{ + /// + /// Field deleted because the specified expiration time is due. + /// + Due = 2, + + /// + /// Expiration time/duration updated successfully. + /// + Success = 1, + + /// + /// Expiration not set because of a specified NX | XX | GT | LT condition not met. + /// + ConditionNotMet = 0, + + /// + /// No such field. 
+ /// + NoSuchField = -2, +} diff --git a/src/StackExchange.Redis/Enums/ExpireWhen.cs b/src/StackExchange.Redis/Enums/ExpireWhen.cs new file mode 100644 index 000000000..2637e7625 --- /dev/null +++ b/src/StackExchange.Redis/Enums/ExpireWhen.cs @@ -0,0 +1,46 @@ +using System; + +namespace StackExchange.Redis; + +/// +/// Specifies when to set the expiry for a key. +/// +public enum ExpireWhen +{ + /// + /// Set expiry whether or not there is an existing expiry. + /// + Always, + + /// + /// Set expiry only when the new expiry is greater than current one. + /// + GreaterThanCurrentExpiry, + + /// + /// Set expiry only when the key has an existing expiry. + /// + HasExpiry, + + /// + /// Set expiry only when the key has no expiry. + /// + HasNoExpiry, + + /// + /// Set expiry only when the new expiry is less than current one. + /// + LessThanCurrentExpiry, +} + +internal static class ExpiryOptionExtensions +{ + internal static RedisValue ToLiteral(this ExpireWhen op) => op switch + { + ExpireWhen.HasNoExpiry => RedisLiterals.NX, + ExpireWhen.HasExpiry => RedisLiterals.XX, + ExpireWhen.GreaterThanCurrentExpiry => RedisLiterals.GT, + ExpireWhen.LessThanCurrentExpiry => RedisLiterals.LT, + _ => throw new ArgumentOutOfRangeException(nameof(op)), + }; +} diff --git a/src/StackExchange.Redis/Enums/ExportOptions.cs b/src/StackExchange.Redis/Enums/ExportOptions.cs index c6891b537..594651955 100644 --- a/src/StackExchange.Redis/Enums/ExportOptions.cs +++ b/src/StackExchange.Redis/Enums/ExportOptions.cs @@ -3,34 +3,39 @@ namespace StackExchange.Redis { /// - /// Which settings to export + /// Which settings to export. /// [Flags] public enum ExportOptions { /// - /// No options + /// No options. /// None = 0, + /// - /// The output of INFO + /// The output of INFO. /// Info = 1, + /// - /// The output of CONFIG GET * + /// The output of CONFIG GET *. /// Config = 2, + /// - /// The output of CLIENT LIST + /// The output of CLIENT LIST. 
/// Client = 4, + /// - /// The output of CLUSTER NODES + /// The output of CLUSTER NODES. /// Cluster = 8, + /// - /// Everything available + /// Everything available. /// - All = -1 + All = -1, } } diff --git a/src/StackExchange.Redis/Enums/GeoUnit.cs b/src/StackExchange.Redis/Enums/GeoUnit.cs index a88e2deec..99ab0a143 100644 --- a/src/StackExchange.Redis/Enums/GeoUnit.cs +++ b/src/StackExchange.Redis/Enums/GeoUnit.cs @@ -1,28 +1,42 @@ -using System; -using System.ComponentModel; +using System; namespace StackExchange.Redis { /// - /// Units associated with Geo Commands + /// Units associated with Geo Commands. /// public enum GeoUnit { /// - /// Meters + /// Meters. /// Meters, + /// - /// Kilometers + /// Kilometers. /// Kilometers, + /// - /// Miles + /// Miles. /// Miles, + /// - /// Feet + /// Feet. /// - Feet + Feet, + } + + internal static class GeoUnitExtensions + { + internal static RedisValue ToLiteral(this GeoUnit unit) => unit switch + { + GeoUnit.Feet => RedisLiterals.ft, + GeoUnit.Kilometers => RedisLiterals.km, + GeoUnit.Meters => RedisLiterals.m, + GeoUnit.Miles => RedisLiterals.mi, + _ => throw new ArgumentOutOfRangeException(nameof(unit)), + }; } -} \ No newline at end of file +} diff --git a/src/StackExchange.Redis/Enums/ListSide.cs b/src/StackExchange.Redis/Enums/ListSide.cs new file mode 100644 index 000000000..8d326a8af --- /dev/null +++ b/src/StackExchange.Redis/Enums/ListSide.cs @@ -0,0 +1,30 @@ +using System; + +namespace StackExchange.Redis +{ + /// + /// Specifies what side of the list to refer to. + /// + public enum ListSide + { + /// + /// The head of the list. + /// + Left, + + /// + /// The tail of the list. 
+ /// + Right, + } + + internal static class ListSideExtensions + { + internal static RedisValue ToLiteral(this ListSide side) => side switch + { + ListSide.Left => RedisLiterals.LEFT, + ListSide.Right => RedisLiterals.RIGHT, + _ => throw new ArgumentOutOfRangeException(nameof(side)), + }; + } +} diff --git a/src/StackExchange.Redis/Enums/MigrateOptions.cs b/src/StackExchange.Redis/Enums/MigrateOptions.cs index 68095bd77..fbfdaa731 100644 --- a/src/StackExchange.Redis/Enums/MigrateOptions.cs +++ b/src/StackExchange.Redis/Enums/MigrateOptions.cs @@ -3,22 +3,24 @@ namespace StackExchange.Redis { /// - /// Additional options for the MIGRATE command + /// Additional options for the MIGRATE command. /// [Flags] public enum MigrateOptions { /// - /// No options specified + /// No options specified. /// None = 0, + /// /// Do not remove the key from the local instance. /// Copy = 1, + /// /// Replace existing key on the remote instance. /// - Replace = 2 + Replace = 2, } } diff --git a/src/StackExchange.Redis/Enums/Order.cs b/src/StackExchange.Redis/Enums/Order.cs index 34f4cb36f..99d989006 100644 --- a/src/StackExchange.Redis/Enums/Order.cs +++ b/src/StackExchange.Redis/Enums/Order.cs @@ -1,17 +1,30 @@ -namespace StackExchange.Redis +using System; + +namespace StackExchange.Redis { /// - /// The direction in which to sequence elements + /// The direction in which to sequence elements. /// public enum Order { /// - /// Ordered from low values to high values + /// Ordered from low values to high values. /// Ascending, + /// - /// Ordered from high values to low values + /// Ordered from high values to low values. 
/// - Descending + Descending, + } + + internal static class OrderExtensions + { + internal static RedisValue ToLiteral(this Order order) => order switch + { + Order.Ascending => RedisLiterals.ASC, + Order.Descending => RedisLiterals.DESC, + _ => throw new ArgumentOutOfRangeException(nameof(order)), + }; } } diff --git a/src/StackExchange.Redis/Enums/PersistResult.cs b/src/StackExchange.Redis/Enums/PersistResult.cs new file mode 100644 index 000000000..91fdf9fa7 --- /dev/null +++ b/src/StackExchange.Redis/Enums/PersistResult.cs @@ -0,0 +1,22 @@ +namespace StackExchange.Redis; + +/// +/// Specifies the result of operation to remove the expire time. +/// +public enum PersistResult +{ + /// + /// Expiration removed successfully. + /// + Success = 1, + + /// + /// Expiration not removed because of a specified NX | XX | GT | LT condition not met. + /// + ConditionNotMet = -1, + + /// + /// No such field. + /// + NoSuchField = -2, +} diff --git a/src/StackExchange.Redis/Enums/PositionKind.cs b/src/StackExchange.Redis/Enums/PositionKind.cs index e52b7a17d..81a705090 100644 --- a/src/StackExchange.Redis/Enums/PositionKind.cs +++ b/src/StackExchange.Redis/Enums/PositionKind.cs @@ -4,6 +4,6 @@ internal enum PositionKind { Beginning = 0, Explicit = 1, - New = 2 + New = 2, } } diff --git a/src/StackExchange.Redis/Enums/Proxy.cs b/src/StackExchange.Redis/Enums/Proxy.cs index ed87ba495..9dc1d3770 100644 --- a/src/StackExchange.Redis/Enums/Proxy.cs +++ b/src/StackExchange.Redis/Enums/Proxy.cs @@ -1,17 +1,56 @@ namespace StackExchange.Redis { /// - /// Specifies the proxy that is being used to communicate to redis + /// Specifies the proxy that is being used to communicate to redis. /// public enum Proxy { /// - /// Direct communication to the redis server(s) + /// Direct communication to the redis server(s). /// None, + /// - /// Communication via twemproxy + /// Communication via twemproxy. /// - Twemproxy + Twemproxy, + + /// + /// Communication via envoyproxy. 
+ /// + Envoyproxy, + } + + internal static class ProxyExtensions + { + /// + /// Whether a proxy supports databases (e.g. database > 0). + /// + internal static bool SupportsDatabases(this Proxy proxy) => proxy switch + { + Proxy.Twemproxy => false, + Proxy.Envoyproxy => false, + _ => true, + }; + + /// + /// Whether a proxy supports pub/sub. + /// + internal static bool SupportsPubSub(this Proxy proxy) => proxy switch + { + Proxy.Twemproxy => false, + Proxy.Envoyproxy => false, + _ => true, + }; + + /// + /// Whether a proxy supports the ConnectionMultiplexer.GetServer. + /// + internal static bool SupportsServerApi(this Proxy proxy) => proxy switch + { + Proxy.Twemproxy => false, + Proxy.Envoyproxy => false, + _ => true, + }; } } diff --git a/src/StackExchange.Redis/Enums/RedisCommand.cs b/src/StackExchange.Redis/Enums/RedisCommand.cs index 3f9eeab45..2a1c7695e 100644 --- a/src/StackExchange.Redis/Enums/RedisCommand.cs +++ b/src/StackExchange.Redis/Enums/RedisCommand.cs @@ -1,216 +1,569 @@ -namespace StackExchange.Redis +using System; +using RESPite; + +namespace StackExchange.Redis; + +// ReSharper disable InconsistentNaming +internal enum RedisCommand +{ + NONE, // must be first for "zero reasons" + + APPEND, + ASKING, + AUTH, + + BGREWRITEAOF, + BGSAVE, + BITCOUNT, + BITOP, + BITPOS, + BLPOP, + BRPOP, + BRPOPLPUSH, + + CLIENT, + CLUSTER, + CONFIG, + COPY, + COMMAND, + + DBSIZE, + DEBUG, + DECR, + DECRBY, + DEL, + DELEX, + DIGEST, + DISCARD, + DUMP, + + ECHO, + EVAL, + EVALSHA, + EVAL_RO, + EVALSHA_RO, + EXEC, + EXISTS, + EXPIRE, + EXPIREAT, + EXPIRETIME, + + FLUSHALL, + FLUSHDB, + + GEOADD, + GEODIST, + GEOHASH, + GEOPOS, + GEORADIUS, + GEORADIUSBYMEMBER, + GEOSEARCH, + GEOSEARCHSTORE, + + GET, + GETBIT, + GETDEL, + GETEX, + GETRANGE, + GETSET, + + HDEL, + HELLO, + HEXISTS, + HEXPIRE, + HEXPIREAT, + HEXPIRETIME, + HGET, + HGETEX, + HGETDEL, + HGETALL, + HINCRBY, + HINCRBYFLOAT, + HKEYS, + HLEN, + HMGET, + HMSET, + HOTKEYS, + HPERSIST, + HPEXPIRE, + 
HPEXPIREAT, + HPEXPIRETIME, + HPTTL, + HRANDFIELD, + HSCAN, + HSET, + HSETEX, + HSETNX, + HSTRLEN, + HVALS, + + INCR, + INCRBY, + INCRBYFLOAT, + INFO, + + KEYS, + + LASTSAVE, + LATENCY, + LCS, + LINDEX, + LINSERT, + LLEN, + LMOVE, + LMPOP, + LPOP, + LPOS, + LPUSH, + LPUSHX, + LRANGE, + LREM, + LSET, + LTRIM, + + MEMORY, + MGET, + MIGRATE, + MONITOR, + MOVE, + MSET, + MSETEX, + MSETNX, + MULTI, + + OBJECT, + + PERSIST, + PEXPIRE, + PEXPIREAT, + PEXPIRETIME, + PFADD, + PFCOUNT, + PFMERGE, + PING, + PSETEX, + PSUBSCRIBE, + PTTL, + PUBLISH, + PUBSUB, + PUNSUBSCRIBE, + + QUIT, + + RANDOMKEY, + READONLY, + READWRITE, + RENAME, + RENAMENX, + REPLICAOF, + RESTORE, + ROLE, + RPOP, + RPOPLPUSH, + RPUSH, + RPUSHX, + + SADD, + SAVE, + SCAN, + SCARD, + SCRIPT, + SDIFF, + SDIFFSTORE, + SELECT, + SENTINEL, + SET, + SETBIT, + SETEX, + SETNX, + SETRANGE, + SHUTDOWN, + SINTER, + SINTERCARD, + SINTERSTORE, + SISMEMBER, + SLAVEOF, + SLOWLOG, + SMEMBERS, + SMISMEMBER, + SMOVE, + SORT, + SORT_RO, + SPOP, + SPUBLISH, + SRANDMEMBER, + SREM, + STRLEN, + SUBSCRIBE, + SUNION, + SUNIONSTORE, + SSCAN, + SSUBSCRIBE, + SUNSUBSCRIBE, + SWAPDB, + SYNC, + + TIME, + TOUCH, + TTL, + TYPE, + + UNLINK, + UNSUBSCRIBE, + UNWATCH, + + VADD, + VCARD, + VDIM, + VEMB, + VGETATTR, + VINFO, + VISMEMBER, + VLINKS, + VRANDMEMBER, + VRANGE, + VREM, + VSETATTR, + VSIM, + + WATCH, + + XACK, + XACKDEL, + XADD, + XAUTOCLAIM, + XCLAIM, + XCFGSET, + XDEL, + XDELEX, + XGROUP, + XINFO, + XLEN, + XPENDING, + XRANGE, + XREAD, + XREADGROUP, + XREVRANGE, + XTRIM, + + ZADD, + ZCARD, + ZCOUNT, + ZDIFF, + ZDIFFSTORE, + ZINCRBY, + ZINTER, + ZINTERCARD, + ZINTERSTORE, + ZLEXCOUNT, + ZMPOP, + ZMSCORE, + ZPOPMAX, + ZPOPMIN, + ZRANDMEMBER, + ZRANGE, + ZRANGEBYLEX, + ZRANGEBYSCORE, + ZRANGESTORE, + ZRANK, + ZREM, + ZREMRANGEBYLEX, + ZREMRANGEBYRANK, + ZREMRANGEBYSCORE, + ZREVRANGE, + ZREVRANGEBYLEX, + ZREVRANGEBYSCORE, + ZREVRANK, + ZSCAN, + ZSCORE, + ZUNION, + ZUNIONSTORE, + + UNKNOWN, +} + +internal static partial class 
RedisCommandMetadata +{ + [AsciiHash(CaseSensitive = false)] + public static partial bool TryParseCI(ReadOnlySpan command, out RedisCommand value); + + [AsciiHash(CaseSensitive = false)] + public static partial bool TryParseCI(ReadOnlySpan command, out RedisCommand value); +} + +// ReSharper restore InconsistentNaming +internal static class RedisCommandExtensions { - internal enum RedisCommand + /// + /// Gets whether a given command can be issued only to a primary, or if any server is eligible. + /// + /// The to check. + /// if the command is primary-only, otherwise. + [System.Diagnostics.CodeAnalysis.SuppressMessage("Style", "IDE0066:Convert switch statement to expression", Justification = "No, it'd be ridiculous.")] + internal static bool IsPrimaryOnly(this RedisCommand command) { - NONE, // must be first for "zero reasons" - - APPEND, - ASKING, - AUTH, - - BGREWRITEAOF, - BGSAVE, - BITCOUNT, - BITOP, - BITPOS, - BLPOP, - BRPOP, - BRPOPLPUSH, - - CLIENT, - CLUSTER, - CONFIG, - - DBSIZE, - DEBUG, - DECR, - DECRBY, - DEL, - DISCARD, - DUMP, - - ECHO, - EVAL, - EVALSHA, - EXEC, - EXISTS, - EXPIRE, - EXPIREAT, - - FLUSHALL, - FLUSHDB, - - GEOADD, - GEODIST, - GEOHASH, - GEOPOS, - GEORADIUS, - GEORADIUSBYMEMBER, - - GET, - GETBIT, - GETRANGE, - GETSET, - - HDEL, - HEXISTS, - HGET, - HGETALL, - HINCRBY, - HINCRBYFLOAT, - HKEYS, - HLEN, - HMGET, - HMSET, - HSCAN, - HSET, - HSETNX, - HSTRLEN, - HVALS, - - INCR, - INCRBY, - INCRBYFLOAT, - INFO, - - KEYS, - - LASTSAVE, - LATENCY, - LINDEX, - LINSERT, - LLEN, - LPOP, - LPUSH, - LPUSHX, - LRANGE, - LREM, - LSET, - LTRIM, - - MEMORY, - MGET, - MIGRATE, - MONITOR, - MOVE, - MSET, - MSETNX, - MULTI, - - OBJECT, - - PERSIST, - PEXPIRE, - PEXPIREAT, - PFADD, - PFCOUNT, - PFMERGE, - PING, - PSETEX, - PSUBSCRIBE, - PTTL, - PUBLISH, - PUBSUB, - PUNSUBSCRIBE, - - QUIT, - - RANDOMKEY, - READONLY, - READWRITE, - RENAME, - RENAMENX, - REPLICAOF, - RESTORE, - ROLE, - RPOP, - RPOPLPUSH, - RPUSH, - RPUSHX, - - SADD, - SAVE, - SCAN, - 
SCARD, - SCRIPT, - SDIFF, - SDIFFSTORE, - SELECT, - SENTINEL, - SET, - SETBIT, - SETEX, - SETNX, - SETRANGE, - SHUTDOWN, - SINTER, - SINTERSTORE, - SISMEMBER, - SLAVEOF, - SLOWLOG, - SMEMBERS, - SMOVE, - SORT, - SPOP, - SRANDMEMBER, - SREM, - STRLEN, - SUBSCRIBE, - SUNION, - SUNIONSTORE, - SSCAN, - SWAPDB, - SYNC, - - TIME, - TOUCH, - TTL, - TYPE, - - UNLINK, - UNSUBSCRIBE, - UNWATCH, - - WATCH, - - XACK, - XADD, - XCLAIM, - XDEL, - XGROUP, - XINFO, - XLEN, - XPENDING, - XRANGE, - XREAD, - XREADGROUP, - XREVRANGE, - XTRIM, - - ZADD, - ZCARD, - ZCOUNT, - ZINCRBY, - ZINTERSTORE, - ZLEXCOUNT, - ZPOPMAX, - ZPOPMIN, - ZRANGE, - ZRANGEBYLEX, - ZRANGEBYSCORE, - ZRANK, - ZREM, - ZREMRANGEBYLEX, - ZREMRANGEBYRANK, - ZREMRANGEBYSCORE, - ZREVRANGE, - ZREVRANGEBYLEX, - ZREVRANGEBYSCORE, - ZREVRANK, - ZSCAN, - ZSCORE, - ZUNIONSTORE, - - UNKNOWN, + switch (command) + { + // Commands that can only be issued to a primary (writable) server + // If a command *may* be writable (e.g. an EVAL script), it should *not* be primary-only + // because that'd block a legitimate use case of a read-only script on replica servers, + // for example spreading load via a .DemandReplica flag in the caller. + // Basically: would it fail on a read-only replica in 100% of cases? Then it goes in the list. 
+ case RedisCommand.APPEND: + case RedisCommand.BITOP: + case RedisCommand.BLPOP: + case RedisCommand.BRPOP: + case RedisCommand.BRPOPLPUSH: + case RedisCommand.DECR: + case RedisCommand.DECRBY: + case RedisCommand.DEL: + case RedisCommand.DELEX: + case RedisCommand.DIGEST: + case RedisCommand.EXPIRE: + case RedisCommand.EXPIREAT: + case RedisCommand.FLUSHALL: + case RedisCommand.FLUSHDB: + case RedisCommand.GEOSEARCHSTORE: + case RedisCommand.GETDEL: + case RedisCommand.GETEX: + case RedisCommand.GETSET: + case RedisCommand.HDEL: + case RedisCommand.HEXPIRE: + case RedisCommand.HEXPIREAT: + case RedisCommand.HGETDEL: + case RedisCommand.HGETEX: + case RedisCommand.HINCRBY: + case RedisCommand.HINCRBYFLOAT: + case RedisCommand.HMSET: + case RedisCommand.HPERSIST: + case RedisCommand.HPEXPIRE: + case RedisCommand.HPEXPIREAT: + case RedisCommand.HSET: + case RedisCommand.HSETEX: + case RedisCommand.HSETNX: + case RedisCommand.INCR: + case RedisCommand.INCRBY: + case RedisCommand.INCRBYFLOAT: + case RedisCommand.LINSERT: + case RedisCommand.LMOVE: + case RedisCommand.LMPOP: + case RedisCommand.LPOP: + case RedisCommand.LPUSH: + case RedisCommand.LPUSHX: + case RedisCommand.LREM: + case RedisCommand.LSET: + case RedisCommand.LTRIM: + case RedisCommand.MIGRATE: + case RedisCommand.MOVE: + case RedisCommand.MSET: + case RedisCommand.MSETEX: + case RedisCommand.MSETNX: + case RedisCommand.PERSIST: + case RedisCommand.PEXPIRE: + case RedisCommand.PEXPIREAT: + case RedisCommand.PFADD: + case RedisCommand.PFMERGE: + case RedisCommand.PSETEX: + case RedisCommand.RENAME: + case RedisCommand.RENAMENX: + case RedisCommand.RESTORE: + case RedisCommand.RPOP: + case RedisCommand.RPOPLPUSH: + case RedisCommand.RPUSH: + case RedisCommand.RPUSHX: + case RedisCommand.SADD: + case RedisCommand.SDIFFSTORE: + case RedisCommand.SET: + case RedisCommand.SETBIT: + case RedisCommand.SETEX: + case RedisCommand.SETNX: + case RedisCommand.SETRANGE: + case RedisCommand.SINTERSTORE: + case 
RedisCommand.SMOVE: + case RedisCommand.SPOP: + case RedisCommand.SREM: + case RedisCommand.SUNIONSTORE: + case RedisCommand.SWAPDB: + case RedisCommand.TOUCH: + case RedisCommand.UNLINK: + case RedisCommand.VADD: + case RedisCommand.VREM: + case RedisCommand.VSETATTR: + case RedisCommand.XAUTOCLAIM: + case RedisCommand.XCFGSET: + case RedisCommand.ZADD: + case RedisCommand.ZDIFFSTORE: + case RedisCommand.ZINTERSTORE: + case RedisCommand.ZINCRBY: + case RedisCommand.ZMPOP: + case RedisCommand.ZPOPMAX: + case RedisCommand.ZPOPMIN: + case RedisCommand.ZRANGESTORE: + case RedisCommand.ZREM: + case RedisCommand.ZREMRANGEBYLEX: + case RedisCommand.ZREMRANGEBYRANK: + case RedisCommand.ZREMRANGEBYSCORE: + case RedisCommand.ZUNIONSTORE: + return true; + // Commands that can be issued anywhere + case RedisCommand.NONE: + case RedisCommand.ASKING: + case RedisCommand.AUTH: + case RedisCommand.BGREWRITEAOF: + case RedisCommand.BGSAVE: + case RedisCommand.BITCOUNT: + case RedisCommand.BITPOS: + case RedisCommand.CLIENT: + case RedisCommand.CLUSTER: + case RedisCommand.COMMAND: + case RedisCommand.CONFIG: + case RedisCommand.DBSIZE: + case RedisCommand.DEBUG: + case RedisCommand.DISCARD: + case RedisCommand.DUMP: + case RedisCommand.ECHO: + case RedisCommand.EVAL: + case RedisCommand.EVALSHA: + case RedisCommand.EVAL_RO: + case RedisCommand.EVALSHA_RO: + case RedisCommand.EXEC: + case RedisCommand.EXISTS: + case RedisCommand.EXPIRETIME: + case RedisCommand.GEODIST: + case RedisCommand.GEOHASH: + case RedisCommand.GEOPOS: + case RedisCommand.GEORADIUS: + case RedisCommand.GEORADIUSBYMEMBER: + case RedisCommand.GEOSEARCH: + case RedisCommand.GET: + case RedisCommand.GETBIT: + case RedisCommand.GETRANGE: + case RedisCommand.HELLO: + case RedisCommand.HEXISTS: + case RedisCommand.HEXPIRETIME: + case RedisCommand.HGET: + case RedisCommand.HGETALL: + case RedisCommand.HKEYS: + case RedisCommand.HLEN: + case RedisCommand.HMGET: + case RedisCommand.HOTKEYS: + case 
RedisCommand.HPEXPIRETIME: + case RedisCommand.HPTTL: + case RedisCommand.HRANDFIELD: + case RedisCommand.HSCAN: + case RedisCommand.HSTRLEN: + case RedisCommand.HVALS: + case RedisCommand.INFO: + case RedisCommand.KEYS: + case RedisCommand.LASTSAVE: + case RedisCommand.LATENCY: + case RedisCommand.LCS: + case RedisCommand.LINDEX: + case RedisCommand.LLEN: + case RedisCommand.LPOS: + case RedisCommand.LRANGE: + case RedisCommand.MEMORY: + case RedisCommand.MGET: + case RedisCommand.MONITOR: + case RedisCommand.MULTI: + case RedisCommand.OBJECT: + case RedisCommand.PEXPIRETIME: + case RedisCommand.PFCOUNT: + case RedisCommand.PING: + case RedisCommand.PSUBSCRIBE: + case RedisCommand.PTTL: + case RedisCommand.PUBLISH: + case RedisCommand.PUBSUB: + case RedisCommand.PUNSUBSCRIBE: + case RedisCommand.QUIT: + case RedisCommand.RANDOMKEY: + case RedisCommand.READONLY: + case RedisCommand.READWRITE: + case RedisCommand.REPLICAOF: + case RedisCommand.ROLE: + case RedisCommand.SAVE: + case RedisCommand.SCAN: + case RedisCommand.SCARD: + case RedisCommand.SCRIPT: + case RedisCommand.SDIFF: + case RedisCommand.SELECT: + case RedisCommand.SENTINEL: + case RedisCommand.SHUTDOWN: + case RedisCommand.SINTER: + case RedisCommand.SINTERCARD: + case RedisCommand.SISMEMBER: + case RedisCommand.SLAVEOF: + case RedisCommand.SLOWLOG: + case RedisCommand.SMEMBERS: + case RedisCommand.SMISMEMBER: + case RedisCommand.SORT_RO: + case RedisCommand.SPUBLISH: + case RedisCommand.SRANDMEMBER: + case RedisCommand.SSUBSCRIBE: + case RedisCommand.STRLEN: + case RedisCommand.SUBSCRIBE: + case RedisCommand.SUNION: + case RedisCommand.SUNSUBSCRIBE: + case RedisCommand.SSCAN: + case RedisCommand.SYNC: + case RedisCommand.TIME: + case RedisCommand.TTL: + case RedisCommand.TYPE: + case RedisCommand.UNSUBSCRIBE: + case RedisCommand.UNWATCH: + case RedisCommand.WATCH: + case RedisCommand.XINFO: + case RedisCommand.XLEN: + case RedisCommand.XPENDING: + case RedisCommand.XRANGE: + case RedisCommand.XREAD: + 
case RedisCommand.XREVRANGE: + case RedisCommand.ZCARD: + case RedisCommand.ZCOUNT: + case RedisCommand.ZDIFF: + case RedisCommand.ZINTER: + case RedisCommand.ZINTERCARD: + case RedisCommand.ZLEXCOUNT: + case RedisCommand.ZMSCORE: + case RedisCommand.ZRANDMEMBER: + case RedisCommand.ZRANGE: + case RedisCommand.ZRANGEBYLEX: + case RedisCommand.ZRANGEBYSCORE: + case RedisCommand.ZRANK: + case RedisCommand.ZREVRANGE: + case RedisCommand.ZREVRANGEBYLEX: + case RedisCommand.ZREVRANGEBYSCORE: + case RedisCommand.ZREVRANK: + case RedisCommand.ZSCAN: + case RedisCommand.ZSCORE: + case RedisCommand.ZUNION: + case RedisCommand.UNKNOWN: + case RedisCommand.VCARD: + case RedisCommand.VDIM: + case RedisCommand.VEMB: + case RedisCommand.VGETATTR: + case RedisCommand.VINFO: + case RedisCommand.VISMEMBER: + case RedisCommand.VLINKS: + case RedisCommand.VRANDMEMBER: + case RedisCommand.VRANGE: + case RedisCommand.VSIM: + // Writable commands, but allowed for the writable-replicas scenario + case RedisCommand.COPY: + case RedisCommand.GEOADD: + case RedisCommand.SORT: + case RedisCommand.XACK: + case RedisCommand.XACKDEL: + case RedisCommand.XADD: + case RedisCommand.XCLAIM: + case RedisCommand.XDEL: + case RedisCommand.XDELEX: + case RedisCommand.XGROUP: + case RedisCommand.XREADGROUP: + case RedisCommand.XTRIM: + return false; + default: + throw new ArgumentOutOfRangeException(nameof(command), $"Every RedisCommand must be defined in Message.IsPrimaryOnly, unknown command '{command}' encountered."); + } } } diff --git a/src/StackExchange.Redis/Enums/RedisType.cs b/src/StackExchange.Redis/Enums/RedisType.cs index 54d49cd03..90a41165b 100644 --- a/src/StackExchange.Redis/Enums/RedisType.cs +++ b/src/StackExchange.Redis/Enums/RedisType.cs @@ -1,52 +1,75 @@ namespace StackExchange.Redis { /// - /// The intrinsinc data-types supported by redis + /// The intrinsic data-types supported by redis. 
/// - /// https://redis.io/topics/data-types + /// public enum RedisType { /// - /// The specified key does not exist + /// The specified key does not exist. /// None, + /// - /// Strings are the most basic kind of Redis value. Redis Strings are binary safe, this means that a Redis string can contain any kind of data, for instance a JPEG image or a serialized Ruby object. + /// Strings are the most basic kind of Redis value. Redis Strings are binary safe, this means that + /// a Redis string can contain any kind of data, for instance a JPEG image or a serialized Ruby object. /// A String value can be at max 512 Megabytes in length. /// - /// https://redis.io/commands#string + /// String, + /// - /// Redis Lists are simply lists of strings, sorted by insertion order. It is possible to add elements to a Redis List pushing new elements on the head (on the left) or on the tail (on the right) of the list. + /// Redis Lists are simply lists of strings, sorted by insertion order. + /// It is possible to add elements to a Redis List pushing new elements on the head (on the left) or + /// on the tail (on the right) of the list. /// - /// https://redis.io/commands#list + /// List, + /// - /// Redis Sets are an unordered collection of Strings. It is possible to add, remove, and test for existence of members in O(1) (constant time regardless of the number of elements contained inside the Set). - /// Redis Sets have the desirable property of not allowing repeated members. Adding the same element multiple times will result in a set having a single copy of this element. Practically speaking this means that adding a member does not require a check if exists then add operation. + /// Redis Sets are an unordered collection of Strings. It is possible to add, remove, and test for + /// existence of members in O(1) (constant time regardless of the number of elements contained inside the Set). + /// Redis Sets have the desirable property of not allowing repeated members. 
+ /// Adding the same element multiple times will result in a set having a single copy of this element. + /// Practically speaking this means that adding a member does not require a check if exists then add operation. /// - /// https://redis.io/commands#set + /// Set, + /// - /// Redis Sorted Sets are, similarly to Redis Sets, non repeating collections of Strings. The difference is that every member of a Sorted Set is associated with score, that is used in order to take the sorted set ordered, from the smallest to the greatest score. While members are unique, scores may be repeated. + /// Redis Sorted Sets are, similarly to Redis Sets, non repeating collections of Strings. + /// The difference is that every member of a Sorted Set is associated with score, that is used + /// in order to take the sorted set ordered, from the smallest to the greatest score. + /// While members are unique, scores may be repeated. /// - /// https://redis.io/commands#sorted_set + /// SortedSet, + /// - /// Redis Hashes are maps between string fields and string values, so they are the perfect data type to represent objects (eg: A User with a number of fields like name, surname, age, and so forth) + /// Redis Hashes are maps between string fields and string values, so they are the perfect data type + /// to represent objects (e.g. A User with a number of fields like name, surname, age, and so forth). /// - /// https://redis.io/commands#hash + /// Hash, + /// /// A Redis Stream is a data structure which models the behavior of an append only log but it has more /// advanced features for manipulating the data contained within the stream. Each entry in a /// stream contains a unique message ID and a list of name/value pairs containing the entry's data. /// - /// https://redis.io/commands#stream + /// Stream, + /// - /// The data-type was not recognised by the client library + /// The data-type was not recognised by the client library. 
/// Unknown, + + /// + /// Vector sets are a data type similar to sorted sets, but instead of a score, + /// vector set elements have a string representation of a vector. + /// + VectorSet, } } diff --git a/src/StackExchange.Redis/Enums/ReplicationChangeOptions.cs b/src/StackExchange.Redis/Enums/ReplicationChangeOptions.cs index 63412c594..897ebbb6c 100644 --- a/src/StackExchange.Redis/Enums/ReplicationChangeOptions.cs +++ b/src/StackExchange.Redis/Enums/ReplicationChangeOptions.cs @@ -4,35 +4,41 @@ namespace StackExchange.Redis { /// - /// Additional operations to perform when making a server a master + /// Additional operations to perform when making a server a primary. /// [Flags] + [System.Diagnostics.CodeAnalysis.SuppressMessage("Design", "CA1069:Enums values should not be duplicated", Justification = "Compatibility")] public enum ReplicationChangeOptions { /// - /// No additional operations + /// No additional operations. /// None = 0, + /// - /// Set the tie-breaker key on all available masters, to specify this server + /// Set the tie-breaker key on all available primaries, to specify this server. /// SetTiebreaker = 1, + /// - /// Broadcast to the pub-sub channel to listening clients to reconfigure themselves + /// Broadcast to the pub-sub channel to listening clients to reconfigure themselves. /// Broadcast = 2, + /// - /// Issue a REPLICAOF to all other known nodes, making this this master of all + /// Issue a REPLICAOF to all other known nodes, making this primary of all. /// - [Obsolete("Starting with Redis version 5, Redis has moved to 'replica' terminology. Please use " + nameof(ReplicateToOtherEndpoints) + " instead.")] + [Obsolete("Starting with Redis version 5, Redis has moved to 'replica' terminology. 
Please use " + nameof(ReplicateToOtherEndpoints) + " instead, this will be removed in 3.0.")] [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] EnslaveSubordinates = 4, + /// - /// Issue a REPLICAOF to all other known nodes, making this this master of all + /// Issue a REPLICAOF to all other known nodes, making this primary of all. /// ReplicateToOtherEndpoints = 4, // note ToString prefers *later* options + /// - /// All additional operations + /// All additional operations. /// All = SetTiebreaker | Broadcast | ReplicateToOtherEndpoints, } diff --git a/src/StackExchange.Redis/Enums/ResultType.cs b/src/StackExchange.Redis/Enums/ResultType.cs index e9a8c4e17..63e267a91 100644 --- a/src/StackExchange.Redis/Enums/ResultType.cs +++ b/src/StackExchange.Redis/Enums/ResultType.cs @@ -1,33 +1,108 @@ -namespace StackExchange.Redis +using System; +using System.ComponentModel; + +namespace StackExchange.Redis { /// - /// The underlying result type as defined by redis + /// The underlying result type as defined by Redis. /// public enum ResultType : byte { /// - /// No value was received + /// No value was received. /// None = 0, + + // RESP 2 + /// - /// Basic strings typically represent status results such as "OK" + /// Basic strings typically represent status results such as "OK". /// SimpleString = 1, + /// - /// Error strings represent invalid operation results from the server + /// Error strings represent invalid operation results from the server. /// Error = 2, + /// - /// Integers are returned for count operations and some integer-based increment operations + /// Integers are returned for count operations and some integer-based increment operations. /// Integer = 3, + /// - /// Bulk strings represent typical user content values + /// Bulk strings represent typical user content values. /// BulkString = 4, + /// - /// Multi-bulk replies represent complex results such as arrays + /// Array of results (former Multi-bulk). 
/// - MultiBulk = 5 + Array = 5, + + /// + /// Multi-bulk replies represent complex results such as arrays. + /// + [Obsolete("Please use " + nameof(Array))] + [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] + MultiBulk = 5, + + // RESP3: https://github.com/redis/redis-specifications/blob/master/protocol/RESP3.md + + // note: we will arrange the values such that the last 3 bits are the RESP2 equivalent, + // and then we count up from there + + /// + /// A single null value replacing RESP v2 blob and multi-bulk nulls. + /// + Null = (1 << 3) | None, + + /// + /// True or false. + /// + Boolean = (1 << 3) | Integer, + + /// + /// A floating point number. + /// + Double = (1 << 3) | SimpleString, + + /// + /// A large number not representable by the type. + /// + BigInteger = (2 << 3) | SimpleString, + + /// + /// Binary safe error code and message. + /// + BlobError = (1 << 3) | Error, + + /// + /// A binary safe string that should be displayed to humans without any escaping or filtering. For instance the output of LATENCY DOCTOR in Redis. + /// + VerbatimString = (1 << 3) | BulkString, + + /// + /// An unordered collection of key-value pairs. Keys and values can be any other RESP3 type. + /// + Map = (1 << 3) | Array, + + /// + /// An unordered collection of N other types. + /// + Set = (2 << 3) | Array, + + /// + /// Like the type, but the client should keep reading the reply ignoring the attribute type, and return it to the client as additional information. + /// + Attribute = (3 << 3) | Array, + + /// + /// Out of band data. The format is like the type, but the client should just check the first string element, + /// stating the type of the out of band data, and call a callback if there is one registered for this specific type of push information. 
+ /// Push types are not related to replies, since they are information that the server may push at any time in the connection, + /// so the client should keep reading if it is reading the reply of a command. + /// + Push = (4 << 3) | Array, } } diff --git a/src/StackExchange.Redis/Enums/RetransmissionReasonType.cs b/src/StackExchange.Redis/Enums/RetransmissionReasonType.cs index 7fdf3847e..6bd9d43e6 100644 --- a/src/StackExchange.Redis/Enums/RetransmissionReasonType.cs +++ b/src/StackExchange.Redis/Enums/RetransmissionReasonType.cs @@ -13,16 +13,18 @@ public enum RetransmissionReasonType { /// - /// No stated reason + /// No stated reason. /// None = 0, + /// - /// Issued to investigate which node owns a key + /// Issued to investigate which node owns a key. /// Ask, + /// - /// A node has indicated that it does *not* own the given key + /// A node has indicated that it does *not* own the given key. /// - Moved + Moved, } } diff --git a/src/StackExchange.Redis/Enums/SaveType.cs b/src/StackExchange.Redis/Enums/SaveType.cs index 39325f3d7..5296d110e 100644 --- a/src/StackExchange.Redis/Enums/SaveType.cs +++ b/src/StackExchange.Redis/Enums/SaveType.cs @@ -3,25 +3,32 @@ namespace StackExchange.Redis { /// - /// The type of save operation to perform + /// The type of save operation to perform. /// public enum SaveType { /// - /// Instruct Redis to start an Append Only File rewrite process. The rewrite will create a small optimized version of the current Append Only File. + /// Instruct Redis to start an Append Only File rewrite process. + /// The rewrite will create a small optimized version of the current Append Only File. /// - /// https://redis.io/commands/bgrewriteaof + /// BackgroundRewriteAppendOnlyFile, + /// - /// Save the DB in background. The OK code is immediately returned. Redis forks, the parent continues to serve the clients, the child saves the DB on disk then exits. A client my be able to check if the operation succeeded using the LASTSAVE command. 
+ /// Save the DB in background. The OK code is immediately returned. + /// Redis forks, the parent continues to serve the clients, the child saves the DB on disk then exits. + /// A client may be able to check if the operation succeeded using the LASTSAVE command. /// - /// https://redis.io/commands/bgsave + /// BackgroundSave, + /// - /// Save the DB in foreground. This is almost never a good thing to do, and could cause significant blocking. Only do this if you know you need to save + /// Save the DB in foreground. + /// This is almost never a good thing to do, and could cause significant blocking. + /// Only do this if you know you need to save. /// - /// https://redis.io/commands/save + /// [Obsolete("Saving on the foreground can cause significant blocking; use with extreme caution")] - ForegroundSave + ForegroundSave, } } diff --git a/src/StackExchange.Redis/Enums/ServerType.cs b/src/StackExchange.Redis/Enums/ServerType.cs index 80072f34a..ef49a8449 100644 --- a/src/StackExchange.Redis/Enums/ServerType.cs +++ b/src/StackExchange.Redis/Enums/ServerType.cs @@ -1,25 +1,55 @@ namespace StackExchange.Redis { /// - /// Indicates the flavor of a particular redis server + /// Indicates the flavor of a particular redis server. /// public enum ServerType { /// - /// Classic redis-server server + /// Classic redis-server server. /// Standalone, + /// - /// Monitoring/configuration redis-sentinel server + /// Monitoring/configuration redis-sentinel server. /// Sentinel, + /// - /// Distributed redis-cluster server + /// Distributed redis-cluster server. /// Cluster, + /// - /// Distributed redis installation via twemproxy + /// Distributed redis installation via twemproxy. /// - Twemproxy + Twemproxy, + + /// + /// Redis cluster via envoyproxy. + /// + Envoyproxy, + } + + internal static class ServerTypeExtensions + { + /// + /// Whether a server type can have only a single primary, meaning an election if multiple are found. 
+ /// + internal static bool HasSinglePrimary(this ServerType type) => type switch + { + ServerType.Envoyproxy => false, + _ => true, + }; + + /// + /// Whether a server type supports . + /// + internal static bool SupportsAutoConfigure(this ServerType type) => type switch + { + ServerType.Twemproxy => false, + ServerType.Envoyproxy => false, + _ => true, + }; } } diff --git a/src/StackExchange.Redis/Enums/SetOperation.cs b/src/StackExchange.Redis/Enums/SetOperation.cs index fdb1acda4..a529d348e 100644 --- a/src/StackExchange.Redis/Enums/SetOperation.cs +++ b/src/StackExchange.Redis/Enums/SetOperation.cs @@ -1,7 +1,9 @@ -namespace StackExchange.Redis +using System; + +namespace StackExchange.Redis { /// - /// Describes an algebraic set operation that can be performed to combine multiple sets + /// Describes an algebraic set operation that can be performed to combine multiple sets. /// public enum SetOperation { @@ -9,13 +11,29 @@ public enum SetOperation /// Returns the members of the set resulting from the union of all the given sets. /// Union, + /// /// Returns the members of the set resulting from the intersection of all the given sets. /// Intersect, + /// /// Returns the members of the set resulting from the difference between the first set and all the successive sets. 
/// - Difference + Difference, + } + + internal static class SetOperationExtensions + { + internal static RedisCommand ToCommand(this SetOperation operation, bool store) => operation switch + { + SetOperation.Intersect when store => RedisCommand.ZINTERSTORE, + SetOperation.Intersect => RedisCommand.ZINTER, + SetOperation.Union when store => RedisCommand.ZUNIONSTORE, + SetOperation.Union => RedisCommand.ZUNION, + SetOperation.Difference when store => RedisCommand.ZDIFFSTORE, + SetOperation.Difference => RedisCommand.ZDIFF, + _ => throw new ArgumentOutOfRangeException(nameof(operation)), + }; } } diff --git a/src/StackExchange.Redis/Enums/ShutdownMode.cs b/src/StackExchange.Redis/Enums/ShutdownMode.cs index 3a0abacc3..a8b701ea8 100644 --- a/src/StackExchange.Redis/Enums/ShutdownMode.cs +++ b/src/StackExchange.Redis/Enums/ShutdownMode.cs @@ -1,21 +1,23 @@ namespace StackExchange.Redis { /// - /// Defines the persistence behaviour of the server during shutdown + /// Defines the persistence behaviour of the server during shutdown. /// public enum ShutdownMode { /// - /// The data is persisted if save points are configured + /// The data is persisted if save points are configured. /// Default, + /// - /// The data is NOT persisted even if save points are configured + /// The data is NOT persisted even if save points are configured. /// Never, + /// - /// The data is persisted even if save points are NOT configured + /// The data is persisted even if save points are NOT configured. 
/// - Always + Always, } } diff --git a/src/StackExchange.Redis/Enums/SimulatedFailureType.cs b/src/StackExchange.Redis/Enums/SimulatedFailureType.cs new file mode 100644 index 000000000..7f2968eca --- /dev/null +++ b/src/StackExchange.Redis/Enums/SimulatedFailureType.cs @@ -0,0 +1,22 @@ +using System; + +namespace StackExchange.Redis +{ + [Flags] + internal enum SimulatedFailureType + { + None = 0, + InteractiveInbound = 1 << 0, + InteractiveOutbound = 1 << 1, + SubscriptionInbound = 1 << 2, + SubscriptionOutbound = 1 << 3, + + AllInbound = InteractiveInbound | SubscriptionInbound, + AllOutbound = InteractiveOutbound | SubscriptionOutbound, + + AllInteractive = InteractiveInbound | InteractiveOutbound, + AllSubscription = SubscriptionInbound | SubscriptionOutbound, + + All = AllInbound | AllOutbound, + } +} diff --git a/src/StackExchange.Redis/Enums/SortType.cs b/src/StackExchange.Redis/Enums/SortType.cs index a1a034fc6..48a3596b6 100644 --- a/src/StackExchange.Redis/Enums/SortType.cs +++ b/src/StackExchange.Redis/Enums/SortType.cs @@ -1,17 +1,19 @@ namespace StackExchange.Redis { /// - /// Specifies how to compare elements for sorting + /// Specifies how to compare elements for sorting. /// public enum SortType { /// - /// Elements are interpreted as a double-precision floating point number and sorted numerically + /// Elements are interpreted as a double-precision floating point number and sorted numerically. /// Numeric, + /// - /// Elements are sorted using their alphabetic form (Redis is UTF-8 aware as long as the !LC_COLLATE environment variable is set at the server) + /// Elements are sorted using their alphabetic form + /// (Redis is UTF-8 aware as long as the !LC_COLLATE environment variable is set at the server). 
/// - Alphabetic + Alphabetic, } } diff --git a/src/StackExchange.Redis/Enums/SortedSetOrder.cs b/src/StackExchange.Redis/Enums/SortedSetOrder.cs new file mode 100644 index 000000000..474cd3612 --- /dev/null +++ b/src/StackExchange.Redis/Enums/SortedSetOrder.cs @@ -0,0 +1,32 @@ +namespace StackExchange.Redis; + +/// +/// Enum to manage ordering in sorted sets. +/// +public enum SortedSetOrder +{ + /// + /// Bases ordering off of the rank in the sorted set. This means that your start and stop inside the sorted set will be some offset into the set. + /// + ByRank, + + /// + /// Bases ordering off of the score in the sorted set. This means your start/stop will be some number which is the score for each member in the sorted set. + /// + ByScore, + + /// + /// Bases ordering off of lexicographical order, this is only appropriate in an instance where all the members of your sorted set are given the same score. + /// + ByLex, +} + +internal static class SortedSetOrderByExtensions +{ + internal static RedisValue GetLiteral(this SortedSetOrder sortedSetOrder) => sortedSetOrder switch + { + SortedSetOrder.ByLex => RedisLiterals.BYLEX, + SortedSetOrder.ByScore => RedisLiterals.BYSCORE, + _ => RedisValue.Null, + }; +} diff --git a/src/StackExchange.Redis/Enums/SortedSetWhen.cs b/src/StackExchange.Redis/Enums/SortedSetWhen.cs new file mode 100644 index 000000000..517aaeaa5 --- /dev/null +++ b/src/StackExchange.Redis/Enums/SortedSetWhen.cs @@ -0,0 +1,56 @@ +using System; + +namespace StackExchange.Redis +{ + /// + /// Indicates when this operation should be performed (only some variations are legal in a given context). + /// + [Flags] + public enum SortedSetWhen + { + /// + /// The operation won't be prevented. + /// + Always = 0, + + /// + /// The operation should only occur when there is an existing value. + /// + Exists = 1 << 0, + + /// + /// The operation should only occur when the new score is greater than the current score. 
+ /// + GreaterThan = 1 << 1, + + /// + /// The operation should only occur when the new score is less than the current score. + /// + LessThan = 1 << 2, + + /// + /// The operation should only occur when there is not an existing value. + /// + NotExists = 1 << 3, + } + + internal static class SortedSetWhenExtensions + { + internal static uint CountBits(this SortedSetWhen when) + { + uint v = (uint)when; + v -= (v >> 1) & 0x55555555; // reuse input as temporary + v = (v & 0x33333333) + ((v >> 2) & 0x33333333); // temp + uint c = ((v + (v >> 4) & 0xF0F0F0F) * 0x1010101) >> 24; // count + return c; + } + + internal static SortedSetWhen Parse(When when) => when switch + { + When.Always => SortedSetWhen.Always, + When.Exists => SortedSetWhen.Exists, + When.NotExists => SortedSetWhen.NotExists, + _ => throw new ArgumentOutOfRangeException(nameof(when)), + }; + } +} diff --git a/src/StackExchange.Redis/Enums/StreamTrimMode.cs b/src/StackExchange.Redis/Enums/StreamTrimMode.cs new file mode 100644 index 000000000..2033e8414 --- /dev/null +++ b/src/StackExchange.Redis/Enums/StreamTrimMode.cs @@ -0,0 +1,24 @@ +namespace StackExchange.Redis; + +/// +/// Determines how stream trimming works. +/// +public enum StreamTrimMode +{ + /// + /// Trims the stream according to the specified policy (MAXLEN or MINID) regardless of whether entries are referenced by any consumer groups, but preserves existing references to these entries in all consumer groups' PEL. + /// + KeepReferences = 0, + + /// + /// Trims the stream according to the specified policy and also removes all references to the trimmed entries from all consumer groups' PEL. + /// + /// Requires server 8.2 or above. + DeleteReferences = 1, + + /// + /// With ACKED: Only trims entries that were read and acknowledged by all consumer groups. + /// + /// Requires server 8.2 or above. 
+ Acknowledged = 2, +} diff --git a/src/StackExchange.Redis/Enums/StreamTrimResult.cs b/src/StackExchange.Redis/Enums/StreamTrimResult.cs new file mode 100644 index 000000000..e58c321ab --- /dev/null +++ b/src/StackExchange.Redis/Enums/StreamTrimResult.cs @@ -0,0 +1,24 @@ +namespace StackExchange.Redis; + +/// +/// Represents the result of deleting or trimming an entry from a stream. +/// +public enum StreamTrimResult +{ + /// + /// No such id exists in the provided stream key. + /// + NotFound = -1, + + /// + /// Entry was deleted from the stream. + /// + Deleted = 1, + + /// + /// Entry was not deleted because it has either not been delivered to any consumer, or + /// still has references in the consumer groups' Pending Entries List (PEL). + /// + /// This response relates to the mode. + NotDeleted = 2, +} diff --git a/src/StackExchange.Redis/Enums/StringIndexType.cs b/src/StackExchange.Redis/Enums/StringIndexType.cs new file mode 100644 index 000000000..fcb41e391 --- /dev/null +++ b/src/StackExchange.Redis/Enums/StringIndexType.cs @@ -0,0 +1,29 @@ +using System; + +namespace StackExchange.Redis; + +/// +/// Indicates if we index into a string based on bits or bytes. +/// +public enum StringIndexType +{ + /// + /// Indicates the index is the number of bytes into a string. + /// + Byte, + + /// + /// Indicates the index is the number of bits into a string. 
+ /// + Bit, +} + +internal static class StringIndexTypeExtensions +{ + internal static RedisValue ToLiteral(this StringIndexType indexType) => indexType switch + { + StringIndexType.Bit => RedisLiterals.BIT, + StringIndexType.Byte => RedisLiterals.BYTE, + _ => throw new ArgumentOutOfRangeException(nameof(indexType)), + }; +} diff --git a/src/StackExchange.Redis/Enums/When.cs b/src/StackExchange.Redis/Enums/When.cs index d72deba29..412e4064a 100644 --- a/src/StackExchange.Redis/Enums/When.cs +++ b/src/StackExchange.Redis/Enums/When.cs @@ -1,21 +1,23 @@ namespace StackExchange.Redis { /// - /// Indicates when this operation should be performed (only some variations are legal in a given context) + /// Indicates when this operation should be performed (only some variations are legal in a given context). /// public enum When { /// - /// The operation should occur whether or not there is an existing value + /// The operation should occur whether or not there is an existing value. /// Always, + /// - /// The operation should only occur when there is an existing value + /// The operation should only occur when there is an existing value. /// Exists, + /// - /// The operation should only occur when there is not an existing value + /// The operation should only occur when there is not an existing value. 
/// - NotExists + NotExists, } } diff --git a/src/StackExchange.Redis/ExceptionFactory.cs b/src/StackExchange.Redis/ExceptionFactory.cs index 0885a33e7..434abce17 100644 --- a/src/StackExchange.Redis/ExceptionFactory.cs +++ b/src/StackExchange.Redis/ExceptionFactory.cs @@ -1,6 +1,6 @@ using System; using System.Collections.Generic; -using System.Reflection; +using System.Security.Authentication; using System.Text; using System.Threading; @@ -12,9 +12,9 @@ private const string DataCommandKey = "redis-command", DataSentStatusKey = "request-sent-status", DataServerKey = "redis-server", - timeoutHelpLink = "https://stackexchange.github.io/StackExchange.Redis/Timeouts"; + TimeoutHelpLink = "https://stackexchange.github.io/StackExchange.Redis/Timeouts"; - internal static Exception AdminModeNotEnabled(bool includeDetail, RedisCommand command, Message message, ServerEndPoint server) + internal static Exception AdminModeNotEnabled(bool includeDetail, RedisCommand command, Message? message, ServerEndPoint? server) { string s = GetLabel(includeDetail, command, message); var ex = new RedisCommandException("This operation is not available unless admin mode is enabled: " + s); @@ -30,7 +30,7 @@ internal static Exception CommandDisabled(string command) internal static Exception TooManyArgs(string command, int argCount) => new RedisCommandException($"This operation would involve too many arguments ({argCount + 1} vs the redis limit of {PhysicalConnection.REDIS_MAX_ARGS}): {command}"); - internal static Exception ConnectionFailure(bool includeDetail, ConnectionFailureType failureType, string message, ServerEndPoint server) + internal static Exception ConnectionFailure(bool includeDetail, ConnectionFailureType failureType, string message, ServerEndPoint? 
server) { var ex = new RedisConnectionException(failureType, message); if (includeDetail) AddExceptionDetail(ex, null, server, null); @@ -60,7 +60,7 @@ internal static Exception DatabaseRequired(bool includeDetail, RedisCommand comm return ex; } - internal static Exception MasterOnly(bool includeDetail, RedisCommand command, Message message, ServerEndPoint server) + internal static Exception PrimaryOnly(bool includeDetail, RedisCommand command, Message? message, ServerEndPoint? server) { string s = GetLabel(includeDetail, command, message); var ex = new RedisCommandException("Command cannot be issued to a replica: " + s); @@ -75,7 +75,7 @@ internal static Exception MultiSlot(bool includeDetail, Message message) return ex; } - internal static string GetInnerMostExceptionMessage(Exception e) + internal static string GetInnerMostExceptionMessage(Exception? e) { if (e == null) { @@ -93,21 +93,21 @@ internal static string GetInnerMostExceptionMessage(Exception e) internal static Exception NoConnectionAvailable( ConnectionMultiplexer multiplexer, - Message message, - ServerEndPoint server, + Message? message, + ServerEndPoint? server, ReadOnlySpan serverSnapshot = default, RedisCommand command = default) { - string commandLabel = GetLabel(multiplexer.IncludeDetailInExceptions, message?.Command ?? command, message); + string commandLabel = GetLabel(multiplexer.RawConfig.IncludeDetailInExceptions, message?.Command ?? command, message); if (server != null) { - //if we already have the serverEndpoint for connection failure use that - //otherwise it would output state of all the endpoints + // If we already have the serverEndpoint for connection failure use that, + // otherwise it would output state of all the endpoints. serverSnapshot = new ServerEndPoint[] { server }; } - var innerException = PopulateInnerExceptions(serverSnapshot == default ? multiplexer.GetServerSnapshot() : serverSnapshot); + var innerException = PopulateInnerExceptions(serverSnapshot.IsEmpty ? 
multiplexer.GetServerSnapshot() : serverSnapshot); // Try to get a useful error message for the user. long attempts = multiplexer._connectAttemptCount, completions = multiplexer._connectCompletedCount; @@ -122,9 +122,14 @@ internal static Exception NoConnectionAvailable( else if (!multiplexer.RawConfig.AbortOnConnectFail && attempts > multiplexer.RawConfig.ConnectRetry && completions == 0) { // Attempted use after a full initial retry connect count # of failures - // This can happen in Azure often, where user disables abort and has the wrong config + // This can happen in cloud environments often, where user disables abort and has the wrong config initialMessage = $"Connection to Redis never succeeded (attempts: {attempts} - check your config), unable to service operation: "; } + else if (message is not null && message.IsPrimaryOnly() && multiplexer.IsConnected) + { + // If we know it's a primary-only command, indicate that in the error message + initialMessage = "No connection (requires writable - not eligible for replica) is active/available to service this operation: "; + } else { // Default if we don't have a more useful error message here based on circumstances @@ -140,38 +145,35 @@ internal static Exception NoConnectionAvailable( } // Add counters and exception data if we have it - List> data = null; - if (multiplexer.IncludeDetailInExceptions) + List>? data = null; + if (multiplexer.RawConfig.IncludeDetailInExceptions) { data = new List>(); AddCommonDetail(data, sb, message, multiplexer, server); } var ex = new RedisConnectionException(ConnectionFailureType.UnableToResolvePhysicalConnection, sb.ToString(), innerException, message?.Status ?? 
CommandStatus.Unknown); - if (multiplexer.IncludeDetailInExceptions) + if (multiplexer.RawConfig.IncludeDetailInExceptions) { CopyDataToException(data, ex); - sb.Append("; ").Append(PerfCounterHelper.GetThreadPoolAndCPUSummary(multiplexer.IncludePerformanceCountersInExceptions)); + sb.Append("; ").Append(PerfCounterHelper.GetThreadPoolAndCPUSummary()); AddExceptionDetail(ex, message, server, commandLabel); } return ex; } -#pragma warning disable RCS1231 // Make parameter ref read-only. - spans are tiny! - internal static Exception PopulateInnerExceptions(ReadOnlySpan serverSnapshot) -#pragma warning restore RCS1231 // Make parameter ref read-only. + internal static Exception? PopulateInnerExceptions(ReadOnlySpan serverSnapshot) { var innerExceptions = new List(); - if (serverSnapshot.Length > 0 && serverSnapshot[0].Multiplexer.LastException != null) + if (serverSnapshot.Length > 0 && serverSnapshot[0].Multiplexer.LastException is Exception ex) { - innerExceptions.Add(serverSnapshot[0].Multiplexer.LastException); + innerExceptions.Add(ex); } for (int i = 0; i < serverSnapshot.Length; i++) { - if (serverSnapshot[i].LastException != null) + if (serverSnapshot[i].LastException is Exception lastException) { - var lastException = serverSnapshot[i].LastException; innerExceptions.Add(lastException); } } @@ -201,18 +203,7 @@ internal static Exception NoCursor(RedisCommand command) return new RedisCommandException("Command cannot be used with a cursor: " + s); } - private static string _libVersion; - internal static string GetLibVersion() - { - if (_libVersion == null) - { - var assembly = typeof(ConnectionMultiplexer).Assembly; - _libVersion = ((AssemblyFileVersionAttribute)Attribute.GetCustomAttribute(assembly, typeof(AssemblyFileVersionAttribute)))?.Version - ?? 
assembly.GetName().Version.ToString(); - } - return _libVersion; - } - private static void Add(List> data, StringBuilder sb, string lk, string sk, string v) + private static void Add(List> data, StringBuilder sb, string? lk, string? sk, string? v) { if (v != null) { @@ -221,21 +212,41 @@ private static void Add(List> data, StringBuilder sb, stri } } - internal static Exception Timeout(ConnectionMultiplexer multiplexer, string baseErrorMessage, Message message, ServerEndPoint server, WriteResult? result = null) + internal static Exception Timeout(ConnectionMultiplexer multiplexer, string? baseErrorMessage, Message message, ServerEndPoint? server, WriteResult? result = null, PhysicalBridge? bridge = null) { List> data = new List> { Tuple.Create("Message", message.CommandAndKey) }; var sb = new StringBuilder(); + + // We timeout writing messages in quite different ways sync/async - so centralize messaging here. + if (string.IsNullOrEmpty(baseErrorMessage) && result == WriteResult.TimeoutBeforeWrite) + { + baseErrorMessage = message.IsBacklogged + ? 
"The message timed out in the backlog attempting to send because no connection became available" + : "The timeout was reached before the message could be written to the output buffer, and it was not sent"; + } + + var lastConnectionException = bridge?.LastException as RedisConnectionException; + var logConnectionException = message.IsBacklogged && lastConnectionException is not null; + if (!string.IsNullOrEmpty(baseErrorMessage)) { sb.Append(baseErrorMessage); + + // If we're in the situation where we've never connected + if (logConnectionException && lastConnectionException is not null) + { + sb.Append(" (").Append(Format.ToString(multiplexer.TimeoutMilliseconds)).Append("ms)"); + sb.Append(" - Last Connection Exception: ").Append(lastConnectionException.Message); + } + if (message != null) { - sb.Append(", command=").Append(message.Command); // no key here, note + sb.Append(", command=").Append(message.CommandString); // no key here, note } } else { - sb.Append("Timeout performing ").Append(message.Command).Append(" (").Append(Format.ToString(multiplexer.TimeoutMilliseconds)).Append("ms)"); + sb.Append("Timeout performing ").Append(message.CommandString).Append(" (").Append(Format.ToString(multiplexer.TimeoutMilliseconds)).Append("ms)"); } // Add timeout data, if we have it @@ -244,10 +255,6 @@ internal static Exception Timeout(ConnectionMultiplexer multiplexer, string base Add(data, sb, "Timeout", "timeout", Format.ToString(multiplexer.TimeoutMilliseconds)); try { -#if DEBUG - if (message.QueuePosition >= 0) Add(data, sb, "QueuePosition", null, message.QueuePosition.ToString()); // the position the item was when added to the queue - if ((int)message.ConnectionWriteState >= 0) Add(data, sb, "WriteState", null, message.ConnectionWriteState.ToString()); // what the physical was doing when it was added to the queue -#endif if (message != null && message.TryGetPhysicalState(out var ws, out var rs, out var sentDelta, out var receivedDelta)) { Add(data, sb, 
"Write-State", null, ws.ToString()); @@ -265,24 +272,30 @@ internal static Exception Timeout(ConnectionMultiplexer multiplexer, string base } catch { } } - AddCommonDetail(data, sb, message, multiplexer, server); - sb.Append(" (Please take a look at this article for some common client-side issues that can cause timeouts: "); - sb.Append(timeoutHelpLink); - sb.Append(")"); + sb.Append(" (Please take a look at this article for some common client-side issues that can cause timeouts: ") + .Append(TimeoutHelpLink) + .Append(')'); - var ex = new RedisTimeoutException(sb.ToString(), message?.Status ?? CommandStatus.Unknown) - { - HelpLink = timeoutHelpLink - }; + // If we're from a backlog timeout scenario, we log a more intuitive connection exception for the timeout...because the timeout was a symptom + // and we have a more direct cause: we had no connection to send it on. + Exception ex = logConnectionException && lastConnectionException is not null + ? new RedisConnectionException(lastConnectionException.FailureType, sb.ToString(), lastConnectionException, message?.Status ?? CommandStatus.Unknown) + { + HelpLink = TimeoutHelpLink, + } + : new RedisTimeoutException(sb.ToString(), message?.Status ?? CommandStatus.Unknown) + { + HelpLink = TimeoutHelpLink, + }; CopyDataToException(data, ex); - if (multiplexer.IncludeDetailInExceptions) AddExceptionDetail(ex, message, server, null); + if (multiplexer.RawConfig.IncludeDetailInExceptions) AddExceptionDetail(ex, message, server, null); return ex; } - private static void CopyDataToException(List> data, Exception ex) + private static void CopyDataToException(List>? data, Exception ex) { if (data != null) { @@ -297,48 +310,61 @@ private static void CopyDataToException(List> data, Except private static void AddCommonDetail( List> data, StringBuilder sb, - Message message, + Message? message, ConnectionMultiplexer multiplexer, - ServerEndPoint server - ) + ServerEndPoint? 
server) { if (message != null) { message.TryGetHeadMessages(out var now, out var next); - if (now != null) Add(data, sb, "Message-Current", "active", multiplexer.IncludeDetailInExceptions ? now.CommandAndKey : now.Command.ToString()); - if (next != null) Add(data, sb, "Message-Next", "next", multiplexer.IncludeDetailInExceptions ? next.CommandAndKey : next.Command.ToString()); + if (now != null) Add(data, sb, "Message-Current", "active", multiplexer.RawConfig.IncludeDetailInExceptions ? now.CommandAndKey : now.CommandString); + if (next != null) Add(data, sb, "Message-Next", "next", multiplexer.RawConfig.IncludeDetailInExceptions ? next.CommandAndKey : next.CommandString); } // Add server data, if we have it if (server != null && message != null) { - server.GetOutstandingCount(message.Command, out int inst, out int qs, out long @in, out int qu, out bool aw, out long toRead, out long toWrite, out var bs, out var rs, out var ws); - switch (rs) + var bs = server.GetBridgeStatus(message.IsForSubscriptionBridge ? 
ConnectionType.Subscription : ConnectionType.Interactive); + + switch (bs.Connection.ReadStatus) { case PhysicalConnection.ReadStatus.CompletePendingMessageAsync: case PhysicalConnection.ReadStatus.CompletePendingMessageSync: sb.Append(" ** possible thread-theft indicated; see https://stackexchange.github.io/StackExchange.Redis/ThreadTheft ** "); break; } - Add(data, sb, "OpsSinceLastHeartbeat", "inst", inst.ToString()); - Add(data, sb, "Queue-Awaiting-Write", "qu", qu.ToString()); - Add(data, sb, "Queue-Awaiting-Response", "qs", qs.ToString()); - Add(data, sb, "Active-Writer", "aw", aw.ToString()); - if (qu != 0) Add(data, sb, "Backlog-Writer", "bw", bs.ToString()); - if (rs != PhysicalConnection.ReadStatus.NA) Add(data, sb, "Read-State", "rs", rs.ToString()); - if (ws != PhysicalConnection.WriteStatus.NA) Add(data, sb, "Write-State", "ws", ws.ToString()); - - if (@in >= 0) Add(data, sb, "Inbound-Bytes", "in", @in.ToString()); - if (toRead >= 0) Add(data, sb, "Inbound-Pipe-Bytes", "in-pipe", toRead.ToString()); - if (toWrite >= 0) Add(data, sb, "Outbound-Pipe-Bytes", "out-pipe", toWrite.ToString()); - - if (multiplexer.StormLogThreshold >= 0 && qs >= multiplexer.StormLogThreshold && Interlocked.CompareExchange(ref multiplexer.haveStormLog, 1, 0) == 0) + Add(data, sb, "OpsSinceLastHeartbeat", "inst", bs.MessagesSinceLastHeartbeat.ToString()); + Add(data, sb, "Queue-Awaiting-Write", "qu", bs.BacklogMessagesPending.ToString()); + Add(data, sb, "Queue-Awaiting-Response", "qs", bs.Connection.MessagesSentAwaitingResponse.ToString()); + Add(data, sb, "Active-Writer", "aw", bs.IsWriterActive.ToString()); + Add(data, sb, "Backlog-Writer", "bw", bs.BacklogStatus.ToString()); + if (bs.Connection.ReadStatus != PhysicalConnection.ReadStatus.NA) Add(data, sb, "Read-State", "rs", bs.Connection.ReadStatus.ToString()); + if (bs.Connection.WriteStatus != PhysicalConnection.WriteStatus.NA) Add(data, sb, "Write-State", "ws", bs.Connection.WriteStatus.ToString()); + + if 
(bs.Connection.BytesAvailableOnSocket >= 0) Add(data, sb, "Inbound-Bytes", "in", bs.Connection.BytesAvailableOnSocket.ToString()); + if (bs.Connection.BytesInReadPipe >= 0) Add(data, sb, "Inbound-Pipe-Bytes", "in-pipe", bs.Connection.BytesInReadPipe.ToString()); + if (bs.Connection.BytesInWritePipe >= 0) Add(data, sb, "Outbound-Pipe-Bytes", "out-pipe", bs.Connection.BytesInWritePipe.ToString()); + Add(data, sb, "Last-Result-Bytes", "last-in", bs.Connection.BytesLastResult.ToString()); + Add(data, sb, "Inbound-Buffer-Bytes", "cur-in", bs.Connection.BytesInBuffer.ToString()); + + var liveMuxers = ConnectionMultiplexer.GetLiveObjectCount(out var created, out var disposed, out var finalized); + if (created > 1) + { + Add(data, sb, "Live-Multiplexers", "lm", $"{liveMuxers}/{created}/{disposed}/{finalized}"); + } + + Add(data, sb, "Sync-Ops", "sync-ops", multiplexer.syncOps.ToString()); + Add(data, sb, "Async-Ops", "async-ops", multiplexer.asyncOps.ToString()); + + if (multiplexer.StormLogThreshold >= 0 && bs.Connection.MessagesSentAwaitingResponse >= multiplexer.StormLogThreshold && Interlocked.CompareExchange(ref multiplexer.haveStormLog, 1, 0) == 0) { - var log = server.GetStormLog(message.Command); + var log = server.GetStormLog(message); if (string.IsNullOrWhiteSpace(log)) Interlocked.Exchange(ref multiplexer.haveStormLog, 0); else Interlocked.Exchange(ref multiplexer.stormLogSnapshot, log); } - Add(data, sb, "Server-Endpoint", "serverEndpoint", server.EndPoint.ToString().Replace("Unspecified/", "")); + Add(data, sb, "Server-Endpoint", "serverEndpoint", (server.EndPoint.ToString() ?? "Unknown").Replace("Unspecified/", "")); + Add(data, sb, "Server-Connected-Seconds", "conn-sec", bs.ConnectedAt is DateTime dt ? (DateTime.UtcNow - dt).TotalSeconds.ToString("0.##") : "n/a"); + Add(data, sb, "Abort-On-Connect", "aoc", multiplexer.RawConfig.AbortOnConnectFail ? 
"1" : "0"); } Add(data, sb, "Multiplexer-Connects", "mc", $"{multiplexer._connectAttemptCount}/{multiplexer._connectCompletedCount}/{multiplexer._connectionCloseCount}"); Add(data, sb, "Manager", "mgr", multiplexer.SocketManager?.GetState()); @@ -353,20 +379,19 @@ ServerEndPoint server Add(data, sb, "Key-HashSlot", "PerfCounterHelperkeyHashSlot", message.GetHashSlot(multiplexer.ServerSelectionStrategy).ToString()); } } - int busyWorkerCount = PerfCounterHelper.GetThreadPoolStats(out string iocp, out string worker); + int busyWorkerCount = PerfCounterHelper.GetThreadPoolStats(out string iocp, out string worker, out string? workItems); Add(data, sb, "ThreadPool-IO-Completion", "IOCP", iocp); Add(data, sb, "ThreadPool-Workers", "WORKER", worker); - data.Add(Tuple.Create("Busy-Workers", busyWorkerCount.ToString())); - - if (multiplexer.IncludePerformanceCountersInExceptions) + if (workItems != null) { - Add(data, sb, "Local-CPU", "Local-CPU", PerfCounterHelper.GetSystemCpuPercent()); + Add(data, sb, "ThreadPool-Items", "POOL", workItems); } + data.Add(Tuple.Create("Busy-Workers", busyWorkerCount.ToString())); - Add(data, sb, "Version", "v", GetLibVersion()); + Add(data, sb, "Version", "v", Utils.GetLibVersion()); } - private static void AddExceptionDetail(Exception exception, Message message, ServerEndPoint server, string label) + private static void AddExceptionDetail(Exception? exception, Message? message, ServerEndPoint? server, string? label) { if (exception != null) { @@ -384,36 +409,44 @@ private static void AddExceptionDetail(Exception exception, Message message, Ser } } - private static string GetLabel(bool includeDetail, RedisCommand command, Message message) + private static string GetLabel(bool includeDetail, RedisCommand command, Message? message) { - return message == null ? command.ToString() : (includeDetail ? message.CommandAndKey : message.Command.ToString()); + return message == null ? command.ToString() : (includeDetail ? 
message.CommandAndKey : message.CommandString); } - internal static Exception UnableToConnect(ConnectionMultiplexer muxer, string failureMessage=null) + internal static Exception UnableToConnect(ConnectionMultiplexer muxer, string? failureMessage = null, string? connectionName = null) { - var sb = new StringBuilder("It was not possible to connect to the redis server(s)."); - if (muxer != null) + var sb = new StringBuilder("It was not possible to connect to the redis server(s)"); + if (connectionName is not null) { - if (muxer.AuthSuspect) sb.Append(" There was an authentication failure; check that passwords (or client certificates) are configured correctly."); - else if (muxer.RawConfig.AbortOnConnectFail) sb.Append(" Error connecting right now. To allow this multiplexer to continue retrying until it's able to connect, use abortConnect=false in your connection string or AbortOnConnectFail=false; in your code."); + sb.Append(' ').Append(connectionName); + } + sb.Append('.'); + Exception? inner = null; + var failureType = ConnectionFailureType.UnableToConnect; + if (muxer is not null) + { + if (muxer.AuthException is Exception aex) + { + failureType = ConnectionFailureType.AuthenticationFailure; + sb.Append(" There was an authentication failure; check that passwords (or client certificates) are configured correctly: (").Append(aex.GetType().Name).Append(") ").Append(aex.Message); + inner = aex; + if (aex is AuthenticationException && aex.InnerException is Exception iaex) + { + sb.Append(" (Inner - ").Append(iaex.GetType().Name).Append(") ").Append(iaex.Message); + } + } + else if (muxer.RawConfig.AbortOnConnectFail) + { + sb.Append(" Error connecting right now. 
To allow this multiplexer to continue retrying until it's able to connect, use abortConnect=false in your connection string or AbortOnConnectFail=false; in your code."); + } + } + if (!failureMessage.IsNullOrWhiteSpace()) + { + sb.Append(' ').Append(failureMessage.Trim()); } - if (!string.IsNullOrWhiteSpace(failureMessage)) sb.Append(" ").Append(failureMessage.Trim()); - - return new RedisConnectionException(ConnectionFailureType.UnableToConnect, sb.ToString()); - } - - internal static Exception BeganProfilingWithDuplicateContext(object forContext) - { - var exc = new InvalidOperationException("Attempted to begin profiling for the same context twice"); - exc.Data["forContext"] = forContext; - return exc; - } - internal static Exception FinishedProfilingWithInvalidContext(object forContext) - { - var exc = new InvalidOperationException("Attempted to finish profiling for a context which is no longer valid, or was never begun"); - exc.Data["forContext"] = forContext; - return exc; + return new RedisConnectionException(failureType, sb.ToString(), inner); } } } diff --git a/src/StackExchange.Redis/Exceptions.cs b/src/StackExchange.Redis/Exceptions.cs index 2e1787883..1f1c973ce 100644 --- a/src/StackExchange.Redis/Exceptions.cs +++ b/src/StackExchange.Redis/Exceptions.cs @@ -1,11 +1,11 @@ using System; +using System.ComponentModel; using System.Runtime.Serialization; -#pragma warning disable RCS1194 // Implement exception constructors. namespace StackExchange.Redis { /// - /// Indicates that a command was illegal and was not sent to the server + /// Indicates that a command was illegal and was not sent to the server. /// [Serializable] public sealed partial class RedisCommandException : Exception @@ -23,6 +23,10 @@ public RedisCommandException(string message) : base(message) { } /// The inner exception. 
public RedisCommandException(string message, Exception innerException) : base(message, innerException) { } +#if NET8_0_OR_GREATER + [Obsolete(Obsoletions.LegacyFormatterImplMessage, DiagnosticId = Obsoletions.LegacyFormatterImplDiagId)] + [EditorBrowsable(EditorBrowsableState.Never)] +#endif private RedisCommandException(SerializationInfo info, StreamingContext ctx) : base(info, ctx) { } } @@ -43,19 +47,28 @@ public RedisTimeoutException(string message, CommandStatus commandStatus) : base } /// - /// status of the command while communicating with Redis + /// status of the command while communicating with Redis. /// public CommandStatus Commandstatus { get; } +#if NET8_0_OR_GREATER + [Obsolete(Obsoletions.LegacyFormatterImplMessage, DiagnosticId = Obsoletions.LegacyFormatterImplDiagId)] + [EditorBrowsable(EditorBrowsableState.Never)] +#endif private RedisTimeoutException(SerializationInfo info, StreamingContext ctx) : base(info, ctx) { - Commandstatus = (CommandStatus)info.GetValue("commandStatus", typeof(CommandStatus)); + Commandstatus = info.GetValue("commandStatus", typeof(CommandStatus)) as CommandStatus? ?? CommandStatus.Unknown; } + /// - /// Serialization implementation; not intended for general usage + /// Serialization implementation; not intended for general usage. /// /// Serialization info. /// Serialization context. +#if NET8_0_OR_GREATER + [Obsolete(Obsoletions.LegacyFormatterImplMessage, DiagnosticId = Obsoletions.LegacyFormatterImplDiagId)] + [EditorBrowsable(EditorBrowsableState.Never)] +#endif public override void GetObjectData(SerializationInfo info, StreamingContext context) { base.GetObjectData(info, context); @@ -64,7 +77,7 @@ public override void GetObjectData(SerializationInfo info, StreamingContext cont } /// - /// Indicates a connection fault when communicating with redis + /// Indicates a connection fault when communicating with redis. 
/// [Serializable] public sealed partial class RedisConnectionException : RedisException @@ -74,7 +87,7 @@ public sealed partial class RedisConnectionException : RedisException /// /// The type of connection failure. /// The message for the exception. - public RedisConnectionException(ConnectionFailureType failureType, string message) : this(failureType, message, null, CommandStatus.Unknown) {} + public RedisConnectionException(ConnectionFailureType failureType, string message) : this(failureType, message, null, CommandStatus.Unknown) { } /// /// Creates a new . @@ -82,7 +95,7 @@ public RedisConnectionException(ConnectionFailureType failureType, string messag /// The type of connection failure. /// The message for the exception. /// The inner exception. - public RedisConnectionException(ConnectionFailureType failureType, string message, Exception innerException) : this(failureType, message, innerException, CommandStatus.Unknown) {} + public RedisConnectionException(ConnectionFailureType failureType, string message, Exception? innerException) : this(failureType, message, innerException, CommandStatus.Unknown) { } /// /// Creates a new . @@ -91,32 +104,41 @@ public RedisConnectionException(ConnectionFailureType failureType, string messag /// The message for the exception. /// The inner exception. /// The status of the command. - public RedisConnectionException(ConnectionFailureType failureType, string message, Exception innerException, CommandStatus commandStatus) : base(message, innerException) + public RedisConnectionException(ConnectionFailureType failureType, string message, Exception? innerException, CommandStatus commandStatus) : base(message, innerException) { FailureType = failureType; CommandStatus = commandStatus; } /// - /// The type of connection failure + /// The type of connection failure. 
/// public ConnectionFailureType FailureType { get; } /// - /// status of the command while communicating with Redis + /// Status of the command while communicating with Redis. /// public CommandStatus CommandStatus { get; } +#if NET8_0_OR_GREATER + [Obsolete(Obsoletions.LegacyFormatterImplMessage, DiagnosticId = Obsoletions.LegacyFormatterImplDiagId)] + [EditorBrowsable(EditorBrowsableState.Never)] +#endif private RedisConnectionException(SerializationInfo info, StreamingContext ctx) : base(info, ctx) { FailureType = (ConnectionFailureType)info.GetInt32("failureType"); - CommandStatus = (CommandStatus)info.GetValue("commandStatus", typeof(CommandStatus)); + CommandStatus = info.GetValue("commandStatus", typeof(CommandStatus)) as CommandStatus? ?? CommandStatus.Unknown; } + /// - /// Serialization implementation; not intended for general usage + /// Serialization implementation; not intended for general usage. /// /// Serialization info. /// Serialization context. +#if NET8_0_OR_GREATER + [Obsolete(Obsoletions.LegacyFormatterImplMessage, DiagnosticId = Obsoletions.LegacyFormatterImplDiagId)] + [EditorBrowsable(EditorBrowsableState.Never)] +#endif public override void GetObjectData(SerializationInfo info, StreamingContext context) { base.GetObjectData(info, context); @@ -126,7 +148,7 @@ public override void GetObjectData(SerializationInfo info, StreamingContext cont } /// - /// Indicates an issue communicating with redis + /// Indicates an issue communicating with redis. /// [Serializable] public partial class RedisException : Exception @@ -142,18 +164,22 @@ public RedisException(string message) : base(message) { } /// /// The message for the exception. /// The inner exception. - public RedisException(string message, Exception innerException) : base(message, innerException) { } + public RedisException(string message, Exception? 
innerException) : base(message, innerException) { } /// - /// Deserialization constructor; not intended for general usage + /// Deserialization constructor; not intended for general usage. /// /// Serialization info. /// Serialization context. +#if NET8_0_OR_GREATER + [Obsolete(Obsoletions.LegacyFormatterImplMessage, DiagnosticId = Obsoletions.LegacyFormatterImplDiagId)] + [EditorBrowsable(EditorBrowsableState.Never)] +#endif protected RedisException(SerializationInfo info, StreamingContext ctx) : base(info, ctx) { } } /// - /// Indicates an exception raised by a redis server + /// Indicates an exception raised by a redis server. /// [Serializable] public sealed partial class RedisServerException : RedisException @@ -164,6 +190,10 @@ public sealed partial class RedisServerException : RedisException /// The message for the exception. public RedisServerException(string message) : base(message) { } +#if NET8_0_OR_GREATER + [Obsolete(Obsoletions.LegacyFormatterImplMessage, DiagnosticId = Obsoletions.LegacyFormatterImplDiagId)] + [EditorBrowsable(EditorBrowsableState.Never)] +#endif private RedisServerException(SerializationInfo info, StreamingContext ctx) : base(info, ctx) { } } } diff --git a/src/StackExchange.Redis/Expiration.cs b/src/StackExchange.Redis/Expiration.cs new file mode 100644 index 000000000..e04094358 --- /dev/null +++ b/src/StackExchange.Redis/Expiration.cs @@ -0,0 +1,273 @@ +using System; + +namespace StackExchange.Redis; + +/// +/// Configures the expiration behaviour of a command. 
+/// +public readonly struct Expiration +{ + /* + Redis expiration supports different modes: + - (nothing) - do nothing; implicit wipe for writes, nothing for reads + - PERSIST - explicit wipe of expiry + - KEEPTTL - sets no expiry, but leaves any existing expiry alone + - EX {s} - relative expiry in seconds + - PX {ms} - relative expiry in milliseconds + - EXAT {s} - absolute expiry in seconds + - PXAT {ms} - absolute expiry in milliseconds + + We need to distinguish between these 6 scenarios, which we can logically do with 3 bits (8 options). + So; we'll use a ulong for the value, reserving the top 3 bits for the mode. + */ + + /// + /// Default expiration behaviour. For writes, this is typically no expiration. For reads, this is typically no action. + /// + public static Expiration Default => s_Default; + + /// + /// Explicitly retain the existing expiry, if one. This is valid in some (not all) write scenarios. + /// + public static Expiration KeepTtl => s_KeepTtl; + + /// + /// Explicitly remove the existing expiry, if one. This is valid in some (not all) read scenarios. + /// + public static Expiration Persist => s_Persist; + + /// + /// Expire at the specified absolute time. + /// + public Expiration(DateTime when) + { + if (when == DateTime.MaxValue) + { + _valueAndMode = s_Default._valueAndMode; + return; + } + + long millis = GetUnixTimeMilliseconds(when); + if ((millis % 1000) == 0) + { + Init(ExpirationMode.AbsoluteSeconds, millis / 1000, out _valueAndMode); + } + else + { + Init(ExpirationMode.AbsoluteMilliseconds, millis, out _valueAndMode); + } + } + + /// + /// Expire at the specified absolute time. + /// + public static implicit operator Expiration(DateTime when) => new(when); + + /// + /// Expire at the specified absolute time. + /// + public static implicit operator Expiration(TimeSpan ttl) => new(ttl); + + /// + /// Expire at the specified relative time. 
+ /// + public Expiration(TimeSpan ttl) + { + if (ttl == TimeSpan.MaxValue) + { + _valueAndMode = s_Default._valueAndMode; + return; + } + + var millis = ttl.Ticks / TimeSpan.TicksPerMillisecond; + if ((millis % 1000) == 0) + { + Init(ExpirationMode.RelativeSeconds, millis / 1000, out _valueAndMode); + } + else + { + Init(ExpirationMode.RelativeMilliseconds, millis, out _valueAndMode); + } + } + + private readonly ulong _valueAndMode; + + private static void Init(ExpirationMode mode, long value, out ulong valueAndMode) + { + // check the caller isn't using the top 3 bits that we have reserved; this includes checking for -ve values + ulong uValue = (ulong)value; + if ((uValue & ~ValueMask) != 0) Throw(); + valueAndMode = (uValue & ValueMask) | ((ulong)mode << 61); + static void Throw() => throw new ArgumentOutOfRangeException(nameof(value)); + } + + private Expiration(ExpirationMode mode, long value) => Init(mode, value, out _valueAndMode); + + private enum ExpirationMode : byte + { + Default = 0, + RelativeSeconds = 1, + RelativeMilliseconds = 2, + AbsoluteSeconds = 3, + AbsoluteMilliseconds = 4, + KeepTtl = 5, + Persist = 6, + NotUsed = 7, // just to ensure all 8 possible values are covered + } + + private const ulong ValueMask = (~0UL) >> 3; + internal long Value => unchecked((long)(_valueAndMode & ValueMask)); + private ExpirationMode Mode => (ExpirationMode)(_valueAndMode >> 61); // note unsigned, no need to mask + + internal bool IsKeepTtl => Mode is ExpirationMode.KeepTtl; + internal bool IsPersist => Mode is ExpirationMode.Persist; + internal bool IsNone => Mode is ExpirationMode.Default; + internal bool IsNoneOrKeepTtl => Mode is ExpirationMode.Default or ExpirationMode.KeepTtl; + internal bool IsAbsolute => Mode is ExpirationMode.AbsoluteSeconds or ExpirationMode.AbsoluteMilliseconds; + internal bool IsRelative => Mode is ExpirationMode.RelativeSeconds or ExpirationMode.RelativeMilliseconds; + + internal bool IsMilliseconds => + Mode is 
ExpirationMode.RelativeMilliseconds or ExpirationMode.AbsoluteMilliseconds; + + internal bool IsSeconds => Mode is ExpirationMode.RelativeSeconds or ExpirationMode.AbsoluteSeconds; + + private static readonly Expiration s_Default = new(ExpirationMode.Default, 0); + + private static readonly Expiration s_KeepTtl = new(ExpirationMode.KeepTtl, 0), + s_Persist = new(ExpirationMode.Persist, 0); + + private static void ThrowExpiryAndKeepTtl() => + // ReSharper disable once NotResolvedInText + throw new ArgumentException(message: "Cannot specify both expiry and keepTtl.", paramName: "keepTtl"); + + private static void ThrowExpiryAndPersist() => + // ReSharper disable once NotResolvedInText + throw new ArgumentException(message: "Cannot specify both expiry and persist.", paramName: "persist"); + + internal static Expiration CreateOrPersist(in TimeSpan? ttl, bool persist) + { + if (persist) + { + if (ttl.HasValue) ThrowExpiryAndPersist(); + return s_Persist; + } + + return ttl.HasValue ? new(ttl.GetValueOrDefault()) : s_Default; + } + + internal static Expiration CreateOrKeepTtl(in TimeSpan? ttl, bool keepTtl) + { + if (keepTtl) + { + if (ttl.HasValue) ThrowExpiryAndKeepTtl(); + return s_KeepTtl; + } + + return ttl.HasValue ? new(ttl.GetValueOrDefault()) : s_Default; + } + + internal static long GetUnixTimeMilliseconds(DateTime when) + { + return when.Kind switch + { + DateTimeKind.Local or DateTimeKind.Utc => (when.ToUniversalTime() - RedisBase.UnixEpoch).Ticks / + TimeSpan.TicksPerMillisecond, + _ => ThrowKind(), + }; + + static long ThrowKind() => + throw new ArgumentException("Expiry time must be either Utc or Local", nameof(when)); + } + + internal static Expiration CreateOrPersist(in DateTime? when, bool persist) + { + if (persist) + { + if (when.HasValue) ThrowExpiryAndPersist(); + return s_Persist; + } + + return when.HasValue ? new(when.GetValueOrDefault()) : s_Default; + } + + internal static Expiration CreateOrKeepTtl(in DateTime? 
ttl, bool keepTtl) + { + if (keepTtl) + { + if (ttl.HasValue) ThrowExpiryAndKeepTtl(); + return s_KeepTtl; + } + + return ttl.HasValue ? new(ttl.GetValueOrDefault()) : s_Default; + } + + internal RedisValue Operand => GetOperand(out _); + + internal RedisValue GetOperand(out long value) + { + value = Value; + var mode = Mode; + return mode switch + { + ExpirationMode.KeepTtl => RedisLiterals.KEEPTTL, + ExpirationMode.Persist => RedisLiterals.PERSIST, + ExpirationMode.RelativeSeconds => RedisLiterals.EX, + ExpirationMode.RelativeMilliseconds => RedisLiterals.PX, + ExpirationMode.AbsoluteSeconds => RedisLiterals.EXAT, + ExpirationMode.AbsoluteMilliseconds => RedisLiterals.PXAT, + _ => RedisValue.Null, + }; + } + + private static void ThrowMode(ExpirationMode mode) => + throw new InvalidOperationException("Unknown mode: " + mode); + + /// + public override string ToString() => Mode switch + { + ExpirationMode.Default or ExpirationMode.NotUsed => "", + ExpirationMode.KeepTtl => "KEEPTTL", + ExpirationMode.Persist => "PERSIST", + _ => $"{Operand} {Value}", + }; + + /// + public override int GetHashCode() => _valueAndMode.GetHashCode(); + + /// + public override bool Equals(object? 
obj) => obj is Expiration other && _valueAndMode == other._valueAndMode; + + internal int TokenCount => Mode switch + { + ExpirationMode.Default or ExpirationMode.NotUsed => 0, + ExpirationMode.KeepTtl or ExpirationMode.Persist => 1, + _ => 2, + }; + + internal void WriteTo(PhysicalConnection physical) + { + var mode = Mode; + switch (Mode) + { + case ExpirationMode.Default or ExpirationMode.NotUsed: + break; + case ExpirationMode.KeepTtl: + physical.WriteBulkString("KEEPTTL"u8); + break; + case ExpirationMode.Persist: + physical.WriteBulkString("PERSIST"u8); + break; + default: + physical.WriteBulkString(mode switch + { + ExpirationMode.RelativeSeconds => "EX"u8, + ExpirationMode.RelativeMilliseconds => "PX"u8, + ExpirationMode.AbsoluteSeconds => "EXAT"u8, + ExpirationMode.AbsoluteMilliseconds => "PXAT"u8, + _ => default, + }); + physical.WriteBulkString(Value); + break; + } + } +} diff --git a/src/StackExchange.Redis/ExponentialRetry.cs b/src/StackExchange.Redis/ExponentialRetry.cs index 594ffaf20..5ee10a951 100644 --- a/src/StackExchange.Redis/ExponentialRetry.cs +++ b/src/StackExchange.Redis/ExponentialRetry.cs @@ -1,30 +1,43 @@ -using System; +using System; namespace StackExchange.Redis { /// - /// Represents a retry policy that performs retries, using a randomized exponential back off scheme to determine the interval between retries. + /// Represents a retry policy that performs retries, using a randomized exponential back off scheme to determine the interval between retries. /// public class ExponentialRetry : IReconnectRetryPolicy { private readonly int deltaBackOffMilliseconds; - private readonly int maxDeltaBackOffMilliseconds = (int)TimeSpan.FromSeconds(10).TotalMilliseconds; + private readonly int maxDeltaBackOffMilliseconds = (int)TimeSpan.FromSeconds(60).TotalMilliseconds; [ThreadStatic] - private static Random r; + private static Random? 
r; /// - /// Initializes a new instance using the specified back off interval with default maxDeltaBackOffMilliseconds of 10 seconds + /// Initializes a new instance using the specified back off interval with default maxDeltaBackOffMilliseconds of 10 seconds. /// - /// time in milliseconds for the back-off interval between retries - public ExponentialRetry(int deltaBackOffMilliseconds) : this(deltaBackOffMilliseconds, (int)TimeSpan.FromSeconds(10).TotalMilliseconds) {} + /// Time in milliseconds for the back-off interval between retries. + public ExponentialRetry(int deltaBackOffMilliseconds) : this(deltaBackOffMilliseconds, Math.Max(deltaBackOffMilliseconds, (int)TimeSpan.FromSeconds(10).TotalMilliseconds)) { } /// /// Initializes a new instance using the specified back off interval. /// - /// time in milliseconds for the back-off interval between retries - /// time in milliseconds for the maximum value that the back-off interval can exponentailly grow upto + /// Time in milliseconds for the back-off interval between retries. + /// Time in milliseconds for the maximum value that the back-off interval can exponentially grow up to. 
public ExponentialRetry(int deltaBackOffMilliseconds, int maxDeltaBackOffMilliseconds) { + if (deltaBackOffMilliseconds < 0) + { + throw new ArgumentOutOfRangeException(nameof(deltaBackOffMilliseconds), $"{nameof(deltaBackOffMilliseconds)} must be greater than or equal to zero"); + } + if (maxDeltaBackOffMilliseconds < 0) + { + throw new ArgumentOutOfRangeException(nameof(maxDeltaBackOffMilliseconds), $"{nameof(maxDeltaBackOffMilliseconds)} must be greater than or equal to zero"); + } + if (maxDeltaBackOffMilliseconds < deltaBackOffMilliseconds) + { + throw new ArgumentOutOfRangeException(nameof(maxDeltaBackOffMilliseconds), $"{nameof(maxDeltaBackOffMilliseconds)} must be greater than or equal to {nameof(deltaBackOffMilliseconds)}"); + } + this.deltaBackOffMilliseconds = deltaBackOffMilliseconds; this.maxDeltaBackOffMilliseconds = maxDeltaBackOffMilliseconds; } @@ -32,25 +45,25 @@ public ExponentialRetry(int deltaBackOffMilliseconds, int maxDeltaBackOffMillise /// /// This method is called by the ConnectionMultiplexer to determine if a reconnect operation can be retried now. /// - /// The number of times reconnect retries have already been made by the ConnectionMultiplexer while it was in the connecting state - /// Total elapsed time in milliseconds since the last reconnect retry was made + /// The number of times reconnect retries have already been made by the ConnectionMultiplexer while it was in the connecting state. + /// Total elapsed time in milliseconds since the last reconnect retry was made. public bool ShouldRetry(long currentRetryCount, int timeElapsedMillisecondsSinceLastRetry) { var exponential = (int)Math.Min(maxDeltaBackOffMilliseconds, deltaBackOffMilliseconds * Math.Pow(1.1, currentRetryCount)); int random; - r = r ?? 
new Random(); + r ??= new Random(); random = r.Next((int)deltaBackOffMilliseconds, exponential); return timeElapsedMillisecondsSinceLastRetry >= random; - //exponential backoff with deltaBackOff of 5000ms - //deltabackoff exponential - //5000 5500 - //5000 6050 - //5000 6655 - //5000 8053 - //5000 10718 - //5000 17261 - //5000 37001 - //5000 127738 + // exponential backoff with deltaBackOff of 5000ms + // deltabackoff exponential + // 5000 5500 + // 5000 6050 + // 5000 6655 + // 5000 8053 + // 5000 10718 + // 5000 17261 + // 5000 37001 + // 5000 127738 } } -} \ No newline at end of file +} diff --git a/src/StackExchange.Redis/ExtensionMethods.Internal.cs b/src/StackExchange.Redis/ExtensionMethods.Internal.cs new file mode 100644 index 000000000..446f6ff88 --- /dev/null +++ b/src/StackExchange.Redis/ExtensionMethods.Internal.cs @@ -0,0 +1,37 @@ +using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; + +namespace StackExchange.Redis +{ + internal static class ExtensionMethodsInternal + { + internal static bool IsNullOrEmpty([NotNullWhen(false)] this string? s) => + string.IsNullOrEmpty(s); + + internal static bool IsNullOrWhiteSpace([NotNullWhen(false)] this string? s) => + string.IsNullOrWhiteSpace(s); + +#if !NET + internal static bool TryDequeue(this Queue queue, [NotNullWhen(true)] out T? result) + { + if (queue.Count == 0) + { + result = default; + return false; + } + result = queue.Dequeue()!; + return true; + } + internal static bool TryPeek(this Queue queue, [NotNullWhen(true)] out T? 
result) + { + if (queue.Count == 0) + { + result = default; + return false; + } + result = queue.Peek()!; + return true; + } +#endif + } +} diff --git a/src/StackExchange.Redis/ExtensionMethods.cs b/src/StackExchange.Redis/ExtensionMethods.cs index a9b7fd14b..e5a5c4d4d 100644 --- a/src/StackExchange.Redis/ExtensionMethods.cs +++ b/src/StackExchange.Redis/ExtensionMethods.cs @@ -1,42 +1,53 @@ using System; using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; using System.IO; using System.Net.Security; using System.Runtime.CompilerServices; using System.Security.Authentication; using System.Security.Cryptography.X509Certificates; using System.Text; +using System.Threading.Tasks; using Pipelines.Sockets.Unofficial.Arenas; namespace StackExchange.Redis { /// - /// Utility methods + /// Utility methods. /// public static class ExtensionMethods { /// - /// Create a dictionary from an array of HashEntry values + /// Create a dictionary from an array of HashEntry values. /// /// The entry to convert to a dictionary. - public static Dictionary ToStringDictionary(this HashEntry[] hash) + [return: NotNullIfNotNull("hash")] + public static Dictionary? ToStringDictionary(this HashEntry[]? hash) { - if (hash == null) return null; + if (hash is null) + { + return null; + } var result = new Dictionary(hash.Length, StringComparer.Ordinal); - for(int i = 0; i < hash.Length; i++) + for (int i = 0; i < hash.Length; i++) { - result.Add(hash[i].name, hash[i].value); + result.Add(hash[i].name!, hash[i].value!); } return result; } + /// - /// Create a dictionary from an array of HashEntry values + /// Create a dictionary from an array of HashEntry values. /// /// The entry to convert to a dictionary. - public static Dictionary ToDictionary(this HashEntry[] hash) + [return: NotNullIfNotNull("hash")] + public static Dictionary? ToDictionary(this HashEntry[]? 
hash) { - if (hash == null) return null; + if (hash is null) + { + return null; + } var result = new Dictionary(hash.Length); for (int i = 0; i < hash.Length; i++) @@ -47,28 +58,36 @@ public static Dictionary ToDictionary(this HashEntry[] h } /// - /// Create a dictionary from an array of SortedSetEntry values + /// Create a dictionary from an array of SortedSetEntry values. /// /// The set entries to convert to a dictionary. - public static Dictionary ToStringDictionary(this SortedSetEntry[] sortedSet) + [return: NotNullIfNotNull("sortedSet")] + public static Dictionary? ToStringDictionary(this SortedSetEntry[]? sortedSet) { - if (sortedSet == null) return null; + if (sortedSet is null) + { + return null; + } var result = new Dictionary(sortedSet.Length, StringComparer.Ordinal); for (int i = 0; i < sortedSet.Length; i++) { - result.Add(sortedSet[i].element, sortedSet[i].score); + result.Add(sortedSet[i].element!, sortedSet[i].score); } return result; } /// - /// Create a dictionary from an array of SortedSetEntry values + /// Create a dictionary from an array of SortedSetEntry values. /// /// The set entries to convert to a dictionary. - public static Dictionary ToDictionary(this SortedSetEntry[] sortedSet) + [return: NotNullIfNotNull("sortedSet")] + public static Dictionary? ToDictionary(this SortedSetEntry[]? sortedSet) { - if (sortedSet == null) return null; + if (sortedSet is null) + { + return null; + } var result = new Dictionary(sortedSet.Length); for (int i = 0; i < sortedSet.Length; i++) @@ -79,28 +98,36 @@ public static Dictionary ToDictionary(this SortedSetEntry[] } /// - /// Create a dictionary from an array of key/value pairs + /// Create a dictionary from an array of key/value pairs. /// /// The pairs to convert to a dictionary. - public static Dictionary ToStringDictionary(this KeyValuePair[] pairs) + [return: NotNullIfNotNull("pairs")] + public static Dictionary? ToStringDictionary(this KeyValuePair[]? 
pairs) { - if (pairs == null) return null; + if (pairs is null) + { + return null; + } var result = new Dictionary(pairs.Length, StringComparer.Ordinal); for (int i = 0; i < pairs.Length; i++) { - result.Add(pairs[i].Key, pairs[i].Value); + result.Add(pairs[i].Key!, pairs[i].Value!); } return result; } /// - /// Create a dictionary from an array of key/value pairs + /// Create a dictionary from an array of key/value pairs. /// /// The pairs to convert to a dictionary. - public static Dictionary ToDictionary(this KeyValuePair[] pairs) + [return: NotNullIfNotNull("pairs")] + public static Dictionary? ToDictionary(this KeyValuePair[]? pairs) { - if (pairs == null) return null; + if (pairs is null) + { + return null; + } var result = new Dictionary(pairs.Length); for (int i = 0; i < pairs.Length; i++) @@ -111,12 +138,16 @@ public static Dictionary ToDictionary(this KeyValuePair - /// Create a dictionary from an array of string pairs + /// Create a dictionary from an array of string pairs. /// /// The pairs to convert to a dictionary. - public static Dictionary ToDictionary(this KeyValuePair[] pairs) + [return: NotNullIfNotNull("pairs")] + public static Dictionary? ToDictionary(this KeyValuePair[]? pairs) { - if (pairs == null) return null; + if (pairs is null) + { + return null; + } var result = new Dictionary(pairs.Length, StringComparer.Ordinal); for (int i = 0; i < pairs.Length; i++) @@ -129,95 +160,121 @@ public static Dictionary ToDictionary(this KeyValuePair /// Create an array of RedisValues from an array of strings. /// - /// The string array to convert to RedisValues - public static RedisValue[] ToRedisValueArray(this string[] values) + /// The string array to convert to RedisValues. + [return: NotNullIfNotNull("values")] + public static RedisValue[]? ToRedisValueArray(this string[]? 
values) { - if (values == null) return null; + if (values is null) + { + return null; + } + if (values.Length == 0) return Array.Empty(); return Array.ConvertAll(values, x => (RedisValue)x); } /// - /// Create an array of strings from an array of values + /// Create an array of strings from an array of values. /// /// The values to convert to an array. - public static string[] ToStringArray(this RedisValue[] values) + [return: NotNullIfNotNull("values")] + public static string?[]? ToStringArray(this RedisValue[]? values) { - if (values == null) return null; + if (values == null) + { + return null; + } + if (values.Length == 0) return Array.Empty(); - return Array.ConvertAll(values, x => (string)x); + return Array.ConvertAll(values, x => (string?)x); } - internal static void AuthenticateAsClient(this SslStream ssl, string host, SslProtocols? allowedProtocols, bool checkCertificateRevocation) + internal static Task AuthenticateAsClientAsync(this SslStream ssl, string host, SslProtocols? allowedProtocols, bool checkCertificateRevocation) { if (!allowedProtocols.HasValue) { - //Default to the sslProtocols defined by the .NET Framework - AuthenticateAsClientUsingDefaultProtocols(ssl, host); - return; + // Default to the sslProtocols defined by the .NET Framework + return ssl.AuthenticateAsClientAsync(host); } var certificateCollection = new X509CertificateCollection(); - ssl.AuthenticateAsClient(host, certificateCollection, allowedProtocols.Value, checkCertificateRevocation); - } - - private static void AuthenticateAsClientUsingDefaultProtocols(SslStream ssl, string host) - { - ssl.AuthenticateAsClient(host); + return ssl.AuthenticateAsClientAsync(host, certificateCollection, allowedProtocols.Value, checkCertificateRevocation); } /// - /// Represent a byte-Lease as a read-only Stream + /// Represent a byte-Lease as a read-only Stream. 
/// - /// The lease upon which to base the stream - /// If true, disposing the stream also disposes the lease - public static Stream AsStream(this Lease bytes, bool ownsLease = true) + /// The lease upon which to base the stream. + /// If true, disposing the stream also disposes the lease. + [return: NotNullIfNotNull("bytes")] + public static Stream? AsStream(this Lease? bytes, bool ownsLease = true) { - if (bytes == null) return null; // GIGO + if (bytes is null) + { + return null; // GIGO + } + var segment = bytes.ArraySegment; - if (ownsLease) return new LeaseMemoryStream(segment, bytes); - return new MemoryStream(segment.Array, segment.Offset, segment.Count, false, true); + if (ownsLease) + { + return new LeaseMemoryStream(segment, bytes); + } + return new MemoryStream(segment.Array!, segment.Offset, segment.Count, false, true); } /// - /// Decode a byte-Lease as a String, optionally specifying the encoding (UTF-8 if omitted) + /// Decode a byte-Lease as a String, optionally specifying the encoding (UTF-8 if omitted). /// - /// The bytes to decode - /// The encoding to use - public static string DecodeString(this Lease bytes, Encoding encoding = null) + /// The bytes to decode. + /// The encoding to use. + [return: NotNullIfNotNull("bytes")] + public static string? DecodeString(this Lease bytes, Encoding? encoding = null) { - if (bytes == null) return null; - if (encoding == null) encoding = Encoding.UTF8; - if (bytes.Length == 0) return ""; + if (bytes is null) + { + return null; + } + + encoding ??= Encoding.UTF8; + if (bytes.Length == 0) + { + return ""; + } var segment = bytes.ArraySegment; - return encoding.GetString(segment.Array, segment.Offset, segment.Count); + return encoding.GetString(segment.Array!, segment.Offset, segment.Count); } /// - /// Decode a byte-Lease as a String, optionally specifying the encoding (UTF-8 if omitted) + /// Decode a byte-Lease as a String, optionally specifying the encoding (UTF-8 if omitted). 
/// - /// The bytes to decode - /// The encoding to use - public static Lease DecodeLease(this Lease bytes, Encoding encoding = null) + /// The bytes to decode. + /// The encoding to use. + [return: NotNullIfNotNull("bytes")] + public static Lease? DecodeLease(this Lease? bytes, Encoding? encoding = null) { - if (bytes == null) return null; - if (encoding == null) encoding = Encoding.UTF8; - if (bytes.Length == 0) return Lease.Empty; + if (bytes is null) + { + return null; + } + + encoding ??= Encoding.UTF8; + if (bytes.Length == 0) + { + return Lease.Empty; + } var bytesSegment = bytes.ArraySegment; - var charCount = encoding.GetCharCount(bytesSegment.Array, bytesSegment.Offset, bytesSegment.Count); + var charCount = encoding.GetCharCount(bytesSegment.Array!, bytesSegment.Offset, bytesSegment.Count); var chars = Lease.Create(charCount, false); var charsSegment = chars.ArraySegment; - encoding.GetChars(bytesSegment.Array, bytesSegment.Offset, bytesSegment.Count, - charsSegment.Array, charsSegment.Offset); + encoding.GetChars(bytesSegment.Array!, bytesSegment.Offset, bytesSegment.Count, charsSegment.Array!, charsSegment.Offset); return chars; } private sealed class LeaseMemoryStream : MemoryStream { private readonly IDisposable _parent; - public LeaseMemoryStream(ArraySegment segment, IDisposable parent) - : base(segment.Array, segment.Offset, segment.Count, false, true) - => _parent = parent; + + public LeaseMemoryStream(ArraySegment segment, IDisposable parent) : base(segment.Array!, segment.Offset, segment.Count, false, true) => _parent = parent; protected override void Dispose(bool disposing) { @@ -235,11 +292,10 @@ protected override void Dispose(bool disposing) // Could not load file or assembly 'System.Numerics.Vectors, Version=4.1.3.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a' // or one of its dependencies.The located assembly's manifest definition does not match the assembly reference. 
(Exception from HRESULT: 0x80131040) // - // also; note that the nuget tools *do not* reliably (or even occasionally) produce the correct + // also; note that the NuGet tools *do not* reliably (or even occasionally) produce the correct // assembly-binding-redirect entries to fix this up, so; it would present an unreasonable support burden // otherwise. And yes, I've tried explicitly referencing System.Numerics.Vectors in the manifest to // force it... nothing. Nada. - #if VECTOR_SAFE [MethodImpl(MethodImplOptions.AggressiveInlining)] internal static int VectorSafeIndexOf(this ReadOnlySpan span, byte value) @@ -247,10 +303,11 @@ internal static int VectorSafeIndexOf(this ReadOnlySpan span, byte value) [MethodImpl(MethodImplOptions.AggressiveInlining)] internal static int VectorSafeIndexOfCRLF(this ReadOnlySpan span) - { - ReadOnlySpan CRLF = stackalloc byte[2] { (byte)'\r', (byte)'\n' }; - return span.IndexOf(CRLF); - } + => span.IndexOf(CRLF); + + // note that this is *not* actually an array; this is compiled into a .data section + // (confirmed down to net472, which is the lowest TFM that uses this branch) + private static ReadOnlySpan CRLF => new byte[] { (byte)'\r', (byte)'\n' }; #else internal static int VectorSafeIndexOf(this ReadOnlySpan span, byte value) { @@ -261,23 +318,24 @@ internal static int VectorSafeIndexOf(this ReadOnlySpan span, byte value) } return -1; } + internal static int VectorSafeIndexOfCRLF(this ReadOnlySpan span) { // yes, this has zero optimization; I'm OK with this as the fallback strategy for (int i = 1; i < span.Length; i++) { - if (span[i] == '\n' && span[i-1] == '\r') return i - 1; + if (span[i] == '\n' && span[i - 1] == '\r') return i - 1; } return -1; } #endif [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal static T[] ToArray(in this RawResult result, Projection selector) + internal static T[]? ToArray(in this RawResult result, Projection selector) => result.IsNull ? 
null : result.GetItems().ToArray(selector); [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal static TTo[] ToArray(in this RawResult result, Projection selector, in TState state) + internal static TTo[]? ToArray(in this RawResult result, Projection selector, in TState state) => result.IsNull ? null : result.GetItems().ToArray(selector, in state); } } diff --git a/src/StackExchange.Redis/Format.cs b/src/StackExchange.Redis/Format.cs index f9407894e..9279bb0f5 100644 --- a/src/StackExchange.Redis/Format.cs +++ b/src/StackExchange.Redis/Format.cs @@ -1,16 +1,25 @@ using System; using System.Buffers; using System.Buffers.Text; +using System.Diagnostics.CodeAnalysis; using System.Globalization; using System.Net; -using System.Net.Sockets; -using System.Runtime.InteropServices; +using System.Runtime.CompilerServices; using System.Text; +#if UNIX_SOCKET +using System.Net.Sockets; +#endif + namespace StackExchange.Redis { internal static class Format { +#if NET + public static int ParseInt32(ReadOnlySpan s) => int.Parse(s, NumberStyles.Integer, NumberFormatInfo.InvariantInfo); + public static bool TryParseInt32(ReadOnlySpan s, out int value) => int.TryParse(s, NumberStyles.Integer, NumberFormatInfo.InvariantInfo, out value); +#endif + public static int ParseInt32(string s) => int.Parse(s, NumberStyles.Integer, NumberFormatInfo.InvariantInfo); public static long ParseInt64(string s) => long.Parse(s, NumberStyles.Integer, NumberFormatInfo.InvariantInfo); @@ -35,21 +44,24 @@ public static bool TryParseBoolean(string s, out bool value) return false; } - public static bool TryParseInt32(string s, out int value) - { - return int.TryParse(s, NumberStyles.Integer, NumberFormatInfo.InvariantInfo, out value); - } + public static bool TryParseInt32(string s, out int value) => + int.TryParse(s, NumberStyles.Integer, NumberFormatInfo.InvariantInfo, out value); internal static EndPoint ParseEndPoint(string host, int port) { - if (IPAddress.TryParse(host, out IPAddress ip)) 
return new IPEndPoint(ip, port); + if (IPAddress.TryParse(host, out IPAddress? ip)) return new IPEndPoint(ip, port); return new DnsEndPoint(host, port); } - internal static EndPoint TryParseEndPoint(string host, string port) + internal static bool TryParseEndPoint(string host, string? port, [NotNullWhen(true)] out EndPoint? endpoint) { - if (string.IsNullOrEmpty(host) || string.IsNullOrEmpty(port)) return null; - return TryParseInt32(port, out int i) ? ParseEndPoint(host, i) : null; + if (!host.IsNullOrEmpty() && !port.IsNullOrEmpty() && TryParseInt32(port, out int i)) + { + endpoint = ParseEndPoint(host, i); + return true; + } + endpoint = null; + return false; } internal static string ToString(long value) => value.ToString(NumberFormatInfo.InvariantInfo); @@ -66,18 +78,19 @@ internal static string ToString(double value) return value.ToString("G17", NumberFormatInfo.InvariantInfo); } - internal static string ToString(object value) + [return: NotNullIfNotNull("value")] + internal static string? ToString(object? value) => value switch { - if (value == null) return ""; - if (value is long l) return ToString(l); - if (value is int i) return ToString(i); - if (value is float f) return ToString(f); - if (value is double d) return ToString(d); - if (value is EndPoint e) return ToString(e); - return Convert.ToString(value, CultureInfo.InvariantCulture); - } + null => "", + long l => ToString(l), + int i => ToString(i), + float f => ToString(f), + double d => ToString(d), + EndPoint e => ToString(e), + _ => Convert.ToString(value, CultureInfo.InvariantCulture), + }; - internal static string ToString(EndPoint endpoint) + internal static string ToString(EndPoint? 
endpoint) { switch (endpoint) { @@ -85,7 +98,20 @@ internal static string ToString(EndPoint endpoint) if (dns.Port == 0) return dns.Host; return dns.Host + ":" + Format.ToString(dns.Port); case IPEndPoint ip: - if (ip.Port == 0) return ip.Address.ToString(); + var addr = ip.Address.ToString(); + + if (ip.Port == 0) + { + // no port specified; use naked IP + return addr; + } + + if (addr.IndexOf(':') >= 0) + { + // ipv6 with port; use "[IP]:port" notation + return "[" + addr + "]:" + Format.ToString(ip.Port); + } + // ipv4 with port; use "IP:port" notation return ip.Address + ":" + Format.ToString(ip.Port); #if UNIX_SOCKET case UnixDomainSocketEndPoint uds: @@ -96,22 +122,17 @@ internal static string ToString(EndPoint endpoint) } } - internal static string ToStringHostOnly(EndPoint endpoint) - { - if (endpoint is DnsEndPoint dns) + internal static string ToStringHostOnly(EndPoint endpoint) => + endpoint switch { - return dns.Host; - } - if (endpoint is IPEndPoint ip) - { - return ip.Address.ToString(); - } - return ""; - } + DnsEndPoint dns => dns.Host, + IPEndPoint ip => ip.Address.ToString(), + _ => "", + }; - internal static bool TryGetHostPort(EndPoint endpoint, out string host, out int port) + internal static bool TryGetHostPort(EndPoint? endpoint, [NotNullWhen(true)] out string? host, [NotNullWhen(true)] out int? port) { - if (endpoint != null) + if (endpoint is not null) { if (endpoint is IPEndPoint ip) { @@ -127,48 +148,76 @@ internal static bool TryGetHostPort(EndPoint endpoint, out string host, out int } } host = null; - port = 0; + port = null; return false; } - internal static bool TryParseDouble(string s, out double value) + internal static bool TryParseDouble(string? 
s, out double value) { - if (string.IsNullOrEmpty(s)) + if (s is null) { value = 0; return false; } - if (s.Length == 1 && s[0] >= '0' && s[0] <= '9') + switch (s.Length) { - value = (int)(s[0] - '0'); - return true; - } - // need to handle these - if (string.Equals("+inf", s, StringComparison.OrdinalIgnoreCase) || string.Equals("inf", s, StringComparison.OrdinalIgnoreCase)) - { - value = double.PositiveInfinity; - return true; + case 0: + value = 0; + return false; + // single-digits + case 1 when s[0] >= '0' && s[0] <= '9': + value = s[0] - '0'; + return true; + // RESP3 spec demands inf/nan handling + case 3 when TryParseInfNaN(s.AsSpan(), true, out value): + case 4 when s[0] == '+' && TryParseInfNaN(s.AsSpan(1), true, out value): + case 4 when s[0] == '-' && TryParseInfNaN(s.AsSpan(1), false, out value): + return true; } - if (string.Equals("-inf", s, StringComparison.OrdinalIgnoreCase)) + return double.TryParse(s, NumberStyles.Any, NumberFormatInfo.InvariantInfo, out value); + + static bool TryParseInfNaN(ReadOnlySpan s, bool positive, out double value) { - value = double.NegativeInfinity; - return true; + switch (s[0]) + { + case 'i': + case 'I': + if (s[1] is 'n' or 'N' && s[2] is 'f' or 'F') + { + value = positive ? 
double.PositiveInfinity : double.NegativeInfinity; + return true; + } + break; + case 'n': + case 'N': + if (s[1] is 'a' or 'A' && s[2] is 'n' or 'N') + { + value = double.NaN; + return true; + } + break; + } +#if NET + Unsafe.SkipInit(out value); +#else + value = 0; +#endif + return false; } - return double.TryParse(s, NumberStyles.Any, NumberFormatInfo.InvariantInfo, out value); } - internal static bool TryParseUInt64(string s, out ulong value) - => ulong.TryParse(s, NumberStyles.Integer, NumberFormatInfo.InvariantInfo, out value); + internal static bool TryParseUInt64(string s, out ulong value) => + ulong.TryParse(s, NumberStyles.Integer, NumberFormatInfo.InvariantInfo, out value); - internal static bool TryParseUInt64(ReadOnlySpan s, out ulong value) - => Utf8Parser.TryParse(s, out value, out int bytes, standardFormat: 'D') & bytes == s.Length; + internal static bool TryParseUInt64(ReadOnlySpan s, out ulong value) => + Utf8Parser.TryParse(s, out value, out int bytes, standardFormat: 'D') & bytes == s.Length; - internal static bool TryParseInt64(ReadOnlySpan s, out long value) - => Utf8Parser.TryParse(s, out value, out int bytes, standardFormat: 'D') & bytes == s.Length; + internal static bool TryParseInt64(ReadOnlySpan s, out long value) => + Utf8Parser.TryParse(s, out value, out int bytes, standardFormat: 'D') & bytes == s.Length; internal static bool CouldBeInteger(string s) { - if (string.IsNullOrEmpty(s) || s.Length > PhysicalConnection.MaxInt64TextLen) return false; + if (string.IsNullOrEmpty(s) || s.Length > Format.MaxInt64TextLen) return false; bool isSigned = s[0] == '-'; for (int i = isSigned ? 1 : 0; i < s.Length; i++) { @@ -179,7 +228,7 @@ internal static bool CouldBeInteger(string s) } internal static bool CouldBeInteger(ReadOnlySpan s) { - if (s.IsEmpty | s.Length > PhysicalConnection.MaxInt64TextLen) return false; + if (s.IsEmpty | s.Length > Format.MaxInt64TextLen) return false; bool isSigned = s[0] == '-'; for (int i = isSigned ? 
1 : 0; i < s.Length; i++) { @@ -189,61 +238,90 @@ internal static bool CouldBeInteger(ReadOnlySpan s) return true; } - internal static bool TryParseInt64(string s, out long value) - => long.TryParse(s, NumberStyles.Integer, NumberFormatInfo.InvariantInfo, out value); + internal static bool TryParseInt64(string s, out long value) => + long.TryParse(s, NumberStyles.Integer, NumberFormatInfo.InvariantInfo, out value); internal static bool TryParseDouble(ReadOnlySpan s, out double value) { - if (s.IsEmpty) - { - value = 0; - return false; - } - if (s.Length == 1 && s[0] >= '0' && s[0] <= '9') - { - value = (int)(s[0] - '0'); - return true; - } - // need to handle these - if (CaseInsensitiveASCIIEqual("+inf", s) || CaseInsensitiveASCIIEqual("inf", s)) + switch (s.Length) { - value = double.PositiveInfinity; - return true; - } - if (CaseInsensitiveASCIIEqual("-inf", s)) - { - value = double.NegativeInfinity; - return true; + case 0: + value = 0; + return false; + // single-digits + case 1 when s[0] >= '0' && s[0] <= '9': + value = s[0] - '0'; + return true; + // RESP3 spec demands inf/nan handling + case 3 when TryParseInfNaN(s, true, out value): + case 4 when s[0] == '+' && TryParseInfNaN(s.Slice(1), true, out value): + case 4 when s[0] == '-' && TryParseInfNaN(s.Slice(1), false, out value): + return true; } return Utf8Parser.TryParse(s, out value, out int bytes) & bytes == s.Length; - } - private static bool CaseInsensitiveASCIIEqual(string xLowerCase, ReadOnlySpan y) - { - if (y.Length != xLowerCase.Length) return false; - for (int i = 0; i < y.Length; i++) + static bool TryParseInfNaN(ReadOnlySpan s, bool positive, out double value) { - if (char.ToLower((char)y[i]) != xLowerCase[i]) return false; + switch (s[0]) + { + case (byte)'i': + case (byte)'I': + if (s[1] is (byte)'n' or (byte)'N' && s[2] is (byte)'f' or (byte)'F') + { + value = positive ? 
double.PositiveInfinity : double.NegativeInfinity; + return true; + } + break; + case (byte)'n': + case (byte)'N': + if (s[1] is (byte)'a' or (byte)'A' && s[2] is (byte)'n' or (byte)'N') + { + value = double.NaN; + return true; + } + break; + } +#if NET + Unsafe.SkipInit(out value); +#else + value = 0; +#endif + return false; } - return true; } - internal static EndPoint TryParseEndPoint(string addressWithPort) + /// + /// + /// Adapted from IPEndPointParser in Microsoft.AspNetCore + /// Link: . + /// + /// + /// Copyright (c) .NET Foundation. All rights reserved. + /// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information. + /// + /// + /// If Unix sockets are attempted but not supported. + internal static bool TryParseEndPoint(string? addressWithPort, [NotNullWhen(true)] out EndPoint? endpoint) { - // Adapted from IPEndPointParser in Microsoft.AspNetCore - // Link: https://github.com/aspnet/BasicMiddleware/blob/f320511b63da35571e890d53f3906c7761cd00a1/src/Microsoft.AspNetCore.HttpOverrides/Internal/IPEndPointParser.cs#L8 - // Copyright (c) .NET Foundation. All rights reserved. - // Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information. string addressPart; - string portPart = null; - if (string.IsNullOrEmpty(addressWithPort)) return null; + string? 
portPart = null; + if (addressWithPort.IsNullOrEmpty()) + { + endpoint = null; + return false; + } - if (addressWithPort[0]=='!') + if (addressWithPort[0] == '!') { - if (addressWithPort.Length == 1) return null; + if (addressWithPort.Length == 1) + { + endpoint = null; + return false; + } #if UNIX_SOCKET - return new UnixDomainSocketEndPoint(addressWithPort.Substring(1)); + endpoint = new UnixDomainSocketEndPoint(addressWithPort.Substring(1)); + return true; #else throw new PlatformNotSupportedException("Unix domain sockets require .NET Core 3 or above"); #endif @@ -289,22 +367,28 @@ internal static EndPoint TryParseEndPoint(string addressWithPort) int? port = 0; if (portPart != null) { - if (Format.TryParseInt32(portPart, out var portVal)) + if (TryParseInt32(portPart, out var portVal)) { port = portVal; } else { // Invalid port, return - return null; + endpoint = null; + return false; } } - if (IPAddress.TryParse(addressPart, out IPAddress address)) + if (IPAddress.TryParse(addressPart, out IPAddress? address)) { - return new IPEndPoint(address, port ?? 0); + endpoint = new IPEndPoint(address, port ?? 0); + return true; + } + else + { + endpoint = new DnsEndPoint(addressPart, port ?? 0); + return true; } - return new DnsEndPoint(addressPart, port ?? 
0); } internal static string GetString(ReadOnlySequence buffer) @@ -318,13 +402,212 @@ internal static string GetString(ReadOnlySequence buffer) ArrayPool.Shared.Return(arr); return s; } + internal static unsafe string GetString(ReadOnlySpan span) { if (span.IsEmpty) return ""; +#if NET + return Encoding.UTF8.GetString(span); +#else fixed (byte* ptr = span) { return Encoding.UTF8.GetString(ptr, span.Length); } +#endif + } + + [DoesNotReturn] + private static void ThrowFormatFailed() => throw new InvalidOperationException("TryFormat failed"); + + internal const int + MaxInt32TextLen = 11, // -2,147,483,648 (not including the commas) + MaxInt64TextLen = 20, // -9,223,372,036,854,775,808 (not including the commas), + MaxDoubleTextLen = 40; // we use G17, allow for sign/E/and allow plenty of panic room + + internal static int MeasureDouble(double value) + { + if (double.IsInfinity(value)) return 4; // +inf / -inf + +#if NET8_0_OR_GREATER // can use IUtf8Formattable + Span buffer = stackalloc byte[MaxDoubleTextLen]; + if (value.TryFormat(buffer, out int len, "G17", NumberFormatInfo.InvariantInfo)) + { + return len; + } +#endif + // fallback (TFM or unexpected size) + var s = value.ToString("G17", NumberFormatInfo.InvariantInfo); // this looks inefficient, but is how Utf8Formatter works too, just: more direct + return s.Length; + } + + internal static int FormatDouble(double value, Span destination) + { + if (double.IsInfinity(value)) + { + if (!(double.IsPositiveInfinity(value) ? 
"+inf"u8 : "-inf"u8).TryCopyTo(destination)) ThrowFormatFailed(); + return 4; + } + +#if NET8_0_OR_GREATER // can use IUtf8Formattable + if (!value.TryFormat(destination, out int len, "G17", NumberFormatInfo.InvariantInfo)) + { + ThrowFormatFailed(); + } + + return len; +#else + var s = value.ToString("G17", NumberFormatInfo.InvariantInfo); // this looks inefficient, but is how Utf8Formatter works too, just: more direct + if (s.Length > destination.Length) ThrowFormatFailed(); + + var chars = s.AsSpan(); + for (int i = 0; i < chars.Length; i++) + { + destination[i] = (byte)chars[i]; + } + return chars.Length; +#endif + } + + internal static int FormatDouble(double value, Span destination) + { + string s; + if (double.IsInfinity(value)) + { + s = double.IsPositiveInfinity(value) ? "+inf" : "-inf"; + if (!s.AsSpan().TryCopyTo(destination)) ThrowFormatFailed(); + return 4; + } + +#if NET + if (!value.TryFormat(destination, out int len, "G17", NumberFormatInfo.InvariantInfo)) + { + ThrowFormatFailed(); + } + + return len; +#else + s = value.ToString("G17", NumberFormatInfo.InvariantInfo); // this looks inefficient, but is how Utf8Formatter works too, just: more direct + if (s.Length > destination.Length) ThrowFormatFailed(); + s.AsSpan().CopyTo(destination); + return s.Length; +#endif + } + + internal static int MeasureInt64(long value) + { + Span valueSpan = stackalloc byte[MaxInt64TextLen]; + return FormatInt64(value, valueSpan); + } + + internal static int FormatInt64(long value, Span destination) + { + if (!Utf8Formatter.TryFormat(value, destination, out var len)) + ThrowFormatFailed(); + return len; + } + + internal static int FormatInt64(long value, Span destination) + { +#if NET + if (!value.TryFormat(destination, out var len)) + ThrowFormatFailed(); + return len; +#else + Span buffer = stackalloc byte[MaxInt64TextLen]; + var bytes = FormatInt64(value, buffer); + return Encoding.UTF8.GetChars(buffer.Slice(0, bytes), destination); +#endif + } + + internal static 
int MeasureUInt64(ulong value) + { + Span valueSpan = stackalloc byte[MaxInt64TextLen]; + return FormatUInt64(value, valueSpan); + } + + internal static int FormatUInt64(ulong value, Span destination) + { +#if NET + if (!value.TryFormat(destination, out var len)) + ThrowFormatFailed(); + return len; +#else + Span buffer = stackalloc byte[MaxInt64TextLen]; + var bytes = FormatUInt64(value, buffer); + return Encoding.UTF8.GetChars(buffer.Slice(0, bytes), destination); +#endif + } + + internal static int FormatUInt64(ulong value, Span destination) + { + if (!Utf8Formatter.TryFormat(value, destination, out var len)) + ThrowFormatFailed(); + return len; + } + + internal static int FormatInt32(int value, Span destination) + { + if (!Utf8Formatter.TryFormat(value, destination, out var len)) + ThrowFormatFailed(); + return len; + } + + internal static int FormatInt32(int value, Span destination) + { +#if NET + if (!value.TryFormat(destination, out var len)) + ThrowFormatFailed(); + return len; +#else + Span buffer = stackalloc byte[MaxInt32TextLen]; + var bytes = FormatInt32(value, buffer); + return Encoding.UTF8.GetChars(buffer.Slice(0, bytes), destination); +#endif + } + + internal static bool TryParseVersion(ReadOnlySpan input, [NotNullWhen(true)] out Version? version) + { +#if NET + if (Version.TryParse(input, out version)) return true; + // allow major-only (Version doesn't do this, because... reasons?) + if (TryParseInt32(input, out int i32)) + { + version = new(i32, 0); + return true; + } + version = null; + return false; +#else + if (input.IsEmpty) + { + version = null; + return false; + } + unsafe + { + fixed (char* ptr = input) + { + string s = new(ptr, 0, input.Length); + return TryParseVersion(s, out version); + } + } +#endif + } + + internal static bool TryParseVersion(string? input, [NotNullWhen(true)] out Version? 
version) + { + if (input is not null) + { + if (Version.TryParse(input, out version)) return true; + // allow major-only (Version doesn't do this, because... reasons?) + if (TryParseInt32(input, out int i32)) + { + version = new(i32, 0); + return true; + } + } + version = null; + return false; } } } diff --git a/src/StackExchange.Redis/FrameworkShims.cs b/src/StackExchange.Redis/FrameworkShims.cs new file mode 100644 index 000000000..ce954406d --- /dev/null +++ b/src/StackExchange.Redis/FrameworkShims.cs @@ -0,0 +1,78 @@ +#pragma warning disable SA1403 // single namespace + +#if NET +// context: https://github.com/StackExchange/StackExchange.Redis/issues/2619 +[assembly: System.Runtime.CompilerServices.TypeForwardedTo(typeof(System.Runtime.CompilerServices.IsExternalInit))] +#else +// To support { get; init; } properties +using System.ComponentModel; +using System.Text; + +namespace System.Runtime.CompilerServices +{ + [EditorBrowsable(EditorBrowsableState.Never)] + internal static class IsExternalInit { } +} +#endif + +#if !NET10_0_OR_GREATER +namespace System.Runtime.CompilerServices +{ + // see https://learn.microsoft.com/dotnet/api/system.runtime.compilerservices.overloadresolutionpriorityattribute + [AttributeUsage(AttributeTargets.Constructor | AttributeTargets.Method | AttributeTargets.Property, Inherited = false)] + internal sealed class OverloadResolutionPriorityAttribute(int priority) : Attribute + { + public int Priority => priority; + } +} +#endif + +#if !NET + +namespace System.Text +{ + internal static class EncodingExtensions + { + public static unsafe int GetBytes(this Encoding encoding, ReadOnlySpan source, Span destination) + { + fixed (byte* bPtr = destination) + { + fixed (char* cPtr = source) + { + return encoding.GetBytes(cPtr, source.Length, bPtr, destination.Length); + } + } + } + + public static unsafe int GetChars(this Encoding encoding, ReadOnlySpan source, Span destination) + { + fixed (byte* bPtr = source) + { + fixed (char* cPtr = 
destination) + { + return encoding.GetChars(bPtr, source.Length, cPtr, destination.Length); + } + } + } + + public static unsafe int GetCharCount(this Encoding encoding, ReadOnlySpan source) + { + fixed (byte* bPtr = source) + { + return encoding.GetCharCount(bPtr, source.Length); + } + } + + public static unsafe string GetString(this Encoding encoding, ReadOnlySpan source) + { + fixed (byte* bPtr = source) + { + return encoding.GetString(bPtr, source.Length); + } + } + } +} +#endif + + +#pragma warning restore SA1403 diff --git a/src/StackExchange.Redis/GeoEntry.cs b/src/StackExchange.Redis/GeoEntry.cs deleted file mode 100644 index bf97404d2..000000000 --- a/src/StackExchange.Redis/GeoEntry.cs +++ /dev/null @@ -1,246 +0,0 @@ -using System; - -namespace StackExchange.Redis -{ - /// - /// GeoRadius command options. - /// - [Flags] - public enum GeoRadiusOptions - { - /// - /// No Options - /// - None = 0, - /// - /// Redis will return the coordinates of any results. - /// - WithCoordinates = 1, - /// - /// Redis will return the distance from center for all results. - /// - WithDistance = 2, - /// - /// Redis will return the geo hash value as an integer. (This is the score in the sorted set) - /// - WithGeoHash = 4, - /// - /// Populates the commonly used values from the entry (the integer hash is not returned as it is not commonly useful) - /// - Default = WithCoordinates | GeoRadiusOptions.WithDistance - } - - /// - /// The result of a GeoRadius command. - /// - public readonly struct GeoRadiusResult - { - /// - /// Indicate the member being represented - /// - public override string ToString() => Member.ToString(); - - /// - /// The matched member. - /// - public RedisValue Member { get; } - - /// - /// The distance of the matched member from the center of the geo radius command. - /// - public double? Distance { get; } - - /// - /// The hash value of the matched member as an integer. 
(The key in the sorted set) - /// - /// Note that this is not the same as the hash returned from GeoHash - public long? Hash { get; } - - /// - /// The coordinates of the matched member. - /// - public GeoPosition? Position { get; } - - /// - /// Returns a new GeoRadiusResult - /// - /// The value from the result. - /// Tthe distance from the result. - /// The hash of the result. - /// The geo position of the result. - public GeoRadiusResult(in RedisValue member, double? distance, long? hash, GeoPosition? position) - { - Member = member; - Distance = distance; - Hash = hash; - Position = position; - } - } - - /// - /// Describes the longitude and latitude of a GeoEntry - /// - public readonly struct GeoPosition : IEquatable - { - internal static string GetRedisUnit(GeoUnit unit) - { - switch (unit) - { - case GeoUnit.Meters: return "m"; - case GeoUnit.Kilometers: return "km"; - case GeoUnit.Miles: return "mi"; - case GeoUnit.Feet: return "ft"; - default: - throw new ArgumentOutOfRangeException(nameof(unit)); - } - } - - /// - /// The Latitude of the GeoPosition - /// - public double Latitude { get; } - - /// - /// The Logitude of the GeoPosition - /// - public double Longitude { get; } - - /// - /// Creates a new GeoPosition - /// - /// - /// - public GeoPosition(double longitude, double latitude) - { - Longitude = longitude; - Latitude = latitude; - } - - /// - /// See Object.ToString() - /// - public override string ToString() => string.Format("{0} {1}", Longitude, Latitude); - - /// - /// See Object.GetHashCode() - /// Diagonals not an issue in the case of lat/long - /// - /// - /// Diagonals are not an issue in the case of lat/long. - /// - public override int GetHashCode() => Longitude.GetHashCode() ^ Latitude.GetHashCode(); - - /// - /// Compares two values for equality - /// - /// The to compare to. 
- public override bool Equals(object obj) => obj is GeoPosition gpObj && Equals(gpObj); - - /// - /// Compares two values for equality - /// - /// The to compare to. -#pragma warning disable RCS1231 // Make parameter ref read-only. - public API - public bool Equals(GeoPosition other) => this == other; -#pragma warning restore RCS1231 // Make parameter ref read-only. - - /// - /// Compares two values for equality - /// - /// The first position to compare. - /// The second position to compare. -#pragma warning disable RCS1231 // Make parameter ref read-only. - public API - public static bool operator ==(GeoPosition x, GeoPosition y) => x.Longitude == y.Longitude && x.Latitude == y.Latitude; -#pragma warning restore RCS1231 // Make parameter ref read-only. - - /// - /// Compares two values for non-equality - /// - /// The first position to compare. - /// The second position to compare. -#pragma warning disable RCS1231 // Make parameter ref read-only. - public API - public static bool operator !=(GeoPosition x, GeoPosition y) => x.Longitude != y.Longitude || x.Latitude != y.Latitude; -#pragma warning restore RCS1231 // Make parameter ref read-only. - } - - /// - /// Describes a GeoEntry element with the corresponding value - /// GeoEntries are stored in redis as SortedSetEntries - /// - public readonly struct GeoEntry : IEquatable - { - /// - /// The name of the geo entry - /// - public RedisValue Member { get; } - - /// - /// Describes the longitude and latitude of a GeoEntry - /// - public GeoPosition Position { get; } - - /// - /// Initializes a GeoEntry value - /// - /// The longitude position to use. - /// The latitude position to use. - /// The value to store for this position. -#pragma warning disable RCS1231 // Make parameter ref read-only. - public API - public GeoEntry(double longitude, double latitude, RedisValue member) -#pragma warning restore RCS1231 // Make parameter ref read-only. 
- { - Member = member; - Position = new GeoPosition(longitude, latitude); - } - - /// - /// The longitude of the geo entry - /// - public double Longitude => Position.Longitude; - - /// - /// The latitude of the geo entry - /// - public double Latitude => Position.Latitude; - - /// - /// See Object.ToString() - /// - public override string ToString() => $"({Longitude},{Latitude})={Member}"; - - /// - /// See Object.GetHashCode() - /// - public override int GetHashCode() => Position.GetHashCode() ^ Member.GetHashCode(); - - /// - /// Compares two values for equality - /// - /// The to compare to. - public override bool Equals(object obj) => obj is GeoEntry geObj && Equals(geObj); - - /// - /// Compares two values for equality - /// - /// The to compare to. - public bool Equals(GeoEntry other) => this == other; - - /// - /// Compares two values for equality - /// - /// The first entry to compare. - /// The second entry to compare. -#pragma warning disable RCS1231 // Make parameter ref read-only. - public API - public static bool operator ==(GeoEntry x, GeoEntry y) => x.Position == y.Position && x.Member == y.Member; -#pragma warning restore RCS1231 // Make parameter ref read-only. - - /// - /// Compares two values for non-equality - /// - /// The first entry to compare. - /// The second entry to compare. -#pragma warning disable RCS1231 // Make parameter ref read-only. - public API - public static bool operator !=(GeoEntry x, GeoEntry y) => x.Position != y.Position || x.Member != y.Member; -#pragma warning restore RCS1231 // Make parameter ref read-only. - } -} diff --git a/src/StackExchange.Redis/GlobalSuppressions.cs b/src/StackExchange.Redis/GlobalSuppressions.cs new file mode 100644 index 000000000..84d04d110 --- /dev/null +++ b/src/StackExchange.Redis/GlobalSuppressions.cs @@ -0,0 +1,21 @@ +// This file is used by Code Analysis to maintain SuppressMessage +// attributes that are applied to this project. 
+// Project-level suppressions either have no target or are given +// a specific target and scoped to a namespace, type, member, etc. + +using System.Diagnostics.CodeAnalysis; + +[assembly: SuppressMessage("Style", "IDE0066:Convert switch statement to expression", Justification = "Pending", Scope = "member", Target = "~P:StackExchange.Redis.Message.IsAdmin")] +[assembly: SuppressMessage("Style", "IDE0066:Convert switch statement to expression", Justification = "Pending", Scope = "member", Target = "~M:StackExchange.Redis.ServerEndPoint.GetBridge(StackExchange.Redis.RedisCommand,System.Boolean)~StackExchange.Redis.PhysicalBridge")] +[assembly: SuppressMessage("Style", "IDE0066:Convert switch statement to expression", Justification = "Pending", Scope = "member", Target = "~M:StackExchange.Redis.RedisValue.op_Equality(StackExchange.Redis.RedisValue,StackExchange.Redis.RedisValue)~System.Boolean")] +[assembly: SuppressMessage("Style", "IDE0075:Simplify conditional expression", Justification = "Pending", Scope = "member", Target = "~M:StackExchange.Redis.RedisSubscriber.Unsubscribe(StackExchange.Redis.RedisChannel@,System.Action{StackExchange.Redis.RedisChannel,StackExchange.Redis.RedisValue},StackExchange.Redis.ChannelMessageQueue,StackExchange.Redis.CommandFlags)~System.Boolean")] +[assembly: SuppressMessage("Roslynator", "RCS1104:Simplify conditional expression.", Justification = "Pending", Scope = "member", Target = "~M:StackExchange.Redis.RedisSubscriber.Unsubscribe(StackExchange.Redis.RedisChannel@,System.Action{StackExchange.Redis.RedisChannel,StackExchange.Redis.RedisValue},StackExchange.Redis.ChannelMessageQueue,StackExchange.Redis.CommandFlags)~System.Boolean")] +[assembly: SuppressMessage("Style", "IDE0066:Convert switch statement to expression", Justification = "Pending", Scope = "member", Target = "~M:StackExchange.Redis.Message.IsPrimaryOnly(StackExchange.Redis.RedisCommand)~System.Boolean")] +[assembly: SuppressMessage("Style", "IDE0066:Convert switch 
statement to expression", Justification = "Pending", Scope = "member", Target = "~M:StackExchange.Redis.Message.RequiresDatabase(StackExchange.Redis.RedisCommand)~System.Boolean")] +[assembly: SuppressMessage("Style", "IDE0180:Use tuple to swap values", Justification = "Pending", Scope = "member", Target = "~M:StackExchange.Redis.RedisDatabase.ReverseLimits(StackExchange.Redis.Order,StackExchange.Redis.Exclude@,StackExchange.Redis.RedisValue@,StackExchange.Redis.RedisValue@)")] +[assembly: SuppressMessage("Style", "IDE0180:Use tuple to swap values", Justification = "Pending", Scope = "member", Target = "~M:StackExchange.Redis.RedisDatabase.GetSortedSetRangeByScoreMessage(StackExchange.Redis.RedisKey,System.Double,System.Double,StackExchange.Redis.Exclude,StackExchange.Redis.Order,System.Int64,System.Int64,StackExchange.Redis.CommandFlags,System.Boolean)~StackExchange.Redis.Message")] +[assembly: SuppressMessage("Reliability", "CA2012:Use ValueTasks correctly", Justification = "Pending", Scope = "member", Target = "~M:StackExchange.Redis.PhysicalConnection.FlushSync(System.Boolean,System.Int32)~StackExchange.Redis.WriteResult")] +[assembly: SuppressMessage("Usage", "CA2219:Do not raise exceptions in finally clauses", Justification = "Pending", Scope = "member", Target = "~M:StackExchange.Redis.PhysicalBridge.ProcessBacklogAsync~System.Threading.Tasks.Task")] +[assembly: SuppressMessage("Usage", "CA2249:Consider using 'string.Contains' instead of 'string.IndexOf'", Justification = "Pending", Scope = "member", Target = "~M:StackExchange.Redis.ClientInfo.AddFlag(StackExchange.Redis.ClientFlags@,System.String,StackExchange.Redis.ClientFlags,System.Char)")] +[assembly: SuppressMessage("Style", "IDE0070:Use 'System.HashCode'", Justification = "Pending", Scope = "member", Target = "~M:StackExchange.Redis.CommandBytes.GetHashCode~System.Int32")] +[assembly: SuppressMessage("Roslynator", "RCS1085:Use auto-implemented property.", Justification = "Pending", Scope = "member", 
Target = "~P:StackExchange.Redis.RedisValue.OverlappedValueInt64")] diff --git a/src/StackExchange.Redis/HashEntry.cs b/src/StackExchange.Redis/HashEntry.cs deleted file mode 100644 index f3ad4bc3d..000000000 --- a/src/StackExchange.Redis/HashEntry.cs +++ /dev/null @@ -1,92 +0,0 @@ -using System; -using System.Collections.Generic; -using System.ComponentModel; - -namespace StackExchange.Redis -{ - /// - /// Describes a hash-field (a name/value pair) - /// - public readonly struct HashEntry : IEquatable - { - internal readonly RedisValue name, value; - - /// - /// Initializes a value. - /// - /// The name for this hash entry. - /// The value for this hash entry. - public HashEntry(RedisValue name, RedisValue value) - { - this.name = name; - this.value = value; - } - - /// - /// The name of the hash field - /// - public RedisValue Name => name; - - /// - /// The value of the hash field - /// - public RedisValue Value => value; - - /// - /// The name of the hash field - /// - [Browsable(false)] - [EditorBrowsable(EditorBrowsableState.Never), Obsolete("Please use Name", false)] - public RedisValue Key { get { return name; } } - - /// - /// Converts to a key/value pair - /// - /// The to create a from. - public static implicit operator KeyValuePair(HashEntry value) => - new KeyValuePair(value.name, value.value); - - /// - /// Converts from a key/value pair - /// - /// The to get a from. - public static implicit operator HashEntry(KeyValuePair value) => - new HashEntry(value.Key, value.Value); - - /// - /// See Object.ToString() - /// - public override string ToString() => name + ": " + value; - - /// - /// See Object.GetHashCode() - /// - public override int GetHashCode() => name.GetHashCode() ^ value.GetHashCode(); - - /// - /// Compares two values for equality. - /// - /// The to compare to. - public override bool Equals(object obj) => obj is HashEntry heObj && Equals(heObj); - - /// - /// Compares two values for equality. - /// - /// The to compare to. 
- public bool Equals(HashEntry other) => name == other.name && value == other.value; - - /// - /// Compares two values for equality - /// - /// The first to compare. - /// The second to compare. - public static bool operator ==(HashEntry x, HashEntry y) => x.name == y.name && x.value == y.value; - - /// - /// Compares two values for non-equality - /// - /// The first to compare. - /// The second to compare. - public static bool operator !=(HashEntry x, HashEntry y) => x.name != y.name || x.value != y.value; - } -} diff --git a/src/StackExchange.Redis/HashSlotMovedEventArgs.cs b/src/StackExchange.Redis/HashSlotMovedEventArgs.cs index 14b30a6cb..876088e5c 100644 --- a/src/StackExchange.Redis/HashSlotMovedEventArgs.cs +++ b/src/StackExchange.Redis/HashSlotMovedEventArgs.cs @@ -5,30 +5,29 @@ namespace StackExchange.Redis { /// - /// Contains information about individual hash-slot relocations + /// Contains information about individual hash-slot relocations. /// public class HashSlotMovedEventArgs : EventArgs, ICompletable { private readonly object sender; - private readonly EventHandler handler; + private readonly EventHandler? handler; /// - /// The hash-slot that was relocated + /// The hash-slot that was relocated. /// public int HashSlot { get; } /// - /// The old endpoint for this hash-slot (if known) + /// The old endpoint for this hash-slot (if known). /// - public EndPoint OldEndPoint { get; } + public EndPoint? OldEndPoint { get; } /// - /// The new endpoint for this hash-slot (if known) + /// The new endpoint for this hash-slot (if known). /// public EndPoint NewEndPoint { get; } - internal HashSlotMovedEventArgs(EventHandler handler, object sender, - int hashSlot, EndPoint old, EndPoint @new) + internal HashSlotMovedEventArgs(EventHandler? handler, object sender, int hashSlot, EndPoint? old, EndPoint @new) { this.handler = handler; this.sender = sender; @@ -45,15 +44,12 @@ internal HashSlotMovedEventArgs(EventHandler handler, ob /// Old endpoint. 
/// New endpoint. public HashSlotMovedEventArgs(object sender, int hashSlot, EndPoint old, EndPoint @new) - : this (null, sender, hashSlot, old, @new) + : this(null, sender, hashSlot, old, @new) { } bool ICompletable.TryComplete(bool isAsync) => ConnectionMultiplexer.TryCompleteHandler(handler, sender, this, isAsync); - void ICompletable.AppendStormLog(StringBuilder sb) - { - sb.Append("event, slot-moved: ").Append(HashSlot); - } + void ICompletable.AppendStormLog(StringBuilder sb) => sb.Append("event, slot-moved: ").Append(HashSlot); } } diff --git a/src/StackExchange.Redis/HotKeys.ResultProcessor.cs b/src/StackExchange.Redis/HotKeys.ResultProcessor.cs new file mode 100644 index 000000000..71644c010 --- /dev/null +++ b/src/StackExchange.Redis/HotKeys.ResultProcessor.cs @@ -0,0 +1,193 @@ +namespace StackExchange.Redis; + +public sealed partial class HotKeysResult +{ + internal static readonly ResultProcessor Processor = new HotKeysResultProcessor(); + + private sealed class HotKeysResultProcessor : ResultProcessor + { + protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) + { + if (result.IsNull) + { + SetResult(message, null); + return true; + } + + // an array with a single element that *is* an array/map that is the results + if (result is { Resp2TypeArray: ResultType.Array, ItemsCount: 1 }) + { + ref readonly RawResult inner = ref result[0]; + if (inner is { Resp2TypeArray: ResultType.Array, IsNull: false }) + { + var hotKeys = new HotKeysResult(in inner); + SetResult(message, hotKeys); + return true; + } + } + + return false; + } + } + + private HotKeysResult(in RawResult result) + { + var metrics = HotKeysMetrics.None; // we infer this from the keys present + var iter = result.GetItems().GetEnumerator(); + while (iter.MoveNext()) + { + if (!iter.Current.TryParse(HotKeysFieldMetadata.TryParse, out HotKeysField field)) + field = HotKeysField.Unknown; + + if (!iter.MoveNext()) break; // lies about the length! 
+ ref readonly RawResult value = ref iter.Current; + + long i64; + switch (field) + { + case HotKeysField.TrackingActive: + TrackingActive = value.GetBoolean(); + break; + case HotKeysField.SampleRatio when value.TryGetInt64(out i64): + SampleRatio = i64; + break; + case HotKeysField.SelectedSlots when value.Resp2TypeArray is ResultType.Array: + var len = value.ItemsCount; + if (len == 0) + { + _selectedSlots = []; + continue; + } + + var items = value.GetItems().GetEnumerator(); + var slots = len == 1 ? null : new SlotRange[len]; + for (int i = 0; i < len && items.MoveNext(); i++) + { + ref readonly RawResult pair = ref items.Current; + if (pair.Resp2TypeArray is ResultType.Array) + { + long from = -1, to = -1; + switch (pair.ItemsCount) + { + case 1 when pair[0].TryGetInt64(out from): + to = from; // single slot + break; + case 2 when pair[0].TryGetInt64(out from) && pair[1].TryGetInt64(out to): + break; + } + + if (from < SlotRange.MinSlot) + { + // skip invalid ranges + } + else if (len == 1 & from == SlotRange.MinSlot & to == SlotRange.MaxSlot) + { + // this is the "normal" case when no slot filter was applied + slots = SlotRange.SharedAllSlots; // avoid the alloc + } + else + { + slots ??= new SlotRange[len]; + slots[i] = new((int)from, (int)to); + } + } + } + _selectedSlots = slots; + break; + case HotKeysField.AllCommandsAllSlotsUs when value.TryGetInt64(out i64): + AllCommandsAllSlotsMicroseconds = i64; + break; + case HotKeysField.AllCommandsSelectedSlotsUs when value.TryGetInt64(out i64): + AllCommandSelectedSlotsMicroseconds = i64; + break; + case HotKeysField.SampledCommandSelectedSlotsUs when value.TryGetInt64(out i64): + case HotKeysField.SampledCommandsSelectedSlotsUs when value.TryGetInt64(out i64): + SampledCommandsSelectedSlotsMicroseconds = i64; + break; + case HotKeysField.NetBytesAllCommandsAllSlots when value.TryGetInt64(out i64): + AllCommandsAllSlotsNetworkBytes = i64; + break; + case HotKeysField.NetBytesAllCommandsSelectedSlots when 
value.TryGetInt64(out i64): + NetworkBytesAllCommandsSelectedSlotsRaw = i64; + break; + case HotKeysField.NetBytesSampledCommandsSelectedSlots when value.TryGetInt64(out i64): + NetworkBytesSampledCommandsSelectedSlotsRaw = i64; + break; + case HotKeysField.CollectionStartTimeUnixMs when value.TryGetInt64(out i64): + CollectionStartTimeUnixMilliseconds = i64; + break; + case HotKeysField.CollectionDurationMs when value.TryGetInt64(out i64): + CollectionDurationMicroseconds = i64 * 1000; // ms vs us is in question: support both, and abstract it from the caller + break; + case HotKeysField.CollectionDurationUs when value.TryGetInt64(out i64): + CollectionDurationMicroseconds = i64; + break; + case HotKeysField.TotalCpuTimeSysMs when value.TryGetInt64(out i64): + metrics |= HotKeysMetrics.Cpu; + TotalCpuTimeSystemMicroseconds = i64 * 1000; // ms vs us is in question: support both, and abstract it from the caller + break; + case HotKeysField.TotalCpuTimeSysUs when value.TryGetInt64(out i64): + metrics |= HotKeysMetrics.Cpu; + TotalCpuTimeSystemMicroseconds = i64; + break; + case HotKeysField.TotalCpuTimeUserMs when value.TryGetInt64(out i64): + metrics |= HotKeysMetrics.Cpu; + TotalCpuTimeUserMicroseconds = i64 * 1000; // ms vs us is in question: support both, and abstract it from the caller + break; + case HotKeysField.TotalCpuTimeUserUs when value.TryGetInt64(out i64): + metrics |= HotKeysMetrics.Cpu; + TotalCpuTimeUserMicroseconds = i64; + break; + case HotKeysField.TotalNetBytes when value.TryGetInt64(out i64): + metrics |= HotKeysMetrics.Network; + TotalNetworkBytesRaw = i64; + break; + case HotKeysField.ByCpuTimeUs when value.Resp2TypeArray is ResultType.Array: + metrics |= HotKeysMetrics.Cpu; + len = value.ItemsCount / 2; + if (len == 0) + { + _cpuByKey = []; + continue; + } + + var cpuTime = new MetricKeyCpu[len]; + items = value.GetItems().GetEnumerator(); + for (int i = 0; i < len && items.MoveNext(); i++) + { + var metricKey = items.Current.AsRedisKey(); + 
if (items.MoveNext() && items.Current.TryGetInt64(out var metricValue)) + { + cpuTime[i] = new(metricKey, metricValue); + } + } + + _cpuByKey = cpuTime; + break; + case HotKeysField.ByNetBytes when value.Resp2TypeArray is ResultType.Array: + metrics |= HotKeysMetrics.Network; + len = value.ItemsCount / 2; + if (len == 0) + { + _networkBytesByKey = []; + continue; + } + + var netBytes = new MetricKeyBytes[len]; + items = value.GetItems().GetEnumerator(); + for (int i = 0; i < len && items.MoveNext(); i++) + { + var metricKey = items.Current.AsRedisKey(); + if (items.MoveNext() && items.Current.TryGetInt64(out var metricValue)) + { + netBytes[i] = new(metricKey, metricValue); + } + } + + _networkBytesByKey = netBytes; + break; + } // switch + } // while + Metrics = metrics; + } +} diff --git a/src/StackExchange.Redis/HotKeys.Server.cs b/src/StackExchange.Redis/HotKeys.Server.cs new file mode 100644 index 000000000..967a454e8 --- /dev/null +++ b/src/StackExchange.Redis/HotKeys.Server.cs @@ -0,0 +1,47 @@ +using System; +using System.Threading.Tasks; + +namespace StackExchange.Redis; + +internal partial class RedisServer +{ + public void HotKeysStart( + HotKeysMetrics metrics = (HotKeysMetrics)~0, + long count = 0, + TimeSpan duration = default, + long sampleRatio = 1, + int[]? slots = null, + CommandFlags flags = CommandFlags.None) + => ExecuteSync( + new HotKeysStartMessage(flags, metrics, count, duration, sampleRatio, slots), + ResultProcessor.DemandOK); + + public Task HotKeysStartAsync( + HotKeysMetrics metrics = (HotKeysMetrics)~0, + long count = 0, + TimeSpan duration = default, + long sampleRatio = 1, + int[]? 
slots = null, + CommandFlags flags = CommandFlags.None) + => ExecuteAsync( + new HotKeysStartMessage(flags, metrics, count, duration, sampleRatio, slots), + ResultProcessor.DemandOK); + + public bool HotKeysStop(CommandFlags flags = CommandFlags.None) + => ExecuteSync(Message.Create(-1, flags, RedisCommand.HOTKEYS, RedisLiterals.STOP), ResultProcessor.Boolean, server); + + public Task HotKeysStopAsync(CommandFlags flags = CommandFlags.None) + => ExecuteAsync(Message.Create(-1, flags, RedisCommand.HOTKEYS, RedisLiterals.STOP), ResultProcessor.Boolean, server); + + public void HotKeysReset(CommandFlags flags = CommandFlags.None) + => ExecuteSync(Message.Create(-1, flags, RedisCommand.HOTKEYS, RedisLiterals.RESET), ResultProcessor.DemandOK, server); + + public Task HotKeysResetAsync(CommandFlags flags = CommandFlags.None) + => ExecuteAsync(Message.Create(-1, flags, RedisCommand.HOTKEYS, RedisLiterals.RESET), ResultProcessor.DemandOK, server); + + public HotKeysResult? HotKeysGet(CommandFlags flags = CommandFlags.None) + => ExecuteSync(Message.Create(-1, flags, RedisCommand.HOTKEYS, RedisLiterals.GET), HotKeysResult.Processor, server); + + public Task HotKeysGetAsync(CommandFlags flags = CommandFlags.None) + => ExecuteAsync(Message.Create(-1, flags, RedisCommand.HOTKEYS, RedisLiterals.GET), HotKeysResult.Processor, server); +} diff --git a/src/StackExchange.Redis/HotKeys.StartMessage.cs b/src/StackExchange.Redis/HotKeys.StartMessage.cs new file mode 100644 index 000000000..c9f0bc371 --- /dev/null +++ b/src/StackExchange.Redis/HotKeys.StartMessage.cs @@ -0,0 +1,80 @@ +using System; +using System.Threading.Tasks; + +namespace StackExchange.Redis; + +internal partial class RedisServer +{ + internal sealed class HotKeysStartMessage( + CommandFlags flags, + HotKeysMetrics metrics, + long count, + TimeSpan duration, + long sampleRatio, + int[]? 
slots) : Message(-1, flags, RedisCommand.HOTKEYS) + { + protected override void WriteImpl(PhysicalConnection physical) + { + /* + HOTKEYS START + + [COUNT k] + [DURATION duration] + [SAMPLE ratio] + [SLOTS count slot…] + */ + physical.WriteHeader(Command, ArgCount); + physical.WriteBulkString("START"u8); + physical.WriteBulkString("METRICS"u8); + var metricCount = 0; + if ((metrics & HotKeysMetrics.Cpu) != 0) metricCount++; + if ((metrics & HotKeysMetrics.Network) != 0) metricCount++; + physical.WriteBulkString(metricCount); + if ((metrics & HotKeysMetrics.Cpu) != 0) physical.WriteBulkString("CPU"u8); + if ((metrics & HotKeysMetrics.Network) != 0) physical.WriteBulkString("NET"u8); + + if (count != 0) + { + physical.WriteBulkString("COUNT"u8); + physical.WriteBulkString(count); + } + + if (duration != TimeSpan.Zero) + { + physical.WriteBulkString("DURATION"u8); + physical.WriteBulkString(Math.Ceiling(duration.TotalSeconds)); + } + + if (sampleRatio != 1) + { + physical.WriteBulkString("SAMPLE"u8); + physical.WriteBulkString(sampleRatio); + } + + if (slots is { Length: > 0 }) + { + physical.WriteBulkString("SLOTS"u8); + physical.WriteBulkString(slots.Length); + foreach (var slot in slots) + { + physical.WriteBulkString(slot); + } + } + } + + public override int ArgCount + { + get + { + int argCount = 3; + if ((metrics & HotKeysMetrics.Cpu) != 0) argCount++; + if ((metrics & HotKeysMetrics.Network) != 0) argCount++; + if (count != 0) argCount += 2; + if (duration != TimeSpan.Zero) argCount += 2; + if (sampleRatio != 1) argCount += 2; + if (slots is { Length: > 0 }) argCount += 2 + slots.Length; + return argCount; + } + } + } +} diff --git a/src/StackExchange.Redis/HotKeys.cs b/src/StackExchange.Redis/HotKeys.cs new file mode 100644 index 000000000..c3d71fe17 --- /dev/null +++ b/src/StackExchange.Redis/HotKeys.cs @@ -0,0 +1,337 @@ +using System; +using System.Diagnostics.CodeAnalysis; +using System.Threading.Tasks; +using RESPite; + +namespace StackExchange.Redis; + 
+public partial interface IServer +{ + /// + /// Start a new HOTKEYS profiling session. + /// + /// The metrics to record during this capture (defaults to "all"). + /// The number of keys to retain and report when is invoked. If zero, the server default is used (currently 10). + /// The duration of this profiling session. + /// Profiling frequency; effectively: measure every Nth command. + /// The key-slots to record during this capture (defaults to "all"). + /// The command flags to use. + [Experimental(Experiments.Server_8_6, UrlFormat = Experiments.UrlFormat)] + void HotKeysStart( + HotKeysMetrics metrics = (HotKeysMetrics)~0, // everything by default + long count = 0, + TimeSpan duration = default, + long sampleRatio = 1, + int[]? slots = null, + CommandFlags flags = CommandFlags.None); + + /// + /// Start a new HOTKEYS profiling session. + /// + /// The metrics to record during this capture (defaults to "all"). + /// The number of keys to retain and report when is invoked. If zero, the server default is used (currently 10). + /// The duration of this profiling session. + /// Profiling frequency; effectively: measure every Nth command. + /// The key-slots to record during this capture (defaults to "all" / "all on this node"). + /// The command flags to use. + [Experimental(Experiments.Server_8_6, UrlFormat = Experiments.UrlFormat)] + Task HotKeysStartAsync( + HotKeysMetrics metrics = (HotKeysMetrics)~0, // everything by default + long count = 0, + TimeSpan duration = default, + long sampleRatio = 1, + int[]? slots = null, + CommandFlags flags = CommandFlags.None); + + /// + /// Stop the current HOTKEYS capture, if any. + /// + /// The command flags to use. + [Experimental(Experiments.Server_8_6, UrlFormat = Experiments.UrlFormat)] + bool HotKeysStop(CommandFlags flags = CommandFlags.None); + + /// + /// Stop the current HOTKEYS capture, if any. + /// + /// The command flags to use. 
+ [Experimental(Experiments.Server_8_6, UrlFormat = Experiments.UrlFormat)] + Task HotKeysStopAsync(CommandFlags flags = CommandFlags.None); + + /// + /// Discard the last HOTKEYS capture data, if any. + /// + /// The command flags to use. + [Experimental(Experiments.Server_8_6, UrlFormat = Experiments.UrlFormat)] + void HotKeysReset(CommandFlags flags = CommandFlags.None); + + /// + /// Discard the last HOTKEYS capture data, if any. + /// + /// The command flags to use. + [Experimental(Experiments.Server_8_6, UrlFormat = Experiments.UrlFormat)] + Task HotKeysResetAsync(CommandFlags flags = CommandFlags.None); + + /// + /// Fetch the most recent HOTKEYS profiling data. + /// + /// The command flags to use. + /// The data captured during HOTKEYS profiling. + [Experimental(Experiments.Server_8_6, UrlFormat = Experiments.UrlFormat)] + HotKeysResult? HotKeysGet(CommandFlags flags = CommandFlags.None); + + /// + /// Fetch the most recent HOTKEYS profiling data. + /// + /// The command flags to use. + /// The data captured during HOTKEYS profiling. + [Experimental(Experiments.Server_8_6, UrlFormat = Experiments.UrlFormat)] + Task HotKeysGetAsync(CommandFlags flags = CommandFlags.None); +} + +/// +/// Metrics to record during HOTKEYS profiling. +/// +[Flags] +[Experimental(Experiments.Server_8_6, UrlFormat = Experiments.UrlFormat)] +public enum HotKeysMetrics +{ + /// + /// No metrics. + /// + None = 0, + + /// + /// Capture CPU time. + /// + Cpu = 1 << 0, + + /// + /// Capture network bytes. + /// + Network = 1 << 1, +} + +/// +/// Captured data from HOTKEYS profiling. 
+/// +[Experimental(Experiments.Server_8_6, UrlFormat = Experiments.UrlFormat)] +public sealed partial class HotKeysResult +{ + // Note: names are intentionally chosen to align reasonably well with the Redis command output; some + // liberties have been taken, for example "all-commands-all-slots-us" and "net-bytes-all-commands-all-slots" + // have been named "AllCommandsAllSlotsTime" and "AllCommandsAllSlotsNetworkBytes" for consistency + // with each-other. + + /// + /// The metrics captured during this profiling session. + /// + public HotKeysMetrics Metrics { get; } + + /// + /// Indicates whether the capture currently active. + /// + public bool TrackingActive { get; } + + /// + /// Profiling frequency; effectively: measure every Nth command. + /// + public long SampleRatio { get; } + + /// + /// Gets whether sampling is in use. + /// + public bool IsSampled => SampleRatio > 1; + + /// + /// The key slots active for this profiling session. + /// + public ReadOnlySpan SelectedSlots => _selectedSlots; + + private readonly SlotRange[]? _selectedSlots; + + /// + /// Gets whether slot filtering is in use. + /// + public bool IsSlotFiltered => + NetworkBytesAllCommandsSelectedSlotsRaw >= 0; // this key only present if slot-filtering active + + /// + /// The total CPU measured for all commands in all slots, without any sampling or filtering applied. + /// + public TimeSpan AllCommandsAllSlotsTime => NonNegativeMicroseconds(AllCommandsAllSlotsMicroseconds); + + internal long AllCommandsAllSlotsMicroseconds { get; } = -1; + + internal long AllCommandSelectedSlotsMicroseconds { get; } = -1; + internal long SampledCommandsSelectedSlotsMicroseconds { get; } = -1; + + /// + /// When slot filtering is used, this is the total CPU time measured for all commands in the selected slots. + /// + public TimeSpan? AllCommandsSelectedSlotsTime => AllCommandSelectedSlotsMicroseconds < 0 + ? 
null + : NonNegativeMicroseconds(AllCommandSelectedSlotsMicroseconds); + + /// + /// When sampling and slot filtering are used, this is the total CPU time measured for the sampled commands in the selected slots. + /// + public TimeSpan? SampledCommandsSelectedSlotsTime => SampledCommandsSelectedSlotsMicroseconds < 0 + ? null + : NonNegativeMicroseconds(SampledCommandsSelectedSlotsMicroseconds); + + internal static TimeSpan NonNegativeMicroseconds(long us) + { + const long TICKS_PER_MICROSECOND = TimeSpan.TicksPerMillisecond / 1000; // 10, but: clearer + return TimeSpan.FromTicks(Math.Max(us, 0) * TICKS_PER_MICROSECOND); + } + + /// + /// The total network usage measured for all commands in all slots, without any sampling or filtering applied. + /// + public long AllCommandsAllSlotsNetworkBytes { get; } + + internal long NetworkBytesAllCommandsSelectedSlotsRaw { get; } = -1; + internal long NetworkBytesSampledCommandsSelectedSlotsRaw { get; } = -1; + + /// + /// When slot filtering is used, this is the total network usage measured for all commands in the selected slots. + /// + public long? AllCommandsSelectedSlotsNetworkBytes => NetworkBytesAllCommandsSelectedSlotsRaw < 0 + ? null + : NetworkBytesAllCommandsSelectedSlotsRaw; + + /// + /// When sampling and slot filtering are used, this is the total network usage measured for the sampled commands in the selected slots. + /// + public long? SampledCommandsSelectedSlotsNetworkBytes => NetworkBytesSampledCommandsSelectedSlotsRaw < 0 + ? null + : NetworkBytesSampledCommandsSelectedSlotsRaw; + + internal long CollectionStartTimeUnixMilliseconds { get; } = -1; + + /// + /// The start time of the capture. + /// + public DateTime CollectionStartTime => + RedisBase.UnixEpoch.AddMilliseconds(Math.Max(CollectionStartTimeUnixMilliseconds, 0)); + + internal long CollectionDurationMicroseconds { get; } + + /// + /// The duration of the capture. 
+ /// + public TimeSpan CollectionDuration => NonNegativeMicroseconds(CollectionDurationMicroseconds); + + internal long TotalCpuTimeUserMicroseconds { get; } = -1; + + /// + /// The total user CPU time measured in the profiling session. + /// + public TimeSpan? TotalCpuTimeUser => TotalCpuTimeUserMicroseconds < 0 + ? null + : NonNegativeMicroseconds(TotalCpuTimeUserMicroseconds); + + internal long TotalCpuTimeSystemMicroseconds { get; } = -1; + + /// + /// The total system CPU measured in the profiling session. + /// + public TimeSpan? TotalCpuTimeSystem => TotalCpuTimeSystemMicroseconds < 0 + ? null + : NonNegativeMicroseconds(TotalCpuTimeSystemMicroseconds); + + /// + /// The total CPU time measured in the profiling session (this is just + ). + /// + public TimeSpan? TotalCpuTime => TotalCpuTimeUser + TotalCpuTimeSystem; + + internal long TotalNetworkBytesRaw { get; } = -1; + + /// + /// The total network data measured in the profiling session. + /// + public long? TotalNetworkBytes => TotalNetworkBytesRaw < 0 + ? null + : TotalNetworkBytesRaw; + + // Intentionally do construct a dictionary from the results; the caller is unlikely to be looking + // for a particular key (lookup), but rather: is likely to want to list them for display; this way, + // we'll preserve the server's display order. + + /// + /// Hot keys, as measured by CPU activity. + /// + public ReadOnlySpan CpuByKey => _cpuByKey; + + private readonly MetricKeyCpu[]? _cpuByKey; + + /// + /// Hot keys, as measured by network activity. + /// + public ReadOnlySpan NetworkBytesByKey => _networkBytesByKey; + + private readonly MetricKeyBytes[]? _networkBytesByKey; + + /// + /// A hot key, as measured by CPU activity. + /// + /// The key observed. + /// The time taken, in microseconds. + public readonly struct MetricKeyCpu(in RedisKey key, long durationMicroseconds) + { + private readonly RedisKey _key = key; + + /// + /// The key observed. 
+ /// + public RedisKey Key => _key; + + internal long DurationMicroseconds => durationMicroseconds; + + /// + /// The time taken. + /// + public TimeSpan Duration => NonNegativeMicroseconds(durationMicroseconds); + + /// + public override string ToString() => $"{_key}: {Duration}"; + + /// + public override int GetHashCode() => _key.GetHashCode() ^ durationMicroseconds.GetHashCode(); + + /// + public override bool Equals(object? obj) + => obj is MetricKeyCpu other && _key.Equals(other.Key) && + durationMicroseconds == other.DurationMicroseconds; + } + + /// + /// A hot key, as measured by network activity. + /// + /// The key observed. + /// The network activity, in bytes. + public readonly struct MetricKeyBytes(in RedisKey key, long bytes) + { + private readonly RedisKey _key = key; + + /// + /// The key observed. + /// + public RedisKey Key => _key; + + /// + /// The network activity, in bytes. + /// + public long Bytes => bytes; + + /// + public override string ToString() => $"{_key}: {bytes}B"; + + /// + public override int GetHashCode() => _key.GetHashCode() ^ bytes.GetHashCode(); + + /// + public override bool Equals(object? obj) + => obj is MetricKeyBytes other && _key.Equals(other.Key) && Bytes == other.Bytes; + } +} diff --git a/src/StackExchange.Redis/HotKeysField.cs b/src/StackExchange.Redis/HotKeysField.cs new file mode 100644 index 000000000..0c514c6fd --- /dev/null +++ b/src/StackExchange.Redis/HotKeysField.cs @@ -0,0 +1,145 @@ +using System; +using RESPite; + +namespace StackExchange.Redis; + +/// +/// Fields that can appear in a HOTKEYS response. +/// +internal enum HotKeysField +{ + /// + /// Unknown or unrecognized field. + /// + [AsciiHash("")] + Unknown = 0, + + /// + /// Whether tracking is active. + /// + [AsciiHash("tracking-active")] + TrackingActive, + + /// + /// Sample ratio. + /// + [AsciiHash("sample-ratio")] + SampleRatio, + + /// + /// Selected slots. 
+ /// + [AsciiHash("selected-slots")] + SelectedSlots, + + /// + /// All commands all slots microseconds. + /// + [AsciiHash("all-commands-all-slots-us")] + AllCommandsAllSlotsUs, + + /// + /// All commands selected slots microseconds. + /// + [AsciiHash("all-commands-selected-slots-us")] + AllCommandsSelectedSlotsUs, + + /// + /// Sampled command selected slots microseconds (singular). + /// + [AsciiHash("sampled-command-selected-slots-us")] + SampledCommandSelectedSlotsUs, + + /// + /// Sampled commands selected slots microseconds (plural). + /// + [AsciiHash("sampled-commands-selected-slots-us")] + SampledCommandsSelectedSlotsUs, + + /// + /// Network bytes all commands all slots. + /// + [AsciiHash("net-bytes-all-commands-all-slots")] + NetBytesAllCommandsAllSlots, + + /// + /// Network bytes all commands selected slots. + /// + [AsciiHash("net-bytes-all-commands-selected-slots")] + NetBytesAllCommandsSelectedSlots, + + /// + /// Network bytes sampled commands selected slots. + /// + [AsciiHash("net-bytes-sampled-commands-selected-slots")] + NetBytesSampledCommandsSelectedSlots, + + /// + /// Collection start time in Unix milliseconds. + /// + [AsciiHash("collection-start-time-unix-ms")] + CollectionStartTimeUnixMs, + + /// + /// Collection duration in milliseconds. + /// + [AsciiHash("collection-duration-ms")] + CollectionDurationMs, + + /// + /// Collection duration in microseconds. + /// + [AsciiHash("collection-duration-us")] + CollectionDurationUs, + + /// + /// Total CPU time user in milliseconds. + /// + [AsciiHash("total-cpu-time-user-ms")] + TotalCpuTimeUserMs, + + /// + /// Total CPU time user in microseconds. + /// + [AsciiHash("total-cpu-time-user-us")] + TotalCpuTimeUserUs, + + /// + /// Total CPU time system in milliseconds. + /// + [AsciiHash("total-cpu-time-sys-ms")] + TotalCpuTimeSysMs, + + /// + /// Total CPU time system in microseconds. + /// + [AsciiHash("total-cpu-time-sys-us")] + TotalCpuTimeSysUs, + + /// + /// Total network bytes. 
+ /// + [AsciiHash("total-net-bytes")] + TotalNetBytes, + + /// + /// By CPU time in microseconds. + /// + [AsciiHash("by-cpu-time-us")] + ByCpuTimeUs, + + /// + /// By network bytes. + /// + [AsciiHash("by-net-bytes")] + ByNetBytes, +} + +/// +/// Metadata and parsing methods for HotKeysField. +/// +internal static partial class HotKeysFieldMetadata +{ + [AsciiHash] + internal static partial bool TryParse(ReadOnlySpan value, out HotKeysField field); +} diff --git a/src/StackExchange.Redis/Interfaces/IBatch.cs b/src/StackExchange.Redis/Interfaces/IBatch.cs index 34f125332..a0a71becb 100644 --- a/src/StackExchange.Redis/Interfaces/IBatch.cs +++ b/src/StackExchange.Redis/Interfaces/IBatch.cs @@ -1,8 +1,8 @@ namespace StackExchange.Redis { /// - /// Represents a block of operations that will be sent to the server together; - /// this can be useful to reduce packet fragmentation on slow connections - it + /// Represents a block of operations that will be sent to the server together. + /// This can be useful to reduce packet fragmentation on slow connections - it /// can improve the time to get *all* the operations processed, with the trade-off /// of a slower time to get the *first* operation processed; this is usually /// a good thing. Unless this batch is a transaction, there is no guarantee @@ -12,9 +12,8 @@ public interface IBatch : IDatabaseAsync { /// /// Execute the batch operation, sending all queued commands to the server. - /// Note that this operation is neither synchronous nor truly asynchronous - it - /// simply enqueues the buffered messages. To check on completion, you should - /// check the individual responses. + /// Note that this operation is neither synchronous nor truly asynchronous - it simply enqueues the buffered messages. + /// To check on completion, you should check the individual responses. 
/// void Execute(); } diff --git a/src/StackExchange.Redis/Interfaces/IConnectionMultiplexer.cs b/src/StackExchange.Redis/Interfaces/IConnectionMultiplexer.cs index f4770e4f8..96b4ce8f6 100644 --- a/src/StackExchange.Redis/Interfaces/IConnectionMultiplexer.cs +++ b/src/StackExchange.Redis/Interfaces/IConnectionMultiplexer.cs @@ -1,269 +1,318 @@ using System; +using System.Collections.Concurrent; +using System.ComponentModel; using System.IO; using System.Net; using System.Threading.Tasks; +using StackExchange.Redis.Maintenance; using StackExchange.Redis.Profiling; +using static StackExchange.Redis.ConnectionMultiplexer; -namespace StackExchange.Redis +namespace StackExchange.Redis; + +internal interface IInternalConnectionMultiplexer : IConnectionMultiplexer { - internal interface IInternalConnectionMultiplexer : IConnectionMultiplexer - { - bool AllowConnect { get; set; } - - bool IgnoreConnect { get; set; } - - ReadOnlySpan GetServerSnapshot(); - } - - /// - /// Represents the abstract multiplexer API - /// - public interface IConnectionMultiplexer : IDisposable - { - /// - /// Gets the client-name that will be used on all new connections - /// - string ClientName { get; } - - /// - /// Gets the configuration of the connection - /// - string Configuration { get; } - - /// - /// Gets the timeout associated with the connections - /// - int TimeoutMilliseconds { get; } - - /// - /// The number of operations that have been performed on all connections - /// - long OperationCount { get; } - - /// - /// Gets or sets whether asynchronous operations should be invoked in a way that guarantees their original delivery order - /// - [Obsolete("Not supported; if you require ordered pub/sub, please see " + nameof(ChannelMessageQueue), false)] - bool PreserveAsyncOrder { get; set; } - - /// - /// Indicates whether any servers are connected - /// - bool IsConnected { get; } - - /// - /// Indicates whether any servers are connected - /// - bool IsConnecting { get; } - - /// - /// 
Should exceptions include identifiable details? (key names, additional .Data annotations) - /// - bool IncludeDetailInExceptions { get; set; } - - /// - /// Limit at which to start recording unusual busy patterns (only one log will be retained at a time; - /// set to a negative value to disable this feature) - /// - int StormLogThreshold { get; set; } - - /// - /// Register a callback to provide an on-demand ambient session provider based on the - /// calling context; the implementing code is responsible for reliably resolving the same provider - /// based on ambient context, or returning null to not profile - /// - /// The profiling session provider. - void RegisterProfiler(Func profilingSessionProvider); - - /// - /// Get summary statistics associates with this server - /// - ServerCounters GetCounters(); - - /// - /// A server replied with an error message; - /// - event EventHandler ErrorMessage; - - /// - /// Raised whenever a physical connection fails - /// - event EventHandler ConnectionFailed; - - /// - /// Raised whenever an internal error occurs (this is primarily for debugging) - /// - event EventHandler InternalError; - - /// - /// Raised whenever a physical connection is established - /// - event EventHandler ConnectionRestored; - - /// - /// Raised when configuration changes are detected - /// - event EventHandler ConfigurationChanged; - - /// - /// Raised when nodes are explicitly requested to reconfigure via broadcast; - /// this usually means master/replica changes - /// - event EventHandler ConfigurationChangedBroadcast; - - /// - /// Gets all endpoints defined on the server - /// - /// Whether to return only the explicitly configured endpoints. - EndPoint[] GetEndPoints(bool configuredOnly = false); - - /// - /// Wait for a given asynchronous operation to complete (or timeout) - /// - /// The task to wait on. - void Wait(Task task); - - /// - /// Wait for a given asynchronous operation to complete (or timeout) - /// - /// The type in . 
- /// The task to wait on. - T Wait(Task task); - - /// - /// Wait for the given asynchronous operations to complete (or timeout) - /// - /// The tasks to wait on. - void WaitAll(params Task[] tasks); - - /// - /// Raised when a hash-slot has been relocated - /// - event EventHandler HashSlotMoved; - - /// - /// Compute the hash-slot of a specified key - /// - /// The key to get a slot ID for. - int HashSlot(RedisKey key); - - /// - /// Obtain a pub/sub subscriber connection to the specified server - /// - /// The async state to pass to the created . - ISubscriber GetSubscriber(object asyncState = null); - - /// - /// Obtain an interactive connection to a database inside redis - /// - /// The database ID to get. - /// The async state to pass to the created . - IDatabase GetDatabase(int db = -1, object asyncState = null); - - /// - /// Obtain a configuration API for an individual server - /// - /// The host to get a server for. - /// The specific port for to get a server for. - /// The async state to pass to the created . - IServer GetServer(string host, int port, object asyncState = null); - - /// - /// Obtain a configuration API for an individual server - /// - /// The "host:port" string to get a server for. - /// The async state to pass to the created . - IServer GetServer(string hostAndPort, object asyncState = null); - - /// - /// Obtain a configuration API for an individual server - /// - /// The host to get a server for. - /// The specific port for to get a server for. - IServer GetServer(IPAddress host, int port); - - /// - /// Obtain a configuration API for an individual server - /// - /// The endpoint to get a server for. - /// The async state to pass to the created . - IServer GetServer(EndPoint endpoint, object asyncState = null); - - /// - /// Reconfigure the current connections based on the existing configuration - /// - /// The log to write output to. 
- Task ConfigureAsync(TextWriter log = null); - - /// - /// Reconfigure the current connections based on the existing configuration - /// - /// The log to write output to. - bool Configure(TextWriter log = null); - - /// - /// Provides a text overview of the status of all connections - /// - string GetStatus(); - - /// - /// Provides a text overview of the status of all connections - /// - /// The log to write output to. - void GetStatus(TextWriter log); - - /// - /// See Object.ToString() - /// - string ToString(); - - /// - /// Close all connections and release all resources associated with this object - /// - /// Whether to allow in-queue commands to complete first. - void Close(bool allowCommandsToComplete = true); - - /// - /// Close all connections and release all resources associated with this object - /// - /// Whether to allow in-queue commands to complete first. - Task CloseAsync(bool allowCommandsToComplete = true); - - /// - /// Obtains the log of unusual busy patterns - /// - string GetStormLog(); - - /// - /// Resets the log of unusual busy patterns - /// - void ResetStormLog(); - - /// - /// Request all compatible clients to reconfigure or reconnect - /// - /// The command flags to use. - /// The number of instances known to have received the message (however, the actual number can be higher; returns -1 if the operation is pending) - long PublishReconfigure(CommandFlags flags = CommandFlags.None); - - /// - /// Request all compatible clients to reconfigure or reconnect - /// - /// The command flags to use. - /// The number of instances known to have received the message (however, the actual number can be higher) - Task PublishReconfigureAsync(CommandFlags flags = CommandFlags.None); - - /// - /// Get the hash-slot associated with a given key, if applicable; this can be useful for grouping operations - /// - /// The key to get a the slot for. 
- int GetHashSlot(RedisKey key); - - /// - /// Write the configuration of all servers to an output stream - /// - /// The destination stream to write the export to. - /// The options to use for this export. - void ExportConfiguration(Stream destination, ExportOptions options = ExportOptions.All); - } + bool AllowConnect { get; set; } + + bool IgnoreConnect { get; set; } + + ReadOnlySpan GetServerSnapshot(); + ServerEndPoint GetServerEndPoint(EndPoint endpoint); + + ConfigurationOptions RawConfig { get; } + + long? GetConnectionId(EndPoint endPoint, ConnectionType type); + + ServerSelectionStrategy ServerSelectionStrategy { get; } + + int GetSubscriptionsCount(); + ConcurrentDictionary GetSubscriptions(); + + ConnectionMultiplexer UnderlyingMultiplexer { get; } +} + +/// +/// Represents the abstract multiplexer API. +/// +public interface IConnectionMultiplexer : IDisposable, IAsyncDisposable +{ + /// + /// Gets the client-name that will be used on all new connections. + /// + string ClientName { get; } + + /// + /// Gets the configuration of the connection. + /// + string Configuration { get; } + + /// + /// Gets the timeout associated with the connections. + /// + int TimeoutMilliseconds { get; } + + /// + /// The number of operations that have been performed on all connections. + /// + long OperationCount { get; } + + /// + /// Gets or sets whether asynchronous operations should be invoked in a way that guarantees their original delivery order. + /// + [Obsolete("Not supported; if you require ordered pub/sub, please see " + nameof(ChannelMessageQueue), false)] + [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] + bool PreserveAsyncOrder { get; set; } + + /// + /// Indicates whether any servers are connected. + /// + bool IsConnected { get; } + + /// + /// Indicates whether any servers are connecting. + /// + bool IsConnecting { get; } + + /// + /// Should exceptions include identifiable details? (key names, additional annotations). 
+ /// + [Obsolete($"Please use {nameof(ConfigurationOptions)}.{nameof(ConfigurationOptions.IncludeDetailInExceptions)} instead - this will be removed in 3.0.")] + [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] + bool IncludeDetailInExceptions { get; set; } + + /// + /// Limit at which to start recording unusual busy patterns (only one log will be retained at a time. + /// Set to a negative value to disable this feature). + /// + int StormLogThreshold { get; set; } + + /// + /// Register a callback to provide an on-demand ambient session provider based on the calling context. + /// The implementing code is responsible for reliably resolving the same provider + /// based on ambient context, or returning null to not profile. + /// + /// The profiling session provider. + void RegisterProfiler(Func profilingSessionProvider); + + /// + /// Get summary statistics associates with this server. + /// + ServerCounters GetCounters(); + + /// + /// A server replied with an error message. + /// + event EventHandler ErrorMessage; + + /// + /// Raised whenever a physical connection fails. + /// + event EventHandler ConnectionFailed; + + /// + /// Raised whenever an internal error occurs (this is primarily for debugging). + /// + event EventHandler InternalError; + + /// + /// Raised whenever a physical connection is established. + /// + event EventHandler ConnectionRestored; + + /// + /// Raised when configuration changes are detected. + /// + event EventHandler ConfigurationChanged; + + /// + /// Raised when nodes are explicitly requested to reconfigure via broadcast. + /// This usually means primary/replica changes. + /// + event EventHandler ConfigurationChangedBroadcast; + + /// + /// Raised when server indicates a maintenance event is going to happen. + /// + event EventHandler ServerMaintenanceEvent; + + /// + /// Gets all endpoints defined on the multiplexer. + /// + /// Whether to return only the explicitly configured endpoints. 
+ EndPoint[] GetEndPoints(bool configuredOnly = false); + + /// + /// Wait for a given asynchronous operation to complete (or timeout). + /// + /// The task to wait on. + void Wait(Task task); + + /// + /// Wait for a given asynchronous operation to complete (or timeout). + /// + /// The type in . + /// The task to wait on. + T Wait(Task task); + + /// + /// Wait for the given asynchronous operations to complete (or timeout). + /// + /// The tasks to wait on. + void WaitAll(params Task[] tasks); + + /// + /// Raised when a hash-slot has been relocated. + /// + event EventHandler HashSlotMoved; + + /// + /// Compute the hash-slot of a specified key. + /// + /// The key to get a slot ID for. + int HashSlot(RedisKey key); + + /// + /// Obtain a pub/sub subscriber connection to the specified server. + /// + /// The async state to pass to the created . + ISubscriber GetSubscriber(object? asyncState = null); + + /// + /// Obtain an interactive connection to a database inside redis. + /// + /// The database ID to get. + /// The async state to pass to the created . + IDatabase GetDatabase(int db = -1, object? asyncState = null); + + /// + /// Obtain a configuration API for an individual server. + /// + /// The host to get a server for. + /// The specific port for to get a server for. + /// The async state to pass to the created . + IServer GetServer(string host, int port, object? asyncState = null); + + /// + /// Obtain a configuration API for an individual server. + /// + /// The "host:port" string to get a server for. + /// The async state to pass to the created . + IServer GetServer(string hostAndPort, object? asyncState = null); + + /// + /// Obtain a configuration API for an individual server. + /// + /// The host to get a server for. + /// The specific port for to get a server for. + IServer GetServer(IPAddress host, int port); + + /// + /// Obtain a configuration API for an individual server. + /// + /// The endpoint to get a server for. 
+ /// The async state to pass to the created . + IServer GetServer(EndPoint endpoint, object? asyncState = null); + + /// + /// Gets a server that would be used for a given key and flags. + /// + /// The endpoint to get a server for. In a non-cluster environment, this parameter is ignored. A key may be specified + /// on cluster, which will return a connection to an arbitrary server matching the specified flags. + /// The async state to pass to the created . + /// The command flags to use. + /// This method is particularly useful when communicating with a cluster environment, to obtain a connection to the server that owns the specified key + /// and ad-hoc commands with unusual routing requirements. Note that provides a connection that automatically routes commands by + /// looking for parameters, so this method is only necessary when used with commands that do not take a parameter, + /// but require consistent routing using key-like semantics. + IServer GetServer(RedisKey key, object? asyncState = null, CommandFlags flags = CommandFlags.None); + + /// + /// Obtain configuration APIs for all servers in this multiplexer. + /// + IServer[] GetServers(); + + /// + /// Reconfigure the current connections based on the existing configuration. + /// + /// The log to write output to. + Task ConfigureAsync(TextWriter? log = null); + + /// + /// Reconfigure the current connections based on the existing configuration. + /// + /// The log to write output to. + bool Configure(TextWriter? log = null); + + /// + /// Provides a text overview of the status of all connections. + /// + string GetStatus(); + + /// + /// Provides a text overview of the status of all connections. + /// + /// The log to write output to. + void GetStatus(TextWriter log); + + /// + /// See . + /// + string ToString(); + + /// + /// Close all connections and release all resources associated with this object. + /// + /// Whether to allow in-queue commands to complete first. 
+ void Close(bool allowCommandsToComplete = true); + + /// + /// Close all connections and release all resources associated with this object. + /// + /// Whether to allow in-queue commands to complete first. + Task CloseAsync(bool allowCommandsToComplete = true); + + /// + /// Obtains the log of unusual busy patterns. + /// + string? GetStormLog(); + + /// + /// Resets the log of unusual busy patterns. + /// + void ResetStormLog(); + + /// + /// Request all compatible clients to reconfigure or reconnect. + /// + /// The command flags to use. + /// The number of instances known to have received the message (however, the actual number can be higher; returns -1 if the operation is pending). + long PublishReconfigure(CommandFlags flags = CommandFlags.None); + + /// + /// Request all compatible clients to reconfigure or reconnect. + /// + /// The command flags to use. + /// The number of instances known to have received the message (however, the actual number can be higher). + Task PublishReconfigureAsync(CommandFlags flags = CommandFlags.None); + + /// + /// Get the hash-slot associated with a given key, if applicable; this can be useful for grouping operations. + /// + /// The key to get a the slot for. + int GetHashSlot(RedisKey key); + + /// + /// Write the configuration of all servers to an output stream. + /// + /// The destination stream to write the export to. + /// The options to use for this export. + void ExportConfiguration(Stream destination, ExportOptions options = ExportOptions.All); + + /// + /// Append a usage-specific modifier to the advertised library name; suffixes are de-duplicated + /// and sorted alphabetically (so adding 'a', 'b' and 'a' will result in suffix '-a-b'). + /// Connections will be updated as necessary (RESP2 subscription + /// connections will not show updates until those connections next connect). 
+ /// + void AddLibraryNameSuffix(string suffix); } diff --git a/src/StackExchange.Redis/Interfaces/IDatabase.VectorSets.cs b/src/StackExchange.Redis/Interfaces/IDatabase.VectorSets.cs new file mode 100644 index 000000000..8e6444ea8 --- /dev/null +++ b/src/StackExchange.Redis/Interfaces/IDatabase.VectorSets.cs @@ -0,0 +1,224 @@ +using System; +using System.Diagnostics.CodeAnalysis; +using RESPite; + +// ReSharper disable once CheckNamespace +namespace StackExchange.Redis; + +/// +/// Describes functionality that is common to both standalone redis servers and redis clusters. +/// +public partial interface IDatabase +{ + // Vector Set operations + + /// + /// Add a vector to a vectorset. + /// + /// The key of the vectorset. + /// The data to add. + /// The flags to use for this operation. + /// if the element was added; if it already existed. + /// + [Experimental(Experiments.VectorSets, UrlFormat = Experiments.UrlFormat)] + bool VectorSetAdd( + RedisKey key, + VectorSetAddRequest request, + CommandFlags flags = CommandFlags.None); + + /// + /// Get the cardinality (number of elements) of a vectorset. + /// + /// The key of the vectorset. + /// The flags to use for this operation. + /// The cardinality of the vectorset. + /// + [Experimental(Experiments.VectorSets, UrlFormat = Experiments.UrlFormat)] + long VectorSetLength(RedisKey key, CommandFlags flags = CommandFlags.None); + + /// + /// Get the dimension of vectors in a vectorset. + /// + /// The key of the vectorset. + /// The flags to use for this operation. + /// The dimension of vectors in the vectorset. + /// + [Experimental(Experiments.VectorSets, UrlFormat = Experiments.UrlFormat)] + int VectorSetDimension(RedisKey key, CommandFlags flags = CommandFlags.None); + + /// + /// Get the vector for a member. + /// + /// The key of the vectorset. + /// The member name. + /// The flags to use for this operation. + /// The vector as a pooled memory lease. 
+ /// + [Experimental(Experiments.VectorSets, UrlFormat = Experiments.UrlFormat)] + Lease? VectorSetGetApproximateVector( + RedisKey key, + RedisValue member, + CommandFlags flags = CommandFlags.None); + + /// + /// Get JSON attributes for a member in a vectorset. + /// + /// The key of the vectorset. + /// The member name. + /// The flags to use for this operation. + /// The attributes as a JSON string. + /// + [Experimental(Experiments.VectorSets, UrlFormat = Experiments.UrlFormat)] + string? VectorSetGetAttributesJson(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None); + + /// + /// Get information about a vectorset. + /// + /// The key of the vectorset. + /// The flags to use for this operation. + /// Information about the vectorset. + /// + [Experimental(Experiments.VectorSets, UrlFormat = Experiments.UrlFormat)] + VectorSetInfo? VectorSetInfo(RedisKey key, CommandFlags flags = CommandFlags.None); + + /// + /// Check if a member exists in a vectorset. + /// + /// The key of the vectorset. + /// The member name. + /// The flags to use for this operation. + /// True if the member exists, false otherwise. + /// + [Experimental(Experiments.VectorSets, UrlFormat = Experiments.UrlFormat)] + bool VectorSetContains(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None); + + /// + /// Get links/connections for a member in a vectorset. + /// + /// The key of the vectorset. + /// The member name. + /// The flags to use for this operation. + /// The linked members. + /// + [Experimental(Experiments.VectorSets, UrlFormat = Experiments.UrlFormat)] + Lease? VectorSetGetLinks(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None); + + /// + /// Get links/connections with scores for a member in a vectorset. + /// + /// The key of the vectorset. + /// The member name. + /// The flags to use for this operation. + /// The linked members with their similarity scores. 
+ /// + [Experimental(Experiments.VectorSets, UrlFormat = Experiments.UrlFormat)] + Lease? VectorSetGetLinksWithScores( + RedisKey key, + RedisValue member, + CommandFlags flags = CommandFlags.None); + + /// + /// Get a random member from a vectorset. + /// + /// The key of the vectorset. + /// The flags to use for this operation. + /// A random member from the vectorset, or null if the vectorset is empty. + /// + [Experimental(Experiments.VectorSets, UrlFormat = Experiments.UrlFormat)] + RedisValue VectorSetRandomMember(RedisKey key, CommandFlags flags = CommandFlags.None); + + /// + /// Get random members from a vectorset. + /// + /// The key of the vectorset. + /// The number of random members to return. + /// The flags to use for this operation. + /// Random members from the vectorset. + /// + [Experimental(Experiments.VectorSets, UrlFormat = Experiments.UrlFormat)] + RedisValue[] VectorSetRandomMembers(RedisKey key, long count, CommandFlags flags = CommandFlags.None); + + /// + /// Remove a member from a vectorset. + /// + /// The key of the vectorset. + /// The member to remove. + /// The flags to use for this operation. + /// if the member was removed; if it was not found. + /// + [Experimental(Experiments.VectorSets, UrlFormat = Experiments.UrlFormat)] + bool VectorSetRemove(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None); + + /// + /// Set JSON attributes for a member in a vectorset. + /// + /// The key of the vectorset. + /// The member name. + /// The attributes to set as a JSON string. + /// The flags to use for this operation. + /// True if successful. + /// + [Experimental(Experiments.VectorSets, UrlFormat = Experiments.UrlFormat)] + bool VectorSetSetAttributesJson( + RedisKey key, + RedisValue member, +#if NET8_0_OR_GREATER + [StringSyntax(StringSyntaxAttribute.Json)] +#endif + string attributesJson, + CommandFlags flags = CommandFlags.None); + + /// + /// Find similar vectors using vector similarity search. 
+ /// + /// The key of the vectorset. + /// The query to execute. + /// The flags to use for this operation. + /// Similar vectors with their similarity scores. + /// + [Experimental(Experiments.VectorSets, UrlFormat = Experiments.UrlFormat)] + Lease? VectorSetSimilaritySearch( + RedisKey key, + VectorSetSimilaritySearchRequest query, + CommandFlags flags = CommandFlags.None); + + /// + /// Get a range of members from a vectorset by lexicographical order. + /// + /// The key of the vectorset. + /// The minimum value to filter by (inclusive by default). + /// The maximum value to filter by (inclusive by default). + /// The maximum number of members to return (-1 for all). + /// Whether to exclude the start and/or end values. + /// The flags to use for this operation. + /// Members in the specified range as a pooled memory lease. + /// + [Experimental(Experiments.VectorSets, UrlFormat = Experiments.UrlFormat)] + Lease VectorSetRange( + RedisKey key, + RedisValue start = default, + RedisValue end = default, + long count = -1, + Exclude exclude = Exclude.None, + CommandFlags flags = CommandFlags.None); + + /// + /// Enumerate members from a vectorset by lexicographical order in batches. + /// + /// The key of the vectorset. + /// The minimum value to filter by (inclusive by default). + /// The maximum value to filter by (inclusive by default). + /// The batch size for each iteration. + /// Whether to exclude the start and/or end values. + /// The flags to use for this operation. + /// An enumerable of members in the specified range. 
+ /// + [Experimental(Experiments.VectorSets, UrlFormat = Experiments.UrlFormat)] + System.Collections.Generic.IEnumerable VectorSetRangeEnumerate( + RedisKey key, + RedisValue start = default, + RedisValue end = default, + long count = 100, + Exclude exclude = Exclude.None, + CommandFlags flags = CommandFlags.None); +} diff --git a/src/StackExchange.Redis/Interfaces/IDatabase.cs b/src/StackExchange.Redis/Interfaces/IDatabase.cs index a4d14ab2f..e26154652 100644 --- a/src/StackExchange.Redis/Interfaces/IDatabase.cs +++ b/src/StackExchange.Redis/Interfaces/IDatabase.cs @@ -1,16 +1,20 @@ using System; using System.Collections.Generic; +using System.ComponentModel; +using System.Diagnostics.CodeAnalysis; using System.Net; +using RESPite; +// ReSharper disable once CheckNamespace namespace StackExchange.Redis { /// - /// Describes functionality that is common to both standalone redis servers and redis clusters + /// Describes functionality that is common to both standalone redis servers and redis clusters. /// - public interface IDatabase : IRedis, IDatabaseAsync + public partial interface IDatabase : IRedis, IDatabaseAsync { /// - /// The numeric identifier of this database + /// The numeric identifier of this database. /// int Database { get; } @@ -20,7 +24,7 @@ public interface IDatabase : IRedis, IDatabaseAsync /// /// The async object state to be passed into the created . /// The created batch. - IBatch CreateBatch(object asyncState = null); + IBatch CreateBatch(object? asyncState = null); /// /// Allows creation of a group of operations that will be sent to the server as a single unit, @@ -28,10 +32,11 @@ public interface IDatabase : IRedis, IDatabaseAsync /// /// The async object state to be passed into the created . /// The created transaction. - ITransaction CreateTransaction(object asyncState = null); + ITransaction CreateTransaction(object? asyncState = null); /// - /// Atomically transfer a key from a source Redis instance to a destination Redis instance. 
On success the key is deleted from the original instance by default, and is guaranteed to exist in the target instance. + /// Atomically transfer a key from a source Redis instance to a destination Redis instance. + /// On success the key is deleted from the original instance by default, and is guaranteed to exist in the target instance. /// /// The key to migrate. /// The server to migrate the key to. @@ -39,58 +44,66 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The timeout to use for the transfer. /// The options to use for this migration. /// The flags to use for this operation. - /// https://redis.io/commands/MIGRATE + /// void KeyMigrate(RedisKey key, EndPoint toServer, int toDatabase = 0, int timeoutMilliseconds = 0, MigrateOptions migrateOptions = MigrateOptions.None, CommandFlags flags = CommandFlags.None); /// - /// Returns the raw DEBUG OBJECT output for a key; this command is not fully documented and should be avoided unless you have good reason, and then avoided anyway. + /// Returns the raw DEBUG OBJECT output for a key. + /// This command is not fully documented and should be avoided unless you have good reason, and then avoided anyway. /// /// The key to debug. /// The flags to use for this migration. /// The raw output from DEBUG OBJECT. - /// https://redis.io/commands/debug-object + /// RedisValue DebugObject(RedisKey key, CommandFlags flags = CommandFlags.None); /// - /// Add the specified member to the set stored at key. Specified members that are already a member of this set are ignored. If key does not exist, a new set is created before adding the specified members. + /// Add the specified member to the set stored at key. + /// Specified members that are already a member of this set are ignored. + /// If key does not exist, a new set is created before adding the specified members. /// /// The key of the set. /// The longitude of geo entry. /// The latitude of the geo entry. /// The value to set at this entry. 
/// The flags to use for this operation. - /// True if the specified member was not already present in the set, else False. - /// https://redis.io/commands/geoadd + /// if the specified member was not already present in the set, else . + /// bool GeoAdd(RedisKey key, double longitude, double latitude, RedisValue member, CommandFlags flags = CommandFlags.None); /// - /// Add the specified member to the set stored at key. Specified members that are already a member of this set are ignored. If key does not exist, a new set is created before adding the specified members. + /// Add the specified member to the set stored at key. + /// Specified members that are already a member of this set are ignored. + /// If key does not exist, a new set is created before adding the specified members. /// /// The key of the set. /// The geo value to store. /// The flags to use for this operation. - /// True if the specified member was not already present in the set, else False - /// https://redis.io/commands/geoadd + /// if the specified member was not already present in the set, else . + /// bool GeoAdd(RedisKey key, GeoEntry value, CommandFlags flags = CommandFlags.None); /// - /// Add the specified members to the set stored at key. Specified members that are already a member of this set are ignored. If key does not exist, a new set is created before adding the specified members. + /// Add the specified members to the set stored at key. + /// Specified members that are already a member of this set are ignored. + /// If key does not exist, a new set is created before adding the specified members. /// /// The key of the set. /// The geo values add to the set. /// The flags to use for this operation. /// The number of elements that were added to the set, not including all the elements already present into the set. 
- /// https://redis.io/commands/geoadd + /// long GeoAdd(RedisKey key, GeoEntry[] values, CommandFlags flags = CommandFlags.None); /// - /// Removes the specified member from the geo sorted set stored at key. Non existing members are ignored. + /// Removes the specified member from the geo sorted set stored at key. + /// Non-existing members are ignored. /// /// The key of the set. /// The geo value to remove. /// The flags to use for this operation. - /// True if the member existed in the sorted set and was removed; False otherwise. - /// https://redis.io/commands/zrem + /// if the member existed in the sorted set and was removed, else . + /// bool GeoRemove(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None); /// @@ -101,8 +114,8 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The second member to check. /// The unit of distance to return (defaults to meters). /// The flags to use for this operation. - /// The command returns the distance as a double (represented as a string) in the specified unit, or NULL if one or both the elements are missing. - /// https://redis.io/commands/geodist + /// The command returns the distance as a double (represented as a string) in the specified unit, or if one or both the elements are missing. + /// double? GeoDistance(RedisKey key, RedisValue member1, RedisValue member2, GeoUnit unit = GeoUnit.Meters, CommandFlags flags = CommandFlags.None); /// @@ -112,8 +125,8 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The members to get. /// The flags to use for this operation. /// The command returns an array where each element is the Geohash corresponding to each member name passed as argument to the command. 
- /// https://redis.io/commands/geohash - string[] GeoHash(RedisKey key, RedisValue[] members, CommandFlags flags = CommandFlags.None); + /// + string?[] GeoHash(RedisKey key, RedisValue[] members, CommandFlags flags = CommandFlags.None); /// /// Return valid Geohash strings representing the position of one or more elements in a sorted set value representing a geospatial index (where elements were added using GEOADD). @@ -122,8 +135,8 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The member to get. /// The flags to use for this operation. /// The command returns an array where each element is the Geohash corresponding to each member name passed as argument to the command. - /// https://redis.io/commands/geohash - string GeoHash(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None); + /// + string? GeoHash(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None); /// /// Return the positions (longitude,latitude) of all the specified members of the geospatial index represented by the sorted set at key. @@ -131,8 +144,11 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The key of the set. /// The members to get. /// The flags to use for this operation. - /// The command returns an array where each element is a two elements array representing longitude and latitude (x,y) of each member name passed as argument to the command.Non existing elements are reported as NULL elements of the array. - /// https://redis.io/commands/geopos + /// + /// The command returns an array where each element is a two elements array representing longitude and latitude (x,y) of each member name passed as argument to the command. + /// Non-existing elements are reported as NULL elements of the array. + /// + /// GeoPosition?[] GeoPosition(RedisKey key, RedisValue[] members, CommandFlags flags = CommandFlags.None); /// @@ -141,12 +157,16 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The key of the set. /// The member to get. 
/// The flags to use for this operation. - /// The command returns an array where each element is a two elements array representing longitude and latitude (x,y) of each member name passed as argument to the command.Non existing elements are reported as NULL elements of the array. - /// https://redis.io/commands/geopos + /// + /// The command returns an array where each element is a two elements array representing longitude and latitude (x,y) of each member name passed as argument to the command. + /// Non-existing elements are reported as NULL elements of the array. + /// + /// GeoPosition? GeoPosition(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None); /// - /// Return the members of a sorted set populated with geospatial information using GEOADD, which are within the borders of the area specified with the center location and the maximum distance from the center (the radius). + /// Return the members of a sorted set populated with geospatial information using GEOADD, which are + /// within the borders of the area specified with the center location and the maximum distance from the center (the radius). /// /// The key of the set. /// The member to get a radius of results from. @@ -157,11 +177,12 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The search options to use. /// The flags to use for this operation. /// The results found within the radius, if any. - /// https://redis.io/commands/georadius + /// GeoRadiusResult[] GeoRadius(RedisKey key, RedisValue member, double radius, GeoUnit unit = GeoUnit.Meters, int count = -1, Order? order = null, GeoRadiusOptions options = GeoRadiusOptions.Default, CommandFlags flags = CommandFlags.None); /// - /// Return the members of a sorted set populated with geospatial information using GEOADD, which are within the borders of the area specified with the center location and the maximum distance from the center (the radius). 
+ /// Return the members of a sorted set populated with geospatial information using GEOADD, which are + /// within the borders of the area specified with the center location and the maximum distance from the center (the radius). /// /// The key of the set. /// The longitude of the point to get a radius of results from. @@ -173,51 +194,128 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The latitude of the point to get a radius of results from. /// The radius to check. /// The unit of ; defaults to meters. /// The count of results to get, -1 for unlimited. /// The order of the results. /// The search options to use. /// The flags to use for this operation. /// The results found within the radius, if any. - /// https://redis.io/commands/georadius + /// GeoRadiusResult[] GeoRadius(RedisKey key, double longitude, double latitude, double radius, GeoUnit unit = GeoUnit.Meters, int count = -1, Order? order = null, GeoRadiusOptions options = GeoRadiusOptions.Default, CommandFlags flags = CommandFlags.None); /// - /// Decrements the number stored at field in the hash stored at key by decrement. If key does not exist, a new key holding a hash is created. If field does not exist the value is set to 0 before the operation is performed. + /// Return the members of the geo-encoded sorted set stored at bounded by the provided + /// , centered at the provided set . + /// + /// The key of the set. + /// The set member to use as the center of the shape. + /// The shape to use to bound the geo search. + /// The maximum number of results to pull back. + /// Whether to terminate the search after finding results. Must be true if count is -1. + /// The order to sort by (defaults to unordered). + /// The search options to use. + /// The flags for this operation. + /// The results found within the shape, if any. + /// + GeoRadiusResult[] GeoSearch(RedisKey key, RedisValue member, GeoSearchShape shape, int count = -1, bool demandClosest = true, Order?
order = null, GeoRadiusOptions options = GeoRadiusOptions.Default, CommandFlags flags = CommandFlags.None); + + /// + /// Return the members of the geo-encoded sorted set stored at bounded by the provided + /// , centered at the point provided by the and . + /// + /// The key of the set. + /// The longitude of the center point. + /// The latitude of the center point. + /// The shape to use to bound the geo search. + /// The maximum number of results to pull back. + /// Whether to terminate the search after finding results. Must be true if count is -1. + /// The order to sort by (defaults to unordered). + /// The search options to use. + /// The flags for this operation. + /// The results found within the shape, if any. + /// + GeoRadiusResult[] GeoSearch(RedisKey key, double longitude, double latitude, GeoSearchShape shape, int count = -1, bool demandClosest = true, Order? order = null, GeoRadiusOptions options = GeoRadiusOptions.Default, CommandFlags flags = CommandFlags.None); + + /// + /// Stores the members of the geo-encoded sorted set stored at bounded by the provided + /// , centered at the provided set . + /// + /// The key of the set. + /// The key to store the result at. + /// The set member to use as the center of the shape. + /// The shape to use to bound the geo search. + /// The maximum number of results to pull back. + /// Whether to terminate the search after finding results. Must be true if count is -1. + /// The order to sort by (defaults to unordered). + /// If set to true, the resulting set will be a regular sorted-set containing only distances, rather than a geo-encoded sorted-set. + /// The flags for this operation. + /// The size of the set stored at . + /// + long GeoSearchAndStore(RedisKey sourceKey, RedisKey destinationKey, RedisValue member, GeoSearchShape shape, int count = -1, bool demandClosest = true, Order?
order = null, bool storeDistances = false, CommandFlags flags = CommandFlags.None); + + /// + /// Stores the members of the geo-encoded sorted set stored at bounded by the provided + /// , centered at the point provided by the and . + /// + /// The key of the set. + /// The key to store the result at. + /// The longitude of the center point. + /// The latitude of the center point. + /// The shape to use to bound the geo search. + /// The maximum number of results to pull back. + /// Whether to terminate the search after finding results. Must be true if count is -1. + /// The order to sort by (defaults to unordered). + /// If set to true, the resulting set will be a regular sorted-set containing only distances, rather than a geo-encoded sorted-set. + /// The flags for this operation. + /// The size of the set stored at . + /// + long GeoSearchAndStore(RedisKey sourceKey, RedisKey destinationKey, double longitude, double latitude, GeoSearchShape shape, int count = -1, bool demandClosest = true, Order? order = null, bool storeDistances = false, CommandFlags flags = CommandFlags.None); + + /// + /// Decrements the number stored at field in the hash stored at key by decrement. + /// If key does not exist, a new key holding a hash is created. + /// If field does not exist the value is set to 0 before the operation is performed. /// /// The key of the hash. /// The field in the hash to decrement. /// The amount to decrement by. /// The flags to use for this operation. /// The value at field after the decrement operation. - /// The range of values supported by HINCRBY is limited to 64 bit signed integers. - /// https://redis.io/commands/hincrby + /// + /// The range of values supported by HINCRBY is limited to 64-bit signed integers.
+ /// + /// long HashDecrement(RedisKey key, RedisValue hashField, long value = 1, CommandFlags flags = CommandFlags.None); /// - /// Decrement the specified field of an hash stored at key, and representing a floating point number, by the specified decrement. If the field does not exist, it is set to 0 before performing the operation. + /// Decrement the specified field of a hash stored at key, and representing a floating point number, by the specified decrement. + /// If the field does not exist, it is set to 0 before performing the operation. /// /// The key of the hash. /// The field in the hash to decrement. /// The amount to decrement by. /// The flags to use for this operation. /// The value at field after the decrement operation. - /// The precision of the output is fixed at 17 digits after the decimal point regardless of the actual internal precision of the computation. - /// https://redis.io/commands/hincrbyfloat + /// + /// The precision of the output is fixed at 17 digits after the decimal point regardless of the actual internal precision of the computation. + /// + /// double HashDecrement(RedisKey key, RedisValue hashField, double value, CommandFlags flags = CommandFlags.None); /// - /// Removes the specified fields from the hash stored at key. Non-existing fields are ignored. Non-existing keys are treated as empty hashes and this command returns 0. + /// Removes the specified fields from the hash stored at key. + /// Non-existing fields are ignored. Non-existing keys are treated as empty hashes and this command returns 0. /// /// The key of the hash. /// The field in the hash to delete. /// The flags to use for this operation. - /// The number of fields that were removed. - /// https://redis.io/commands/hdel + /// if the field was removed. + /// bool HashDelete(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None); /// - /// Removes the specified fields from the hash stored at key. Non-existing fields are ignored. 
Non-existing keys are treated as empty hashes and this command returns 0. + /// Removes the specified fields from the hash stored at key. + /// Non-existing fields are ignored. Non-existing keys are treated as empty hashes and this command returns 0. /// /// The key of the hash. /// The fields in the hash to delete. /// The flags to use for this operation. /// The number of fields that were removed. - /// https://redis.io/commands/hdel + /// long HashDelete(RedisKey key, RedisValue[] hashFields, CommandFlags flags = CommandFlags.None); /// @@ -226,18 +324,177 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The key of the hash. /// The field in the hash to check. /// The flags to use for this operation. - /// 1 if the hash contains field. 0 if the hash does not contain field, or key does not exist. - /// https://redis.io/commands/hexists + /// if the hash contains field, if the hash does not contain field, or key does not exist. + /// bool HashExists(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None); + /// + /// Set the remaining time to live in milliseconds for the given set of fields of hash. + /// After the timeout has expired, the field of the hash will automatically be deleted. + /// + /// The key of the hash. + /// The fields in the hash to set expire time. + /// The timeout to set. + /// under which condition the expiration will be set using . + /// The flags to use for this operation. + /// + /// Empty array if the key does not exist. Otherwise, returns an array where each item is the result of operation for given fields: + /// + /// + /// Result + /// Description + /// + /// + /// 2 + /// Field deleted because the specified expiration time is due. + /// + /// + /// 1 + /// Expiration time set/updated. + /// + /// + /// 0 + /// Expiration time is not set/updated (a specified ExpireWhen condition is not met). + /// + /// + /// -1 + /// No such field exists.
+ /// + /// + /// + ExpireResult[] HashFieldExpire(RedisKey key, RedisValue[] hashFields, TimeSpan expiry, ExpireWhen when = ExpireWhen.Always, CommandFlags flags = CommandFlags.None); + + /// + /// Set the time-out on a field of the given set of fields of hash. + /// After the timeout has expired, the field of the hash will automatically be deleted. + /// + /// The key of the hash. + /// The fields in the hash to set expire time. + /// The exact expiry date to set. + /// under which condition the expiration will be set using . + /// The flags to use for this operation. + /// + /// Empty array if the key does not exist. Otherwise, returns an array where each item is the result of operation for given fields: + /// + /// + /// Result + /// Description + /// + /// + /// 2 + /// Field deleted because the specified expiration time is due. + /// + /// + /// 1 + /// Expiration time set/updated. + /// + /// + /// 0 + /// Expiration time is not set/updated (a specified ExpireWhen condition is not met). + /// + /// + /// -1 + /// No such field exists. + /// + /// + /// + ExpireResult[] HashFieldExpire(RedisKey key, RedisValue[] hashFields, DateTime expiry, ExpireWhen when = ExpireWhen.Always, CommandFlags flags = CommandFlags.None); + + /// + /// For each specified field, it gets the expiration time as a Unix timestamp in milliseconds (milliseconds since the Unix epoch). + /// + /// The key of the hash. + /// The fields in the hash to get expire time. + /// The flags to use for this operation. + /// + /// Empty array if the key does not exist. Otherwise, returns the result of operation for given fields: + /// + /// + /// Result + /// Description + /// + /// + /// > 0 + /// Expiration time, as a Unix timestamp in milliseconds. + /// + /// + /// -1 + /// Field has no associated expiration time. + /// + /// + /// -2 + /// No such field exists.
+ /// + /// + /// + long[] HashFieldGetExpireDateTime(RedisKey key, RedisValue[] hashFields, CommandFlags flags = CommandFlags.None); + + /// + /// For each specified field, it removes the expiration time. + /// + /// The key of the hash. + /// The fields in the hash to remove expire time. + /// The flags to use for this operation. + /// + /// Empty array if the key does not exist. Otherwise, returns the result of operation for given fields: + /// + /// + /// Result + /// Description + /// + /// + /// 1 + /// Expiration time was removed. + /// + /// + /// -1 + /// Field has no associated expiration time. + /// + /// + /// -2 + /// No such field exists. + /// + /// + /// + PersistResult[] HashFieldPersist(RedisKey key, RedisValue[] hashFields, CommandFlags flags = CommandFlags.None); + + /// + /// For each specified field, it gets the remaining time to live in milliseconds. + /// + /// The key of the hash. + /// The fields in the hash to get expire time. + /// The flags to use for this operation. + /// + /// Empty array if the key does not exist. Otherwise, returns the result of operation for given fields: + /// + /// + /// Result + /// Description + /// + /// + /// > 0 + /// Time to live, in milliseconds. + /// + /// + /// -1 + /// Field has no associated expiration time. + /// + /// + /// -2 + /// No such field exists. + /// + /// + /// + long[] HashFieldGetTimeToLive(RedisKey key, RedisValue[] hashFields, CommandFlags flags = CommandFlags.None); + /// /// Returns the value associated with field in the hash stored at key. /// /// The key of the hash. /// The field in the hash to get. /// The flags to use for this operation. - /// The value associated with field, or nil when field is not present in the hash or key does not exist. - /// https://redis.io/commands/hget + /// The value associated with field, or when field is not present in the hash or key does not exist. 
+ /// RedisValue HashGet(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None); /// @@ -246,52 +503,203 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The key of the hash. /// The field in the hash to get. /// The flags to use for this operation. - /// The value associated with field, or nil when field is not present in the hash or key does not exist. - /// https://redis.io/commands/hget - Lease HashGetLease(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None); + /// The value associated with field, or when field is not present in the hash or key does not exist. + /// + Lease? HashGetLease(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None); /// /// Returns the values associated with the specified fields in the hash stored at key. - /// For every field that does not exist in the hash, a nil value is returned.Because a non-existing keys are treated as empty hashes, running HMGET against a non-existing key will return a list of nil values. + /// For every field that does not exist in the hash, a value is returned. + /// Because non-existing keys are treated as empty hashes, running HMGET against a non-existing key will return a list of values. /// /// The key of the hash. /// The fields in the hash to get. /// The flags to use for this operation. /// List of values associated with the given fields, in the same order as they are requested. - /// https://redis.io/commands/hmget + /// RedisValue[] HashGet(RedisKey key, RedisValue[] hashFields, CommandFlags flags = CommandFlags.None); + /// + /// Returns the value associated with field in the hash stored at key. + /// + /// The key of the hash. + /// The field in the hash to get. + /// The flags to use for this operation. + /// The value associated with field, or when field is not present in the hash or key does not exist. 
+ /// + RedisValue HashFieldGetAndDelete(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None); + + /// + /// Returns the value associated with field in the hash stored at key. + /// + /// The key of the hash. + /// The field in the hash to get. + /// The flags to use for this operation. + /// The value associated with field, or when field is not present in the hash or key does not exist. + /// + Lease? HashFieldGetLeaseAndDelete(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None); + + /// + /// Returns the values associated with the specified fields in the hash stored at key. + /// For every field that does not exist in the hash, a value is returned. + /// Because non-existing keys are treated as empty hashes, running HMGET against a non-existing key will return a list of values. + /// + /// The key of the hash. + /// The fields in the hash to get. + /// The flags to use for this operation. + /// List of values associated with the given fields, in the same order as they are requested. + /// + RedisValue[] HashFieldGetAndDelete(RedisKey key, RedisValue[] hashFields, CommandFlags flags = CommandFlags.None); + + /// + /// Gets the value of the specified hash field and sets its expiration time. + /// + /// The key of the hash. + /// The field in the hash to get and set the expiration for. + /// The expiration time to set. + /// If true, the expiration will be removed. And 'expiry' parameter is ignored. + /// The flags to use for this operation. + /// The value of the specified hash field. + RedisValue HashFieldGetAndSetExpiry(RedisKey key, RedisValue hashField, TimeSpan? expiry = null, bool persist = false, CommandFlags flags = CommandFlags.None); + + /// + /// Gets the value of the specified hash field and sets its expiration time. + /// + /// The key of the hash. + /// The field in the hash to get and set the expiration for. + /// The exact date and time to set the expiration to. 
+ /// The flags to use for this operation. + /// The value of the specified hash field. + RedisValue HashFieldGetAndSetExpiry(RedisKey key, RedisValue hashField, DateTime expiry, CommandFlags flags = CommandFlags.None); + + /// + /// Gets the value of the specified hash field and sets its expiration time, returning a lease. + /// + /// The key of the hash. + /// The field in the hash to get and set the expiration for. + /// The expiration time to set. + /// If true, the expiration will be removed. And 'expiry' parameter is ignored. + /// The flags to use for this operation. + /// The value of the specified hash field as a lease. + Lease? HashFieldGetLeaseAndSetExpiry(RedisKey key, RedisValue hashField, TimeSpan? expiry = null, bool persist = false, CommandFlags flags = CommandFlags.None); + + /// + /// Gets the value of the specified hash field and sets its expiration time, returning a lease. + /// + /// The key of the hash. + /// The field in the hash to get and set the expiration for. + /// The exact date and time to set the expiration to. + /// The flags to use for this operation. + /// The value of the specified hash field as a lease. + Lease? HashFieldGetLeaseAndSetExpiry(RedisKey key, RedisValue hashField, DateTime expiry, CommandFlags flags = CommandFlags.None); + + /// + /// Gets the values of the specified hash fields and sets their expiration times. + /// + /// The key of the hash. + /// The fields in the hash to get and set the expiration for. + /// The expiration time to set. + /// If true, the expiration will be removed. And 'expiry' parameter is ignored. + /// The flags to use for this operation. + /// The values of the specified hash fields. + RedisValue[] HashFieldGetAndSetExpiry(RedisKey key, RedisValue[] hashFields, TimeSpan? expiry = null, bool persist = false, CommandFlags flags = CommandFlags.None); + + /// + /// Gets the values of the specified hash fields and sets their expiration times. + /// + /// The key of the hash. 
+ /// The fields in the hash to get and set the expiration for. + /// The exact date and time to set the expiration to. + /// The flags to use for this operation. + /// The values of the specified hash fields. + RedisValue[] HashFieldGetAndSetExpiry(RedisKey key, RedisValue[] hashFields, DateTime expiry, CommandFlags flags = CommandFlags.None); + + /// + /// Sets the value of the specified hash field and sets its expiration time. + /// + /// The key of the hash. + /// The field in the hash to set and set the expiration for. + /// The value in the hash to set and set the expiration for. + /// The expiration time to set. + /// Whether to maintain the existing field's TTL (KEEPTTL flag). + /// Which conditions to set the value under (defaults to always). + /// The flags to use for this operation. + /// 0 if no fields were set, 1 if all the fields were set. + RedisValue HashFieldSetAndSetExpiry(RedisKey key, RedisValue field, RedisValue value, TimeSpan? expiry = null, bool keepTtl = false, When when = When.Always, CommandFlags flags = CommandFlags.None); + + /// + /// Sets the value of the specified hash field and sets its expiration time. + /// + /// The key of the hash. + /// The field in the hash to set and set the expiration for. + /// The value in the hash to set and set the expiration for. + /// The exact date and time to set the expiration to. + /// Which conditions to set the value under (defaults to always). + /// The flags to use for this operation. + /// 0 if no fields were set, 1 if all the fields were set. + RedisValue HashFieldSetAndSetExpiry(RedisKey key, RedisValue field, RedisValue value, DateTime expiry, When when = When.Always, CommandFlags flags = CommandFlags.None); + + /// + /// Sets the values of the specified hash fields and sets their expiration times. + /// + /// The key of the hash. + /// The fields in the hash to set and set the expiration for. + /// The expiration time to set. 
+ /// Whether to maintain the existing fields' TTL (KEEPTTL flag). + /// Which conditions to set the values under (defaults to always). + /// The flags to use for this operation. + /// 0 if no fields were set, 1 if all the fields were set. + RedisValue HashFieldSetAndSetExpiry(RedisKey key, HashEntry[] hashFields, TimeSpan? expiry = null, bool keepTtl = false, When when = When.Always, CommandFlags flags = CommandFlags.None); + + /// + /// Sets the values of the specified hash fields and sets their expiration times. + /// + /// The key of the hash. + /// The fields in the hash to set and set the expiration for. + /// The exact date and time to set the expiration to. + /// Which conditions to set the values under (defaults to always). + /// The flags to use for this operation. + /// 0 if no fields were set, 1 if all the fields were set. + RedisValue HashFieldSetAndSetExpiry(RedisKey key, HashEntry[] hashFields, DateTime expiry, When when = When.Always, CommandFlags flags = CommandFlags.None); + /// /// Returns all fields and values of the hash stored at key. /// /// The key of the hash to get all entries from. /// The flags to use for this operation. /// List of fields and their values stored in the hash, or an empty list when key does not exist. - /// https://redis.io/commands/hgetall + /// HashEntry[] HashGetAll(RedisKey key, CommandFlags flags = CommandFlags.None); /// - /// Increments the number stored at field in the hash stored at key by increment. If key does not exist, a new key holding a hash is created. If field does not exist the value is set to 0 before the operation is performed. + /// Increments the number stored at field in the hash stored at key by increment. + /// If key does not exist, a new key holding a hash is created. + /// If field does not exist the value is set to 0 before the operation is performed. /// /// The key of the hash. /// The field in the hash to increment. /// The amount to increment by. /// The flags to use for this operation. 
/// The value at field after the increment operation. - /// The range of values supported by HINCRBY is limited to 64 bit signed integers. - /// https://redis.io/commands/hincrby + /// + /// The range of values supported by HINCRBY is limited to 64-bit signed integers. + /// + /// long HashIncrement(RedisKey key, RedisValue hashField, long value = 1, CommandFlags flags = CommandFlags.None); /// - /// Increment the specified field of an hash stored at key, and representing a floating point number, by the specified increment. If the field does not exist, it is set to 0 before performing the operation. + /// Increment the specified field of a hash stored at key, and representing a floating point number, by the specified increment. + /// If the field does not exist, it is set to 0 before performing the operation. /// /// The key of the hash. /// The field in the hash to increment. /// The amount to increment by. /// The flags to use for this operation. /// The value at field after the increment operation. - /// The precision of the output is fixed at 17 digits after the decimal point regardless of the actual internal precision of the computation. - /// https://redis.io/commands/hincrbyfloat + /// + /// The precision of the output is fixed at 17 digits after the decimal point regardless of the actual internal precision of the computation. + /// + /// double HashIncrement(RedisKey key, RedisValue hashField, double value, CommandFlags flags = CommandFlags.None); /// @@ -300,7 +708,7 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The key of the hash. /// The flags to use for this operation. /// List of fields in the hash, or an empty list when key does not exist. - /// https://redis.io/commands/hkeys + /// RedisValue[] HashKeys(RedisKey key, CommandFlags flags = CommandFlags.None); /// @@ -309,9 +717,38 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The key of the hash. /// The flags to use for this operation. 
/// The number of fields in the hash, or 0 when key does not exist. - /// https://redis.io/commands/hlen + /// long HashLength(RedisKey key, CommandFlags flags = CommandFlags.None); + /// + /// Gets a random field from the hash at . + /// + /// The key of the hash. + /// The flags to use for this operation. + /// A random hash field name or if the hash does not exist. + /// + RedisValue HashRandomField(RedisKey key, CommandFlags flags = CommandFlags.None); + + /// + /// Gets field names from the hash at . + /// + /// The key of the hash. + /// The number of fields to return. + /// The flags to use for this operation. + /// An array of hash field names of size of at most , or if the hash does not exist. + /// + RedisValue[] HashRandomFields(RedisKey key, long count, CommandFlags flags = CommandFlags.None); + + /// + /// Gets field names and values from the hash at . + /// + /// The key of the hash. + /// The number of fields to return. + /// The flags to use for this operation. + /// An array of hash entries of size of at most , or if the hash does not exist. + /// + HashEntry[] HashRandomFieldsWithValues(RedisKey key, long count, CommandFlags flags = CommandFlags.None); + /// /// The HSCAN command is used to incrementally iterate over a hash. /// @@ -320,11 +757,26 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The page size to iterate by. /// The flags to use for this operation. /// Yields all elements of the hash matching the pattern. - /// https://redis.io/commands/hscan + /// IEnumerable HashScan(RedisKey key, RedisValue pattern, int pageSize, CommandFlags flags); /// - /// The HSCAN command is used to incrementally iterate over a hash; note: to resume an iteration via cursor, cast the original enumerable or enumerator to IScanningCursor. + /// The HSCAN command is used to incrementally iterate over a hash. + /// Note: to resume an iteration via cursor, cast the original enumerable or enumerator to . + /// + /// The key of the hash. 
+ /// The pattern of keys to get entries for. + /// The page size to iterate by. + /// The cursor position to start at. + /// The page offset to start at. + /// The flags to use for this operation. + /// Yields all elements of the hash matching the pattern. + /// + IEnumerable HashScan(RedisKey key, RedisValue pattern = default, int pageSize = RedisBase.CursorUtils.DefaultLibraryPageSize, long cursor = RedisBase.CursorUtils.Origin, int pageOffset = 0, CommandFlags flags = CommandFlags.None); + + /// + /// The HSCAN command is used to incrementally iterate over a hash and return only field names. + /// Note: to resume an iteration via cursor, cast the original enumerable or enumerator to . /// /// The key of the hash. /// The pattern of keys to get entries for. @@ -333,39 +785,46 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The page offset to start at. /// The flags to use for this operation. /// Yields all elements of the hash matching the pattern. - /// https://redis.io/commands/hscan - IEnumerable HashScan(RedisKey key, RedisValue pattern = default(RedisValue), int pageSize = RedisBase.CursorUtils.DefaultLibraryPageSize, long cursor = RedisBase.CursorUtils.Origin, int pageOffset = 0, CommandFlags flags = CommandFlags.None); + /// + IEnumerable HashScanNoValues(RedisKey key, RedisValue pattern = default, int pageSize = RedisBase.CursorUtils.DefaultLibraryPageSize, long cursor = RedisBase.CursorUtils.Origin, int pageOffset = 0, CommandFlags flags = CommandFlags.None); /// - /// Sets the specified fields to their respective values in the hash stored at key. This command overwrites any specified fields that already exist in the hash, leaving other unspecified fields untouched. If key does not exist, a new key holding a hash is created. + /// Sets the specified fields to their respective values in the hash stored at key. + /// This command overwrites any specified fields that already exist in the hash, leaving other unspecified fields untouched. 
+ /// If key does not exist, a new key holding a hash is created. /// /// The key of the hash. /// The entries to set in the hash. /// The flags to use for this operation. - /// https://redis.io/commands/hmset + /// void HashSet(RedisKey key, HashEntry[] hashFields, CommandFlags flags = CommandFlags.None); /// - /// Sets field in the hash stored at key to value. If key does not exist, a new key holding a hash is created. If field already exists in the hash, it is overwritten. + /// Sets field in the hash stored at key to value. + /// If key does not exist, a new key holding a hash is created. + /// If field already exists in the hash, it is overwritten. /// /// The key of the hash. /// The field to set in the hash. /// The value to set. /// Which conditions under which to set the field value (defaults to always). /// The flags to use for this operation. - /// 1 if field is a new field in the hash and value was set. 0 if field already exists in the hash and the value was updated. - /// https://redis.io/commands/hset - /// https://redis.io/commands/hsetnx + /// if field is a new field in the hash and value was set, if field already exists in the hash and the value was updated. + /// + /// See + /// and + /// . + /// bool HashSet(RedisKey key, RedisValue hashField, RedisValue value, When when = When.Always, CommandFlags flags = CommandFlags.None); /// /// Returns the string length of the value associated with field in the hash stored at key. /// /// The key of the hash. - /// The field containing the string + /// The field containing the string. /// The flags to use for this operation. - /// the length of the string at field, or 0 when key does not exist. - /// https://redis.io/commands/hstrlen + /// The length of the string at field, or 0 when key does not exist. + /// long HashStringLength(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None); /// @@ -374,7 +833,7 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The key of the hash. 
/// The flags to use for this operation. /// List of values in the hash, or an empty list when key does not exist. - /// https://redis.io/commands/hvals + /// RedisValue[] HashValues(RedisKey key, CommandFlags flags = CommandFlags.None); /// @@ -383,8 +842,8 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The key of the hyperloglog. /// The value to add. /// The flags to use for this operation. - /// True if at least 1 HyperLogLog internal register was altered, false otherwise. - /// https://redis.io/commands/pfadd + /// if at least 1 HyperLogLog internal register was altered, otherwise. + /// bool HyperLogLogAdd(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None); /// @@ -393,8 +852,8 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The key of the hyperloglog. /// The values to add. /// The flags to use for this operation. - /// True if at least 1 HyperLogLog internal register was altered, false otherwise. - /// https://redis.io/commands/pfadd + /// if at least 1 HyperLogLog internal register was altered, otherwise. + /// bool HyperLogLogAdd(RedisKey key, RedisValue[] values, CommandFlags flags = CommandFlags.None); /// @@ -403,7 +862,7 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The key of the hyperloglog. /// The flags to use for this operation. /// The approximated number of unique elements observed via HyperLogLogAdd. - /// https://redis.io/commands/pfcount + /// long HyperLogLogLength(RedisKey key, CommandFlags flags = CommandFlags.None); /// @@ -412,26 +871,26 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The keys of the hyperloglogs. /// The flags to use for this operation. /// The approximated number of unique elements observed via HyperLogLogAdd. 
- /// https://redis.io/commands/pfcount + /// long HyperLogLogLength(RedisKey[] keys, CommandFlags flags = CommandFlags.None); /// - /// Merge multiple HyperLogLog values into an unique value that will approximate the cardinality of the union of the observed Sets of the source HyperLogLog structures. + /// Merge multiple HyperLogLog values into a unique value that will approximate the cardinality of the union of the observed Sets of the source HyperLogLog structures. /// /// The key of the merged hyperloglog. /// The key of the first hyperloglog to merge. - /// The key of the first hyperloglog to merge. + /// The key of the second hyperloglog to merge. /// The flags to use for this operation. - /// https://redis.io/commands/pfmerge + /// void HyperLogLogMerge(RedisKey destination, RedisKey first, RedisKey second, CommandFlags flags = CommandFlags.None); /// - /// Merge multiple HyperLogLog values into an unique value that will approximate the cardinality of the union of the observed Sets of the source HyperLogLog structures. + /// Merge multiple HyperLogLog values into a unique value that will approximate the cardinality of the union of the observed Sets of the source HyperLogLog structures. /// /// The key of the merged hyperloglog. /// The keys of the hyperloglogs to merge. /// The flags to use for this operation. - /// https://redis.io/commands/pfmerge + /// void HyperLogLogMerge(RedisKey destination, RedisKey[] sourceKeys, CommandFlags flags = CommandFlags.None); /// @@ -440,7 +899,19 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The key to check. /// The flags to use for this operation. /// The endpoint serving the key. - EndPoint IdentifyEndpoint(RedisKey key = default(RedisKey), CommandFlags flags = CommandFlags.None); + EndPoint? IdentifyEndpoint(RedisKey key = default, CommandFlags flags = CommandFlags.None); + + /// + /// Copies the value from the to the specified . + /// + /// The key of the source value to copy. 
+ /// The destination key to copy the source to. + /// The database ID to store in. If default (-1), current database is used. + /// Whether to overwrite an existing values at . If and the key exists, the copy will not succeed. + /// The flags to use for this operation. + /// if key was copied. if key was not copied. + /// + bool KeyCopy(RedisKey sourceKey, RedisKey destinationKey, int destinationDatabase = -1, bool replace = false, CommandFlags flags = CommandFlags.None); /// /// Removes the specified key. A key is ignored if it does not exist. @@ -448,9 +919,12 @@ public interface IDatabase : IRedis, IDatabaseAsync /// /// The key to delete. /// The flags to use for this operation. - /// True if the key was removed. - /// https://redis.io/commands/del - /// https://redis.io/commands/unlink + /// if the key was removed. + /// + /// See + /// and + /// . + /// bool KeyDelete(RedisKey key, CommandFlags flags = CommandFlags.None); /// @@ -460,26 +934,39 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The keys to delete. /// The flags to use for this operation. /// The number of keys that were removed. - /// https://redis.io/commands/del - /// https://redis.io/commands/unlink + /// + /// See + /// , + /// . + /// long KeyDelete(RedisKey[] keys, CommandFlags flags = CommandFlags.None); /// - /// Serialize the value stored at key in a Redis-specific format and return it to the user. The returned value can be synthesized back into a Redis key using the RESTORE command. + /// Serialize the value stored at key in a Redis-specific format and return it to the user. + /// The returned value can be synthesized back into a Redis key using the RESTORE command. + /// + /// The key to dump. + /// The flags to use for this operation. + /// The serialized value. + /// + byte[]? KeyDump(RedisKey key, CommandFlags flags = CommandFlags.None); + + /// + /// Returns the internal encoding for the Redis object stored at . /// /// The key to dump. 
/// The flags to use for this operation. - /// the serialized value. - /// https://redis.io/commands/dump - byte[] KeyDump(RedisKey key, CommandFlags flags = CommandFlags.None); + /// The Redis encoding for the value or is the key does not exist. + /// + string? KeyEncoding(RedisKey key, CommandFlags flags = CommandFlags.None); /// /// Returns if key exists. /// /// The key to check. /// The flags to use for this operation. - /// 1 if the key exists. 0 if the key does not exist. - /// https://redis.io/commands/exists + /// if the key exists. if the key does not exist. + /// bool KeyExists(RedisKey key, CommandFlags flags = CommandFlags.None); /// @@ -488,122 +975,250 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The keys to check. /// The flags to use for this operation. /// The number of keys that existed. - /// https://redis.io/commands/exists + /// long KeyExists(RedisKey[] keys, CommandFlags flags = CommandFlags.None); /// - /// Set a timeout on key. After the timeout has expired, the key will automatically be deleted. A key with an associated timeout is said to be volatile in Redis terminology. + /// Set a timeout on . + /// After the timeout has expired, the key will automatically be deleted. + /// A key with an associated timeout is said to be volatile in Redis terminology. + /// + /// The key to set the expiration for. + /// The timeout to set. + /// The flags to use for this operation. + /// if the timeout was set. if key does not exist or the timeout could not be set. + /// + /// If key is updated before the timeout has expired, then the timeout is removed as if the PERSIST command was invoked on key. + /// + /// For Redis versions < 2.1.3, existing timeouts cannot be overwritten. + /// So, if key already has an associated timeout, it will do nothing and return 0. + /// + /// + /// Since Redis 2.1.3, you can update the timeout of a key. + /// It is also possible to remove the timeout using the PERSIST command. 
+ /// See the page on key expiry for more information. + /// + /// + /// See + /// , + /// , + /// . + /// + /// + [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] + bool KeyExpire(RedisKey key, TimeSpan? expiry, CommandFlags flags); + + /// + /// Set a timeout on . + /// After the timeout has expired, the key will automatically be deleted. + /// A key with an associated timeout is said to be volatile in Redis terminology. /// /// The key to set the expiration for. /// The timeout to set. + /// In Redis 7+, we can choose under which condition the expiration will be set using . /// The flags to use for this operation. - /// 1 if the timeout was set. 0 if key does not exist or the timeout could not be set. - /// If key is updated before the timeout has expired, then the timeout is removed as if the PERSIST command was invoked on key. - /// For Redis versions < 2.1.3, existing timeouts cannot be overwritten. So, if key already has an associated timeout, it will do nothing and return 0. Since Redis 2.1.3, you can update the timeout of a key. It is also possible to remove the timeout using the PERSIST command. See the page on key expiry for more information. - /// https://redis.io/commands/expire - /// https://redis.io/commands/pexpire - /// https://redis.io/commands/persist - bool KeyExpire(RedisKey key, TimeSpan? expiry, CommandFlags flags = CommandFlags.None); + /// if the timeout was set. if key does not exist or the timeout could not be set. + /// + /// See + /// , + /// . + /// + bool KeyExpire(RedisKey key, TimeSpan? expiry, ExpireWhen when = ExpireWhen.Always, CommandFlags flags = CommandFlags.None); /// - /// Set a timeout on key. After the timeout has expired, the key will automatically be deleted. A key with an associated timeout is said to be volatile in Redis terminology. + /// Set a timeout on . + /// After the timeout has expired, the key will automatically be deleted. 
+ /// A key with an associated timeout is said to be volatile in Redis terminology. /// /// The key to set the expiration for. /// The exact date to expiry to set. /// The flags to use for this operation. - /// 1 if the timeout was set. 0 if key does not exist or the timeout could not be set. - /// If key is updated before the timeout has expired, then the timeout is removed as if the PERSIST command was invoked on key. - /// For Redis versions < 2.1.3, existing timeouts cannot be overwritten. So, if key already has an associated timeout, it will do nothing and return 0. Since Redis 2.1.3, you can update the timeout of a key. It is also possible to remove the timeout using the PERSIST command. See the page on key expiry for more information. - /// https://redis.io/commands/expireat - /// https://redis.io/commands/pexpireat - /// https://redis.io/commands/persist - bool KeyExpire(RedisKey key, DateTime? expiry, CommandFlags flags = CommandFlags.None); + /// if the timeout was set. if key does not exist or the timeout could not be set. + /// + /// If key is updated before the timeout has expired, then the timeout is removed as if the PERSIST command was invoked on key. + /// + /// For Redis versions < 2.1.3, existing timeouts cannot be overwritten. + /// So, if key already has an associated timeout, it will do nothing and return 0. + /// + /// + /// Since Redis 2.1.3, you can update the timeout of a key. + /// It is also possible to remove the timeout using the PERSIST command. + /// See the page on key expiry for more information. + /// + /// + /// See + /// or + /// or + /// . + /// + /// + [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] + bool KeyExpire(RedisKey key, DateTime? expiry, CommandFlags flags); + + /// + /// Set a timeout on . + /// After the timeout has expired, the key will automatically be deleted. + /// A key with an associated timeout is said to be volatile in Redis terminology. + /// + /// The key to set the expiration for. 
+ /// The timeout to set. + /// In Redis 7+, we choose under which condition the expiration will be set using . + /// The flags to use for this operation. + /// if the timeout was set. if key does not exist or the timeout could not be set. + /// + /// See + /// , + /// . + /// + bool KeyExpire(RedisKey key, DateTime? expiry, ExpireWhen when = ExpireWhen.Always, CommandFlags flags = CommandFlags.None); + + /// + /// Returns the absolute time at which the given will expire, if it exists and has an expiration. + /// + /// The key to get the expiration for. + /// The flags to use for this operation. + /// The time at which the given key will expire, or if the key does not exist or has no associated expiration time. + /// + /// See + /// , + /// . + /// + DateTime? KeyExpireTime(RedisKey key, CommandFlags flags = CommandFlags.None); + + /// + /// Returns the logarithmic access frequency counter of the object stored at . + /// The command is only available when the maxmemory-policy configuration directive is set to + /// one of the LFU policies. + /// + /// The key to get a frequency count for. + /// The flags to use for this operation. + /// The number of logarithmic access frequency counter, ( if the key does not exist). + /// + long? KeyFrequency(RedisKey key, CommandFlags flags = CommandFlags.None); /// - /// Returns the time since the object stored at the specified key is idle (not requested by read or write operations) + /// Returns the time since the object stored at the specified key is idle (not requested by read or write operations). /// /// The key to get the time of. /// The flags to use for this operation. - /// The time since the object stored at the specified key is idle - /// https://redis.io/commands/object + /// The time since the object stored at the specified key is idle. + /// TimeSpan? 
KeyIdleTime(RedisKey key, CommandFlags flags = CommandFlags.None); /// - /// Move key from the currently selected database (see SELECT) to the specified destination database. When key already exists in the destination database, or it does not exist in the source database, it does nothing. It is possible to use MOVE as a locking primitive because of this. + /// Move key from the currently selected database (see SELECT) to the specified destination database. + /// When key already exists in the destination database, or it does not exist in the source database, it does nothing. + /// It is possible to use MOVE as a locking primitive because of this. /// /// The key to move. /// The database to move the key to. /// The flags to use for this operation. - /// 1 if key was moved; 0 if key was not moved. - /// https://redis.io/commands/move + /// if key was moved. if key was not moved. + /// bool KeyMove(RedisKey key, int database, CommandFlags flags = CommandFlags.None); /// - /// Remove the existing timeout on key, turning the key from volatile (a key with an expire set) to persistent (a key that will never expire as no timeout is associated). + /// Remove the existing timeout on key, turning the key from volatile (a key with an expiry set) to persistent (a key that will never expire as no timeout is associated). /// /// The key to persist. /// The flags to use for this operation. - /// 1 if the timeout was removed. 0 if key does not exist or does not have an associated timeout. - /// https://redis.io/commands/persist + /// if the timeout was removed. if key does not exist or does not have an associated timeout. + /// bool KeyPersist(RedisKey key, CommandFlags flags = CommandFlags.None); /// /// Return a random key from the currently selected database. /// /// The flags to use for this operation. - /// The random key, or nil when the database is empty. - /// https://redis.io/commands/randomkey + /// The random key, or when the database is empty. 
+ /// RedisKey KeyRandom(CommandFlags flags = CommandFlags.None); /// - /// Renames key to newkey. It returns an error when the source and destination names are the same, or when key does not exist. + /// Returns the reference count of the object stored at . + /// + /// The key to get a reference count for. + /// The flags to use for this operation. + /// The number of references ( if the key does not exist). + /// + long? KeyRefCount(RedisKey key, CommandFlags flags = CommandFlags.None); + + /// + /// Renames to . + /// It returns an error when the source and destination names are the same, or when key does not exist. /// /// The key to rename. /// The key to rename to. /// What conditions to rename under (defaults to always). /// The flags to use for this operation. - /// True if the key was renamed, false otherwise. - /// https://redis.io/commands/rename - /// https://redis.io/commands/renamenx + /// if the key was renamed, otherwise. + /// + /// See + /// , + /// . + /// bool KeyRename(RedisKey key, RedisKey newKey, When when = When.Always, CommandFlags flags = CommandFlags.None); /// /// Create a key associated with a value that is obtained by deserializing the provided serialized value (obtained via DUMP). - /// If ttl is 0 the key is created without any expire, otherwise the specified expire time(in milliseconds) is set. + /// If is 0 the key is created without any expire, otherwise the specified expire time (in milliseconds) is set. /// /// The key to restore. /// The value of the key. /// The expiry to set. /// The flags to use for this operation. - /// https://redis.io/commands/restore + /// void KeyRestore(RedisKey key, byte[] value, TimeSpan? expiry = null, CommandFlags flags = CommandFlags.None); /// - /// Returns the remaining time to live of a key that has a timeout. This introspection capability allows a Redis client to check how many seconds a given key will continue to be part of the dataset. 
+ /// Returns the remaining time to live of a key that has a timeout. + /// This introspection capability allows a Redis client to check how many seconds a given key will continue to be part of the dataset. /// /// The key to check. /// The flags to use for this operation. - /// TTL, or nil when key does not exist or does not have a timeout. - /// https://redis.io/commands/ttl + /// TTL, or when key does not exist or does not have a timeout. + /// TimeSpan? KeyTimeToLive(RedisKey key, CommandFlags flags = CommandFlags.None); /// - /// Returns the string representation of the type of the value stored at key. The different types that can be returned are: string, list, set, zset and hash. + /// Alters the last access time of a key. + /// + /// The key to touch. + /// The flags to use for this operation. + /// if the key was touched, otherwise. + /// + bool KeyTouch(RedisKey key, CommandFlags flags = CommandFlags.None); + + /// + /// Alters the last access time of the specified . A key is ignored if it does not exist. + /// + /// The keys to touch. + /// The flags to use for this operation. + /// The number of keys that were touched. + /// + long KeyTouch(RedisKey[] keys, CommandFlags flags = CommandFlags.None); + + /// + /// Returns the string representation of the type of the value stored at key. + /// The different types that can be returned are: string, list, set, zset and hash. /// /// The key to get the type of. /// The flags to use for this operation. /// Type of key, or none when key does not exist. - /// https://redis.io/commands/type + /// RedisType KeyType(RedisKey key, CommandFlags flags = CommandFlags.None); /// - /// Returns the element at index index in the list stored at key. The index is zero-based, so 0 means the first element, 1 the second element and so on. Negative indices can be used to designate elements starting at the tail of the list. Here, -1 means the last element, -2 means the penultimate and so forth. 
+ /// Returns the element at index in the list stored at key. + /// The index is zero-based, so 0 means the first element, 1 the second element and so on. + /// Negative indices can be used to designate elements starting at the tail of the list. + /// Here, -1 means the last element, -2 means the penultimate and so forth. /// /// The key of the list. - /// The index position to ge the value at. + /// The index position to get the value at. /// The flags to use for this operation. - /// The requested element, or nil when index is out of range. - /// https://redis.io/commands/lindex + /// The requested element, or when index is out of range. + /// RedisValue ListGetByIndex(RedisKey key, long index, CommandFlags flags = CommandFlags.None); /// @@ -615,7 +1230,7 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The value to insert. /// The flags to use for this operation. /// The length of the list after the insert operation, or -1 when the value pivot was not found. - /// https://redis.io/commands/linsert + /// long ListInsertAfter(RedisKey key, RedisValue pivot, RedisValue value, CommandFlags flags = CommandFlags.None); /// @@ -627,7 +1242,7 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The value to insert. /// The flags to use for this operation. /// The length of the list after the insert operation, or -1 when the value pivot was not found. - /// https://redis.io/commands/linsert + /// long ListInsertBefore(RedisKey key, RedisValue pivot, RedisValue value, CommandFlags flags = CommandFlags.None); /// @@ -635,43 +1250,102 @@ public interface IDatabase : IRedis, IDatabaseAsync /// /// The key of the list. /// The flags to use for this operation. - /// The value of the first element, or nil when key does not exist. - /// https://redis.io/commands/lpop + /// The value of the first element, or when key does not exist. 
+ /// RedisValue ListLeftPop(RedisKey key, CommandFlags flags = CommandFlags.None); /// - /// Insert the specified value at the head of the list stored at key. If key does not exist, it is created as empty list before performing the push operations. + /// Removes and returns count elements from the head of the list stored at key. + /// If the list contains less than count elements, removes and returns the number of elements in the list. + /// + /// The key of the list. + /// The number of elements to remove. + /// The flags to use for this operation. + /// Array of values that were popped, or if the key doesn't exist. + /// + RedisValue[] ListLeftPop(RedisKey key, long count, CommandFlags flags = CommandFlags.None); + + /// + /// Removes and returns at most elements from the first non-empty list in . + /// Starts on the left side of the list. + /// + /// The keys to look through for elements to pop. + /// The maximum number of elements to pop from the list. + /// The flags to use for this operation. + /// A span of contiguous elements from the list, or if no non-empty lists are found. + /// + ListPopResult ListLeftPop(RedisKey[] keys, long count, CommandFlags flags = CommandFlags.None); + + /// + /// Scans through the list stored at looking for , returning the 0-based + /// index of the first matching element. + /// + /// The key of the list. + /// The element to search for. + /// The rank of the first element to return, within the sub-list of matching indexes in the case of multiple matches. + /// The maximum number of elements to scan through before stopping, defaults to 0 (a full scan of the list.) + /// The flags to use for this operation. + /// The 0-based index of the first matching element, or -1 if not found. 
+ /// + long ListPosition(RedisKey key, RedisValue element, long rank = 1, long maxLength = 0, CommandFlags flags = CommandFlags.None); + + /// + /// Scans through the list stored at looking for instances of , returning the 0-based + /// indexes of any matching elements. + /// + /// The key of the list. + /// The element to search for. + /// The number of matches to find. A count of 0 will return the indexes of all occurrences of the element. + /// The rank of the first element to return, within the sub-list of matching indexes in the case of multiple matches. + /// The maximum number of elements to scan through before stopping, defaults to 0 (a full scan of the list.) + /// The flags to use for this operation. + /// An array of at most of indexes of matching elements. If none are found, and empty array is returned. + /// + long[] ListPositions(RedisKey key, RedisValue element, long count, long rank = 1, long maxLength = 0, CommandFlags flags = CommandFlags.None); + + /// + /// Insert the specified value at the head of the list stored at key. + /// If key does not exist, it is created as empty list before performing the push operations. /// /// The key of the list. /// The value to add to the head of the list. /// Which conditions to add to the list under (defaults to always). /// The flags to use for this operation. /// The length of the list after the push operations. - /// https://redis.io/commands/lpush - /// https://redis.io/commands/lpushx + /// + /// See + /// , + /// . + /// long ListLeftPush(RedisKey key, RedisValue value, When when = When.Always, CommandFlags flags = CommandFlags.None); /// - /// Insert the specified value at the head of the list stored at key. If key does not exist, it is created as empty list before performing the push operations. + /// Insert the specified value at the head of the list stored at key. + /// If key does not exist, it is created as empty list before performing the push operations. /// /// The key of the list. 
- /// The values to add to the head of the list. + /// The value to add to the head of the list. /// Which conditions to add to the list under (defaults to always). /// The flags to use for this operation. /// The length of the list after the push operations. - /// https://redis.io/commands/lpush - /// https://redis.io/commands/lpushx + /// + /// See + /// , + /// . + /// long ListLeftPush(RedisKey key, RedisValue[] values, When when = When.Always, CommandFlags flags = CommandFlags.None); /// - /// Insert all the specified values at the head of the list stored at key. If key does not exist, it is created as empty list before performing the push operations. - /// Elements are inserted one after the other to the head of the list, from the leftmost element to the rightmost element. So for instance the command LPUSH mylist a b c will result into a list containing c as first element, b as second element and a as third element. + /// Insert all the specified values at the head of the list stored at key. + /// If key does not exist, it is created as empty list before performing the push operations. + /// Elements are inserted one after the other to the head of the list, from the leftmost element to the rightmost element. + /// So for instance the command LPUSH mylist a b c will result into a list containing c as first element, b as second element and a as third element. /// /// The key of the list. /// The values to add to the head of the list. /// The flags to use for this operation. /// The length of the list after the push operations. - /// https://redis.io/commands/lpush + /// long ListLeftPush(RedisKey key, RedisValue[] values, CommandFlags flags); /// @@ -680,11 +1354,25 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The key of the list. /// The flags to use for this operation. /// The length of the list at key. 
- /// https://redis.io/commands/llen + /// long ListLength(RedisKey key, CommandFlags flags = CommandFlags.None); /// - /// Returns the specified elements of the list stored at key. The offsets start and stop are zero-based indexes, with 0 being the first element of the list (the head of the list), 1 being the next element and so on. + /// Returns and removes the first or last element of the list stored at , and pushes the element + /// as the first or last element of the list stored at . + /// + /// The key of the list to remove from. + /// The key of the list to move to. + /// What side of the list to remove from. + /// What side of the list to move to. + /// The flags to use for this operation. + /// The element being popped and pushed or if there is no element to move. + /// + RedisValue ListMove(RedisKey sourceKey, RedisKey destinationKey, ListSide sourceSide, ListSide destinationSide, CommandFlags flags = CommandFlags.None); + + /// + /// Returns the specified elements of the list stored at key. + /// The offsets start and stop are zero-based indexes, with 0 being the first element of the list (the head of the list), 1 being the next element and so on. /// These offsets can also be negative numbers indicating offsets starting at the end of the list.For example, -1 is the last element of the list, -2 the penultimate, and so on. /// Note that if you have a list of numbers from 0 to 100, LRANGE list 0 10 will return 11 elements, that is, the rightmost item is included. /// @@ -693,21 +1381,24 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The stop index of the list. /// The flags to use for this operation. /// List of elements in the specified range. - /// https://redis.io/commands/lrange + /// RedisValue[] ListRange(RedisKey key, long start = 0, long stop = -1, CommandFlags flags = CommandFlags.None); /// - /// Removes the first count occurrences of elements equal to value from the list stored at key. 
The count argument influences the operation in the following ways: - /// count > 0: Remove elements equal to value moving from head to tail. - /// count < 0: Remove elements equal to value moving from tail to head. - /// count = 0: Remove all elements equal to value. + /// Removes the first count occurrences of elements equal to value from the list stored at key. + /// The count argument influences the operation in the following ways: + /// + /// count > 0: Remove elements equal to value moving from head to tail. + /// count < 0: Remove elements equal to value moving from tail to head. + /// count = 0: Remove all elements equal to value. + /// /// /// The key of the list. /// The value to remove from the list. /// The count behavior (see method summary). /// The flags to use for this operation. /// The number of removed elements. - /// https://redis.io/commands/lrem + /// long ListRemove(RedisKey key, RedisValue value, long count = 0, CommandFlags flags = CommandFlags.None); /// @@ -715,10 +1406,32 @@ public interface IDatabase : IRedis, IDatabaseAsync /// /// The key of the list. /// The flags to use for this operation. - /// The element being popped. - /// https://redis.io/commands/rpop + /// The element being popped, or when key does not exist. + /// RedisValue ListRightPop(RedisKey key, CommandFlags flags = CommandFlags.None); + /// + /// Removes and returns count elements from the end of the list stored at key. + /// If the list contains less than count elements, removes and returns the number of elements in the list. + /// + /// The key of the list. + /// The number of elements to pop. + /// The flags to use for this operation. + /// Array of values that were popped, or if the key doesn't exist. + /// + RedisValue[] ListRightPop(RedisKey key, long count, CommandFlags flags = CommandFlags.None); + + /// + /// Removes and returns at most elements from the first non-empty list in . + /// Starts on the right side of the list. 
+ /// + /// The keys to look through for elements to pop. + /// The maximum number of elements to pop from the list. + /// The flags to use for this operation. + /// A span of contiguous elements from the list, or if no non-empty lists are found. + /// + ListPopResult ListRightPop(RedisKey[] keys, long count, CommandFlags flags = CommandFlags.None); + /// /// Atomically returns and removes the last element (tail) of the list stored at source, and pushes the element at the first element (head) of the list stored at destination. /// @@ -726,64 +1439,77 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The key of the destination list. /// The flags to use for this operation. /// The element being popped and pushed. - /// https://redis.io/commands/rpoplpush + /// RedisValue ListRightPopLeftPush(RedisKey source, RedisKey destination, CommandFlags flags = CommandFlags.None); /// - /// Insert the specified value at the tail of the list stored at key. If key does not exist, it is created as empty list before performing the push operation. + /// Insert the specified value at the tail of the list stored at key. + /// If key does not exist, it is created as empty list before performing the push operation. /// /// The key of the list. /// The value to add to the tail of the list. /// Which conditions to add to the list under. /// The flags to use for this operation. /// The length of the list after the push operation. - /// https://redis.io/commands/rpush - /// https://redis.io/commands/rpushx + /// + /// See + /// , + /// . + /// long ListRightPush(RedisKey key, RedisValue value, When when = When.Always, CommandFlags flags = CommandFlags.None); /// - /// Insert the specified value at the tail of the list stored at key. If key does not exist, it is created as empty list before performing the push operation. + /// Insert the specified value at the tail of the list stored at key. 
+ /// If key does not exist, it is created as empty list before performing the push operation. /// /// The key of the list. /// The values to add to the tail of the list. /// Which conditions to add to the list under. /// The flags to use for this operation. /// The length of the list after the push operation. - /// https://redis.io/commands/rpush - /// https://redis.io/commands/rpushx + /// + /// See + /// , + /// . + /// long ListRightPush(RedisKey key, RedisValue[] values, When when = When.Always, CommandFlags flags = CommandFlags.None); /// - /// Insert all the specified values at the tail of the list stored at key. If key does not exist, it is created as empty list before performing the push operation. - /// Elements are inserted one after the other to the tail of the list, from the leftmost element to the rightmost element. So for instance the command RPUSH mylist a b c will result into a list containing a as first element, b as second element and c as third element. + /// Insert all the specified values at the tail of the list stored at key. + /// If key does not exist, it is created as empty list before performing the push operation. + /// Elements are inserted one after the other to the tail of the list, from the leftmost element to the rightmost element. + /// So for instance the command RPUSH mylist a b c will result into a list containing a as first element, b as second element and c as third element. /// /// The key of the list. /// The values to add to the tail of the list. /// The flags to use for this operation. /// The length of the list after the push operation. - /// https://redis.io/commands/rpush + /// long ListRightPush(RedisKey key, RedisValue[] values, CommandFlags flags); /// - /// Sets the list element at index to value. For more information on the index argument, see ListGetByIndex. An error is returned for out of range indexes. + /// Sets the list element at index to value. + /// For more information on the index argument, see . 
+ /// An error is returned for out of range indexes. /// /// The key of the list. /// The index to set the value at. /// The values to add to the list. /// The flags to use for this operation. - /// https://redis.io/commands/lset + /// void ListSetByIndex(RedisKey key, long index, RedisValue value, CommandFlags flags = CommandFlags.None); /// - /// Trim an existing list so that it will contain only the specified range of elements specified. Both start and stop are zero-based indexes, where 0 is the first element of the list (the head), 1 the next element and so on. - /// For example: LTRIM foobar 0 2 will modify the list stored at foobar so that only the first three elements of the list will remain. + /// Trim an existing list so that it will contain only the specified range of elements specified. + /// Both start and stop are zero-based indexes, where 0 is the first element of the list (the head), 1 the next element and so on. + /// For example: LTRIM foobar 0 2 will modify the list stored at foobar so that only the first three elements of the list will remain. /// start and end can also be negative numbers indicating offsets from the end of the list, where -1 is the last element of the list, -2 the penultimate element and so on. /// /// The key of the list. /// The start index of the list to trim to. /// The end index of the list to trim to. /// The flags to use for this operation. - /// https://redis.io/commands/ltrim + /// void ListTrim(RedisKey key, long start, long stop, CommandFlags flags = CommandFlags.None); /// @@ -793,7 +1519,7 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The value to set at the key. /// The expiration of the lock key. /// The flags to use for this operation. - /// True if the lock was successfully extended. + /// if the lock was successfully extended. 
bool LockExtend(RedisKey key, RedisValue value, TimeSpan expiry, CommandFlags flags = CommandFlags.None); /// @@ -808,9 +1534,9 @@ public interface IDatabase : IRedis, IDatabaseAsync /// Releases a lock, if the token value is correct. /// /// The key of the lock. - /// The value at the key tht must match. + /// The value at the key that must match. /// The flags to use for this operation. - /// True if the lock was successfully released, false otherwise. + /// if the lock was successfully released, otherwise. bool LockRelease(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None); /// @@ -820,7 +1546,7 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The value to set at the key. /// The expiration of the lock key. /// The flags to use for this operation. - /// True if the lock was successfully taken, false otherwise. + /// if the lock was successfully taken, otherwise. bool LockTake(RedisKey key, RedisValue value, TimeSpan expiry, CommandFlags flags = CommandFlags.None); /// @@ -829,31 +1555,32 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The channel to publish to. /// The message to send. /// The flags to use for this operation. - /// The number of clients that received the message. - /// https://redis.io/commands/publish + /// + /// The number of clients that received the message *on the destination server*, + /// note that this doesn't mean much in a cluster as clients can get the message through other nodes. + /// + /// long Publish(RedisChannel channel, RedisValue message, CommandFlags flags = CommandFlags.None); /// - /// Execute an arbitrary command against the server; this is primarily intended for - /// executing modules, but may also be used to provide access to new features that lack - /// a direct API. + /// Execute an arbitrary command against the server; this is primarily intended for executing modules, + /// but may also be used to provide access to new features that lack a direct API. 
/// /// The command to run. /// The arguments to pass for the command. - /// This API should be considered an advanced feature; inappropriate use can be harmful - /// A dynamic representation of the command's result + /// A dynamic representation of the command's result. + /// This API should be considered an advanced feature; inappropriate use can be harmful. RedisResult Execute(string command, params object[] args); /// - /// Execute an arbitrary command against the server; this is primarily intended for - /// executing modules, but may also be used to provide access to new features that lack - /// a direct API. + /// Execute an arbitrary command against the server; this is primarily intended for executing modules, + /// but may also be used to provide access to new features that lack a direct API. /// /// The command to run. /// The arguments to pass for the command. /// The flags to use for this operation. - /// This API should be considered an advanced feature; inappropriate use can be harmful - /// A dynamic representation of the command's result + /// A dynamic representation of the command's result. + /// This API should be considered an advanced feature; inappropriate use can be harmful. RedisResult Execute(string command, ICollection args, CommandFlags flags = CommandFlags.None); /// @@ -863,21 +1590,28 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The keys to execute against. /// The values to execute against. /// The flags to use for this operation. - /// A dynamic representation of the script's result - /// https://redis.io/commands/eval - /// https://redis.io/commands/evalsha - RedisResult ScriptEvaluate(string script, RedisKey[] keys = null, RedisValue[] values = null, CommandFlags flags = CommandFlags.None); + /// A dynamic representation of the script's result. + /// + /// See + /// , + /// . + /// + RedisResult ScriptEvaluate(string script, RedisKey[]? keys = null, RedisValue[]? 
values = null, CommandFlags flags = CommandFlags.None); /// - /// Execute a Lua script against the server using just the SHA1 hash + /// Execute a Lua script against the server using just the SHA1 hash. /// /// The hash of the script to execute. /// The keys to execute against. /// The values to execute against. /// The flags to use for this operation. - /// A dynamic representation of the script's result - /// https://redis.io/commands/evalsha - RedisResult ScriptEvaluate(byte[] hash, RedisKey[] keys = null, RedisValue[] values = null, CommandFlags flags = CommandFlags.None); + /// A dynamic representation of the script's result. + /// + /// Be aware that this method is not resilient to Redis server restarts. Use instead. + /// + /// + [EditorBrowsable(EditorBrowsableState.Never)] + RedisResult ScriptEvaluate(byte[] hash, RedisKey[]? keys = null, RedisValue[]? values = null, CommandFlags flags = CommandFlags.None); /// /// Execute a lua script against the server, using previously prepared script. @@ -886,9 +1620,9 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The script to execute. /// The parameters to pass to the script. /// The flags to use for this operation. - /// A dynamic representation of the script's result - /// https://redis.io/commands/eval - RedisResult ScriptEvaluate(LuaScript script, object parameters = null, CommandFlags flags = CommandFlags.None); + /// A dynamic representation of the script's result. + /// + RedisResult ScriptEvaluate(LuaScript script, object? parameters = null, CommandFlags flags = CommandFlags.None); /// /// Execute a lua script against the server, using previously prepared and loaded script. @@ -898,9 +1632,35 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The already-loaded script to execute. /// The parameters to pass to the script. /// The flags to use for this operation. 
- /// A dynamic representation of the script's result - /// https://redis.io/commands/eval - RedisResult ScriptEvaluate(LoadedLuaScript script, object parameters = null, CommandFlags flags = CommandFlags.None); + /// A dynamic representation of the script's result. + /// + RedisResult ScriptEvaluate(LoadedLuaScript script, object? parameters = null, CommandFlags flags = CommandFlags.None); + + /// + /// Read-only variant of the EVAL command that cannot execute commands that modify data. Execute a Lua script against the server. + /// + /// The script to execute. + /// The keys to execute against. + /// The values to execute against. + /// The flags to use for this operation. + /// A dynamic representation of the script's result. + /// + /// See + /// , + /// . + /// + RedisResult ScriptEvaluateReadOnly(string script, RedisKey[]? keys = null, RedisValue[]? values = null, CommandFlags flags = CommandFlags.None); + + /// + /// Read-only variant of the EVALSHA command that cannot execute commands that modify data. Execute a Lua script against the server using just the SHA1 hash. + /// + /// The hash of the script to execute. + /// The keys to execute against. + /// The values to execute against. + /// The flags to use for this operation. + /// A dynamic representation of the script's result. + /// + RedisResult ScriptEvaluateReadOnly(byte[] hash, RedisKey[]? keys = null, RedisValue[]? values = null, CommandFlags flags = CommandFlags.None); /// /// Add the specified member to the set stored at key. @@ -910,8 +1670,8 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The key of the set. /// The value to add to the set. /// The flags to use for this operation. - /// True if the specified member was not already present in the set, else False - /// https://redis.io/commands/sadd + /// if the specified member was not already present in the set, else . 
+ /// bool SetAdd(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None); /// @@ -923,7 +1683,7 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The values to add to the set. /// The flags to use for this operation. /// The number of elements that were added to the set, not including all the elements already present into the set. - /// https://redis.io/commands/sadd + /// long SetAdd(RedisKey key, RedisValue[] values, CommandFlags flags = CommandFlags.None); /// @@ -934,9 +1694,12 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The key of the second set. /// The flags to use for this operation. /// List with members of the resulting set. - /// https://redis.io/commands/sunion - /// https://redis.io/commands/sinter - /// https://redis.io/commands/sdiff + /// + /// See + /// , + /// , + /// . + /// RedisValue[] SetCombine(SetOperation operation, RedisKey first, RedisKey second, CommandFlags flags = CommandFlags.None); /// @@ -946,13 +1709,17 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The keys of the sets to operate on. /// The flags to use for this operation. /// List with members of the resulting set. - /// https://redis.io/commands/sunion - /// https://redis.io/commands/sinter - /// https://redis.io/commands/sdiff + /// + /// See + /// , + /// , + /// . + /// RedisValue[] SetCombine(SetOperation operation, RedisKey[] keys, CommandFlags flags = CommandFlags.None); /// - /// This command is equal to SetCombine, but instead of returning the resulting set, it is stored in destination. If destination already exists, it is overwritten. + /// This command is equal to SetCombine, but instead of returning the resulting set, it is stored in destination. + /// If destination already exists, it is overwritten. /// /// The operation to perform. /// The key of the destination set. @@ -960,41 +1727,80 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The key of the second set. /// The flags to use for this operation. 
/// The number of elements in the resulting set. - /// https://redis.io/commands/sunionstore - /// https://redis.io/commands/sinterstore - /// https://redis.io/commands/sdiffstore + /// + /// See + /// , + /// , + /// . + /// long SetCombineAndStore(SetOperation operation, RedisKey destination, RedisKey first, RedisKey second, CommandFlags flags = CommandFlags.None); /// - /// This command is equal to SetCombine, but instead of returning the resulting set, it is stored in destination. If destination already exists, it is overwritten. + /// This command is equal to SetCombine, but instead of returning the resulting set, it is stored in destination. + /// If destination already exists, it is overwritten. /// /// The operation to perform. /// The key of the destination set. /// The keys of the sets to operate on. /// The flags to use for this operation. /// The number of elements in the resulting set. - /// https://redis.io/commands/sunionstore - /// https://redis.io/commands/sinterstore - /// https://redis.io/commands/sdiffstore + /// + /// See + /// , + /// , + /// . + /// long SetCombineAndStore(SetOperation operation, RedisKey destination, RedisKey[] keys, CommandFlags flags = CommandFlags.None); /// - /// Returns if member is a member of the set stored at key. + /// Returns whether is a member of the set stored at . /// /// The key of the set. - /// The value to check for . + /// The value to check for. /// The flags to use for this operation. - /// 1 if the element is a member of the set. 0 if the element is not a member of the set, or if key does not exist. - /// https://redis.io/commands/sismember + /// + /// if the element is a member of the set. + /// if the element is not a member of the set, or if key does not exist. + /// + /// bool SetContains(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None); + /// + /// Returns whether each of is a member of the set stored at . + /// + /// The key of the set. + /// The members to check for. 
+ /// The flags to use for this operation. + /// + /// if the element is a member of the set. + /// if the element is not a member of the set, or if key does not exist. + /// + /// + bool[] SetContains(RedisKey key, RedisValue[] values, CommandFlags flags = CommandFlags.None); + + /// + /// + /// Returns the set cardinality (number of elements) of the intersection between the sets stored at the given . + /// + /// + /// If the intersection cardinality reaches partway through the computation, + /// the algorithm will exit and yield as the cardinality. + /// + /// + /// The keys of the sets. + /// The number of elements to check (defaults to 0 and means unlimited). + /// The flags to use for this operation. + /// The cardinality (number of elements) of the set, or 0 if key does not exist. + /// + long SetIntersectionLength(RedisKey[] keys, long limit = 0, CommandFlags flags = CommandFlags.None); + /// /// Returns the set cardinality (number of elements) of the set stored at key. /// /// The key of the set. /// The flags to use for this operation. /// The cardinality (number of elements) of the set, or 0 if key does not exist. - /// https://redis.io/commands/scard + /// long SetLength(RedisKey key, CommandFlags flags = CommandFlags.None); /// @@ -1003,19 +1809,23 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The key of the set. /// The flags to use for this operation. /// All elements of the set. - /// https://redis.io/commands/smembers + /// RedisValue[] SetMembers(RedisKey key, CommandFlags flags = CommandFlags.None); /// - /// Move member from the set at source to the set at destination. This operation is atomic. In every given moment the element will appear to be a member of source or destination for other clients. + /// Move member from the set at source to the set at destination. + /// This operation is atomic. In every given moment the element will appear to be a member of source or destination for other clients. 
/// When the specified element already exists in the destination set, it is only removed from the source set. /// /// The key of the source set. /// The key of the destination set. /// The value to move. /// The flags to use for this operation. - /// 1 if the element is moved. 0 if the element is not a member of source and no operation was performed. - /// https://redis.io/commands/smove + /// + /// if the element is moved. + /// if the element is not a member of source and no operation was performed. + /// + /// bool SetMove(RedisKey source, RedisKey destination, RedisValue value, CommandFlags flags = CommandFlags.None); /// @@ -1023,8 +1833,8 @@ public interface IDatabase : IRedis, IDatabaseAsync /// /// The key of the set. /// The flags to use for this operation. - /// The removed element, or nil when key does not exist. - /// https://redis.io/commands/spop + /// The removed element, or when key does not exist. + /// RedisValue SetPop(RedisKey key, CommandFlags flags = CommandFlags.None); /// @@ -1034,62 +1844,66 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The number of elements to return. /// The flags to use for this operation. /// An array of elements, or an empty array when key does not exist. - /// https://redis.io/commands/spop + /// RedisValue[] SetPop(RedisKey key, long count, CommandFlags flags = CommandFlags.None); /// - /// Return a random element from the set value stored at key. + /// Return a random element from the set value stored at . /// /// The key of the set. /// The flags to use for this operation. - /// The randomly selected element, or nil when key does not exist - /// https://redis.io/commands/srandmember + /// The randomly selected element, or when does not exist. + /// RedisValue SetRandomMember(RedisKey key, CommandFlags flags = CommandFlags.None); /// - /// Return an array of count distinct elements if count is positive. 
If called with a negative count the behavior changes and the command is allowed to return the same element multiple times. + /// Return an array of count distinct elements if count is positive. + /// If called with a negative count the behavior changes and the command is allowed to return the same element multiple times. /// In this case the number of returned elements is the absolute value of the specified count. /// /// The key of the set. /// The count of members to get. /// The flags to use for this operation. - /// An array of elements, or an empty array when key does not exist - /// https://redis.io/commands/srandmember + /// An array of elements, or an empty array when does not exist. + /// RedisValue[] SetRandomMembers(RedisKey key, long count, CommandFlags flags = CommandFlags.None); /// - /// Remove the specified member from the set stored at key. Specified members that are not a member of this set are ignored. + /// Remove the specified member from the set stored at key. + /// Specified members that are not a member of this set are ignored. /// /// The key of the set. /// The value to remove. /// The flags to use for this operation. - /// True if the specified member was already present in the set, else False - /// https://redis.io/commands/srem + /// if the specified member was already present in the set, otherwise. + /// bool SetRemove(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None); /// - /// Remove the specified members from the set stored at key. Specified members that are not a member of this set are ignored. + /// Remove the specified members from the set stored at key. + /// Specified members that are not a member of this set are ignored. /// /// The key of the set. /// The values to remove. /// The flags to use for this operation. /// The number of members that were removed from the set, not including non existing members. 
- /// https://redis.io/commands/srem + /// long SetRemove(RedisKey key, RedisValue[] values, CommandFlags flags = CommandFlags.None); /// - /// The SSCAN command is used to incrementally iterate over set + /// The SSCAN command is used to incrementally iterate over a set. /// /// The key of the set. /// The pattern to match. /// The page size to iterate by. /// The flags to use for this operation. /// Yields all matching elements of the set. - /// https://redis.io/commands/sscan + /// IEnumerable SetScan(RedisKey key, RedisValue pattern, int pageSize, CommandFlags flags); /// - /// The SSCAN command is used to incrementally iterate over set; note: to resume an iteration via cursor, cast the original enumerable or enumerator to IScanningCursor. + /// The SSCAN command is used to incrementally iterate over set. + /// Note: to resume an iteration via cursor, cast the original enumerable or enumerator to . /// /// The key of the set. /// The pattern to match. @@ -1098,15 +1912,17 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The page offset to start at. /// The flags to use for this operation. /// Yields all matching elements of the set. - /// https://redis.io/commands/sscan - IEnumerable SetScan(RedisKey key, RedisValue pattern = default(RedisValue), int pageSize = RedisBase.CursorUtils.DefaultLibraryPageSize, long cursor = RedisBase.CursorUtils.Origin, int pageOffset = 0, CommandFlags flags = CommandFlags.None); + /// + IEnumerable SetScan(RedisKey key, RedisValue pattern = default, int pageSize = RedisBase.CursorUtils.DefaultLibraryPageSize, long cursor = RedisBase.CursorUtils.Origin, int pageOffset = 0, CommandFlags flags = CommandFlags.None); /// - /// Sorts a list, set or sorted set (numerically or alphabetically, ascending by default); By default, the elements themselves are compared, but the values can also be - /// used to perform external key-lookups using the by parameter. 
By default, the elements themselves are returned, but external key-lookups (one or many) can - /// be performed instead by specifying the get parameter (note that # specifies the element itself, when used in get). - /// Referring to the redis SORT documentation for examples is recommended. When used in hashes, by and get - /// can be used to specify fields using -> notation (again, refer to redis documentation). + /// Sorts a list, set or sorted set (numerically or alphabetically, ascending by default). + /// By default, the elements themselves are compared, but the values can also be used to perform external key-lookups using the by parameter. + /// By default, the elements themselves are returned, but external key-lookups (one or many) can be performed instead by specifying + /// the get parameter (note that # specifies the element itself, when used in get). + /// Referring to the redis SORT documentation for examples is recommended. + /// When used in hashes, by and get can be used to specify fields using -> notation (again, refer to redis documentation). + /// Uses SORT_RO when possible. /// /// The key of the list, set, or sorted set. /// How many entries to skip on the return. @@ -1117,15 +1933,20 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The key pattern to sort by, if any e.g. ExternalKey_* would return the value of ExternalKey_{listvalue} for each entry. /// The flags to use for this operation. /// The sorted elements, or the external values if get is specified. - /// https://redis.io/commands/sort - RedisValue[] Sort(RedisKey key, long skip = 0, long take = -1, Order order = Order.Ascending, SortType sortType = SortType.Numeric, RedisValue by = default(RedisValue), RedisValue[] get = null, CommandFlags flags = CommandFlags.None); + /// + /// See + /// , + /// . + /// + RedisValue[] Sort(RedisKey key, long skip = 0, long take = -1, Order order = Order.Ascending, SortType sortType = SortType.Numeric, RedisValue by = default, RedisValue[]? 
get = null, CommandFlags flags = CommandFlags.None); /// - /// Sorts a list, set or sorted set (numerically or alphabetically, ascending by default); By default, the elements themselves are compared, but the values can also be - /// used to perform external key-lookups using the by parameter. By default, the elements themselves are returned, but external key-lookups (one or many) can - /// be performed instead by specifying the get parameter (note that # specifies the element itself, when used in get). - /// Referring to the redis SORT documentation for examples is recommended. When used in hashes, by and get - /// can be used to specify fields using -> notation (again, refer to redis documentation). + /// Sorts a list, set or sorted set (numerically or alphabetically, ascending by default). + /// By default, the elements themselves are compared, but the values can also be used to perform external key-lookups using the by parameter. + /// By default, the elements themselves are returned, but external key-lookups (one or many) can be performed instead by specifying + /// the get parameter (note that # specifies the element itself, when used in get). + /// Referring to the redis SORT documentation for examples is recommended. + /// When used in hashes, by and get can be used to specify fields using -> notation (again, refer to redis documentation). /// /// The destination key to store results in. /// The key of the list, set, or sorted set. @@ -1137,56 +1958,92 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The key pattern to sort by, if any e.g. ExternalKey_* would return the value of ExternalKey_{listvalue} for each entry. /// The flags to use for this operation. /// The number of elements stored in the new list. 
- /// https://redis.io/commands/sort - long SortAndStore(RedisKey destination, RedisKey key, long skip = 0, long take = -1, Order order = Order.Ascending, SortType sortType = SortType.Numeric, RedisValue by = default(RedisValue), RedisValue[] get = null, CommandFlags flags = CommandFlags.None); + /// + long SortAndStore(RedisKey destination, RedisKey key, long skip = 0, long take = -1, Order order = Order.Ascending, SortType sortType = SortType.Numeric, RedisValue by = default, RedisValue[]? get = null, CommandFlags flags = CommandFlags.None); - /// - /// Adds the specified member with the specified score to the sorted set stored at key. If the specified member is already a member of the sorted set, the score is updated and the element reinserted at the right position to ensure the correct ordering. - /// - /// The key of the sorted set. - /// The member to add to the sorted set. - /// The score for the member to add to the sorted set. - /// The flags to use for this operation. - /// True if the value was added, False if it already existed (the score is still updated) - /// https://redis.io/commands/zadd + /// + [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] bool SortedSetAdd(RedisKey key, RedisValue member, double score, CommandFlags flags); + /// + [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] + bool SortedSetAdd(RedisKey key, RedisValue member, double score, When when, CommandFlags flags = CommandFlags.None); + /// - /// Adds the specified member with the specified score to the sorted set stored at key. If the specified member is already a member of the sorted set, the score is updated and the element reinserted at the right position to ensure the correct ordering. + /// Adds the specified member with the specified score to the sorted set stored at key. + /// If the specified member is already a member of the sorted set, the score is updated and the element reinserted at the right position to ensure the correct ordering. 
/// /// The key of the sorted set. /// The member to add to the sorted set. /// The score for the member to add to the sorted set. /// What conditions to add the element under (defaults to always). /// The flags to use for this operation. - /// True if the value was added, False if it already existed (the score is still updated) - /// https://redis.io/commands/zadd - bool SortedSetAdd(RedisKey key, RedisValue member, double score, When when = When.Always, CommandFlags flags = CommandFlags.None); + /// if the value was added. if it already existed (the score is still updated). + /// + bool SortedSetAdd(RedisKey key, RedisValue member, double score, SortedSetWhen when = SortedSetWhen.Always, CommandFlags flags = CommandFlags.None); + + /// + [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] + long SortedSetAdd(RedisKey key, SortedSetEntry[] values, CommandFlags flags); + + /// + [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] + long SortedSetAdd(RedisKey key, SortedSetEntry[] values, When when, CommandFlags flags = CommandFlags.None); /// - /// Adds all the specified members with the specified scores to the sorted set stored at key. If a specified member is already a member of the sorted set, the score is updated and the element reinserted at the right position to ensure the correct ordering. + /// Adds all the specified members with the specified scores to the sorted set stored at key. + /// If a specified member is already a member of the sorted set, the score is updated and the element reinserted at the right position to ensure the correct ordering. /// /// The key of the sorted set. /// The members and values to add to the sorted set. + /// What conditions to add the element under (defaults to always). /// The flags to use for this operation. /// The number of elements added to the sorted sets, not including elements already existing for which the score was updated. 
- /// https://redis.io/commands/zadd - long SortedSetAdd(RedisKey key, SortedSetEntry[] values, CommandFlags flags); + /// + long SortedSetAdd(RedisKey key, SortedSetEntry[] values, SortedSetWhen when = SortedSetWhen.Always, CommandFlags flags = CommandFlags.None); /// - /// Adds all the specified members with the specified scores to the sorted set stored at key. If a specified member is already a member of the sorted set, the score is updated and the element reinserted at the right position to ensure the correct ordering. + /// Computes a set operation for multiple sorted sets (optionally using per-set ), + /// optionally performing a specific aggregation (defaults to ). + /// cannot be used with weights or aggregation. /// - /// The key of the sorted set. - /// The members and values to add to the sorted set. - /// What conditions to add the element under (defaults to always). + /// The operation to perform. + /// The keys of the sorted sets. + /// The optional weights per set that correspond to . + /// The aggregation method (defaults to ). /// The flags to use for this operation. - /// The number of elements added to the sorted sets, not including elements already existing for which the score was updated. - /// https://redis.io/commands/zadd - long SortedSetAdd(RedisKey key, SortedSetEntry[] values, When when = When.Always, CommandFlags flags = CommandFlags.None); + /// The resulting sorted set. + /// + /// See + /// , + /// , + /// . + /// + RedisValue[] SortedSetCombine(SetOperation operation, RedisKey[] keys, double[]? weights = null, Aggregate aggregate = Aggregate.Sum, CommandFlags flags = CommandFlags.None); + + /// + /// Computes a set operation for multiple sorted sets (optionally using per-set ), + /// optionally performing a specific aggregation (defaults to ). + /// cannot be used with weights or aggregation. + /// + /// The operation to perform. + /// The keys of the sorted sets. + /// The optional weights per set that correspond to . 
+ /// The aggregation method (defaults to ). + /// The flags to use for this operation. + /// The resulting sorted set with scores. + /// + /// See + /// , + /// , + /// . + /// + SortedSetEntry[] SortedSetCombineWithScores(SetOperation operation, RedisKey[] keys, double[]? weights = null, Aggregate aggregate = Aggregate.Sum, CommandFlags flags = CommandFlags.None); /// /// Computes a set operation over two sorted sets, and stores the result in destination, optionally performing /// a specific aggregation (defaults to sum). + /// cannot be used with aggregation. /// /// The operation to perform. /// The key to store the results in. @@ -1194,14 +2051,19 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The key of the second sorted set. /// The aggregation method (defaults to sum). /// The flags to use for this operation. - /// https://redis.io/commands/zunionstore - /// https://redis.io/commands/zinterstore - /// the number of elements in the resulting sorted set at destination + /// The number of elements in the resulting sorted set at destination. + /// + /// See + /// , + /// , + /// . + /// long SortedSetCombineAndStore(SetOperation operation, RedisKey destination, RedisKey first, RedisKey second, Aggregate aggregate = Aggregate.Sum, CommandFlags flags = CommandFlags.None); /// /// Computes a set operation over multiple sorted sets (optionally using per-set weights), and stores the result in destination, optionally performing /// a specific aggregation (defaults to sum). + /// cannot be used with aggregation. /// /// The operation to perform. /// The key to store the results in. @@ -1209,20 +2071,25 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The optional weights per set that correspond to . /// The aggregation method (defaults to sum). /// The flags to use for this operation. 
- /// https://redis.io/commands/zunionstore - /// https://redis.io/commands/zinterstore - /// the number of elements in the resulting sorted set at destination - long SortedSetCombineAndStore(SetOperation operation, RedisKey destination, RedisKey[] keys, double[] weights = null, Aggregate aggregate = Aggregate.Sum, CommandFlags flags = CommandFlags.None); + /// The number of elements in the resulting sorted set at destination. + /// + /// See + /// , + /// , + /// . + /// + long SortedSetCombineAndStore(SetOperation operation, RedisKey destination, RedisKey[] keys, double[]? weights = null, Aggregate aggregate = Aggregate.Sum, CommandFlags flags = CommandFlags.None); /// - /// Decrements the score of member in the sorted set stored at key by decrement. If member does not exist in the sorted set, it is added with -decrement as its score (as if its previous score was 0.0). + /// Decrements the score of member in the sorted set stored at key by decrement. + /// If member does not exist in the sorted set, it is added with -decrement as its score (as if its previous score was 0.0). /// /// The key of the sorted set. /// The member to decrement. /// The amount to decrement by. /// The flags to use for this operation. /// The new score of member. - /// https://redis.io/commands/zincrby + /// double SortedSetDecrement(RedisKey key, RedisValue member, double value, CommandFlags flags = CommandFlags.None); /// @@ -1233,9 +2100,19 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The amount to increment by. /// The flags to use for this operation. /// The new score of member. - /// https://redis.io/commands/zincrby + /// double SortedSetIncrement(RedisKey key, RedisValue member, double value, CommandFlags flags = CommandFlags.None); + /// + /// Returns the cardinality of the intersection of the sorted sets at . + /// + /// The keys of the sorted sets. 
+ /// If the intersection cardinality reaches partway through the computation, the algorithm will exit and yield as the cardinality (defaults to 0 meaning unlimited). + /// The flags to use for this operation. + /// The number of elements in the resulting intersection. + /// + long SortedSetIntersectionLength(RedisKey[] keys, long limit = 0, CommandFlags flags = CommandFlags.None); + /// /// Returns the sorted set cardinality (number of elements) of the sorted set stored at key. /// @@ -1245,11 +2122,12 @@ public interface IDatabase : IRedis, IDatabaseAsync /// Whether to exclude and from the range check (defaults to both inclusive). /// The flags to use for this operation. /// The cardinality (number of elements) of the sorted set, or 0 if key does not exist. - /// https://redis.io/commands/zcard + /// long SortedSetLength(RedisKey key, double min = double.NegativeInfinity, double max = double.PositiveInfinity, Exclude exclude = Exclude.None, CommandFlags flags = CommandFlags.None); /// - /// When all the elements in a sorted set are inserted with the same score, in order to force lexicographical ordering, this command returns the number of elements in the sorted set at key with a value between min and max. + /// When all the elements in a sorted set are inserted with the same score, in order to force lexicographical ordering. + /// This command returns the number of elements in the sorted set at key with a value between min and max. /// /// The key of the sorted set. /// The min value to filter by. @@ -1257,12 +2135,62 @@ public interface IDatabase : IRedis, IDatabaseAsync /// Whether to exclude and from the range check (defaults to both inclusive). /// The flags to use for this operation. /// The number of elements in the specified score range. 
- /// https://redis.io/commands/zlexcount + /// long SortedSetLengthByValue(RedisKey key, RedisValue min, RedisValue max, Exclude exclude = Exclude.None, CommandFlags flags = CommandFlags.None); /// - /// Returns the specified range of elements in the sorted set stored at key. By default the elements are considered to be ordered from the lowest to the highest score. Lexicographical order is used for elements with equal score. - /// Both start and stop are zero-based indexes, where 0 is the first element, 1 is the next element and so on. They can also be negative numbers indicating offsets from the end of the sorted set, with -1 being the last element of the sorted set, -2 the penultimate element and so on. + /// Returns a random element from the sorted set value stored at . + /// + /// The key of the sorted set. + /// The flags to use for this operation. + /// The randomly selected element, or when does not exist. + /// + RedisValue SortedSetRandomMember(RedisKey key, CommandFlags flags = CommandFlags.None); + + /// + /// Returns an array of random elements from the sorted set value stored at . + /// + /// The key of the sorted set. + /// + /// + /// If the provided count argument is positive, returns an array of distinct elements. + /// The array's length is either or the sorted set's cardinality (ZCARD), whichever is lower. + /// + /// + /// If called with a negative count, the behavior changes and the command is allowed to return the same element multiple times. + /// In this case, the number of returned elements is the absolute value of the specified count. + /// + /// + /// The flags to use for this operation. + /// The randomly selected elements, or an empty array when does not exist. + /// + RedisValue[] SortedSetRandomMembers(RedisKey key, long count, CommandFlags flags = CommandFlags.None); + + /// + /// Returns an array of random elements from the sorted set value stored at . + /// + /// The key of the sorted set. 
+ /// + /// + /// If the provided count argument is positive, returns an array of distinct elements. + /// The array's length is either or the sorted set's cardinality (ZCARD), whichever is lower. + /// + /// + /// If called with a negative count, the behavior changes and the command is allowed to return the same element multiple times. + /// In this case, the number of returned elements is the absolute value of the specified count. + /// + /// + /// The flags to use for this operation. + /// The randomly selected elements with scores, or an empty array when does not exist. + /// + SortedSetEntry[] SortedSetRandomMembersWithScores(RedisKey key, long count, CommandFlags flags = CommandFlags.None); + + /// + /// Returns the specified range of elements in the sorted set stored at key. + /// By default the elements are considered to be ordered from the lowest to the highest score. + /// Lexicographical order is used for elements with equal score. + /// Both start and stop are zero-based indexes, where 0 is the first element, 1 is the next element and so on. + /// They can also be negative numbers indicating offsets from the end of the sorted set, with -1 being the last element of the sorted set, -2 the penultimate element and so on. /// /// The key of the sorted set. /// The start index to get. @@ -1270,13 +2198,51 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The order to sort by (defaults to ascending). /// The flags to use for this operation. /// List of elements in the specified range. - /// https://redis.io/commands/zrange - /// https://redis.io/commands/zrevrange + /// + /// See + /// , + /// . + /// RedisValue[] SortedSetRangeByRank(RedisKey key, long start = 0, long stop = -1, Order order = Order.Ascending, CommandFlags flags = CommandFlags.None); /// - /// Returns the specified range of elements in the sorted set stored at key. By default the elements are considered to be ordered from the lowest to the highest score. 
Lexicographical order is used for elements with equal score. - /// Both start and stop are zero-based indexes, where 0 is the first element, 1 is the next element and so on. They can also be negative numbers indicating offsets from the end of the sorted set, with -1 being the last element of the sorted set, -2 the penultimate element and so on. + /// Takes the specified range of elements in the sorted set of the + /// and stores them in a new sorted set at the . + /// + /// The sorted set to take the range from. + /// Where the resulting set will be stored. + /// The starting point in the sorted set. If is , this should be a string. + /// The stopping point in the range of the sorted set. If is , this should be a string. + /// The ordering criteria to use for the range. Choices are , , and (defaults to ). + /// Whether to exclude and from the range check (defaults to both inclusive). + /// + /// The direction to consider the and in. + /// If , the must be smaller than the . + /// If , must be smaller than . + /// + /// The number of elements into the sorted set to skip. Note: this iterates after sorting so incurs O(n) cost for large values. + /// The maximum number of elements to pull into the new () set. + /// The flags to use for this operation. + /// The cardinality of (number of elements in) the newly created sorted set. + /// + long SortedSetRangeAndStore( + RedisKey sourceKey, + RedisKey destinationKey, + RedisValue start, + RedisValue stop, + SortedSetOrder sortedSetOrder = SortedSetOrder.ByRank, + Exclude exclude = Exclude.None, + Order order = Order.Ascending, + long skip = 0, + long? take = null, + CommandFlags flags = CommandFlags.None); + + /// + /// Returns the specified range of elements in the sorted set stored at key. + /// By default the elements are considered to be ordered from the lowest to the highest score. + /// Lexicographical order is used for elements with equal score. 
+ /// Both start and stop are zero-based indexes, where 0 is the first element, 1 is the next element and so on. + /// They can also be negative numbers indicating offsets from the end of the sorted set, with -1 being the last element of the sorted set, -2 the penultimate element and so on. /// /// The key of the sorted set. /// The start index to get. @@ -1284,13 +2250,19 @@ public interface IDatabase : IRedis, IDatabaseAsync /// The order to sort by (defaults to ascending). /// The flags to use for this operation. /// List of elements in the specified range. - /// https://redis.io/commands/zrange - /// https://redis.io/commands/zrevrange + /// + /// See + /// , + /// . + /// SortedSetEntry[] SortedSetRangeByRankWithScores(RedisKey key, long start = 0, long stop = -1, Order order = Order.Ascending, CommandFlags flags = CommandFlags.None); /// - /// Returns the specified range of elements in the sorted set stored at key. By default the elements are considered to be ordered from the lowest to the highest score. Lexicographical order is used for elements with equal score. - /// Start and stop are used to specify the min and max range for score values. Similar to other range methods the values are inclusive. + /// Returns the specified range of elements in the sorted set stored at key. + /// By default the elements are considered to be ordered from the lowest to the highest score. + /// Lexicographical order is used for elements with equal score. + /// Start and stop are used to specify the min and max range for score values. + /// Similar to other range methods the values are inclusive. /// /// The key of the sorted set. /// The minimum score to filter by. @@ -1301,9 +2273,13 @@ public interface IDatabase : IRedis, IDatabaseAsync /// How many items to take. /// The flags to use for this operation. /// List of elements in the specified score range. 
- /// https://redis.io/commands/zrangebyscore - /// https://redis.io/commands/zrevrangebyscore - RedisValue[] SortedSetRangeByScore(RedisKey key, + /// + /// See + /// , + /// . + /// + RedisValue[] SortedSetRangeByScore( + RedisKey key, double start = double.NegativeInfinity, double stop = double.PositiveInfinity, Exclude exclude = Exclude.None, @@ -1313,8 +2289,11 @@ RedisValue[] SortedSetRangeByScore(RedisKey key, CommandFlags flags = CommandFlags.None); /// - /// Returns the specified range of elements in the sorted set stored at key. By default the elements are considered to be ordered from the lowest to the highest score. Lexicographical order is used for elements with equal score. - /// Start and stop are used to specify the min and max range for score values. Similar to other range methods the values are inclusive. + /// Returns the specified range of elements in the sorted set stored at key. + /// By default the elements are considered to be ordered from the lowest to the highest score. + /// Lexicographical order is used for elements with equal score. + /// Start and stop are used to specify the min and max range for score values. + /// Similar to other range methods the values are inclusive. /// /// The key of the sorted set. /// The minimum score to filter by. @@ -1325,9 +2304,13 @@ RedisValue[] SortedSetRangeByScore(RedisKey key, /// How many items to take. /// The flags to use for this operation. /// List of elements in the specified score range. - /// https://redis.io/commands/zrangebyscore - /// https://redis.io/commands/zrevrangebyscore - SortedSetEntry[] SortedSetRangeByScoreWithScores(RedisKey key, + /// + /// See + /// , + /// . 
+ /// + SortedSetEntry[] SortedSetRangeByScoreWithScores( + RedisKey key, double start = double.NegativeInfinity, double stop = double.PositiveInfinity, Exclude exclude = Exclude.None, @@ -1337,7 +2320,8 @@ SortedSetEntry[] SortedSetRangeByScoreWithScores(RedisKey key, CommandFlags flags = CommandFlags.None); /// - /// When all the elements in a sorted set are inserted with the same score, in order to force lexicographical ordering, this command returns all the elements in the sorted set at key with a value between min and max. + /// When all the elements in a sorted set are inserted with the same score, in order to force lexicographical ordering. + /// This command returns all the elements in the sorted set at key with a value between min and max. /// /// The key of the sorted set. /// The min value to filter by. @@ -1346,9 +2330,10 @@ SortedSetEntry[] SortedSetRangeByScoreWithScores(RedisKey key, /// How many items to skip. /// How many items to take. /// The flags to use for this operation. - /// https://redis.io/commands/zrangebylex - /// list of elements in the specified score range. - RedisValue[] SortedSetRangeByValue(RedisKey key, + /// List of elements in the specified score range. + /// + RedisValue[] SortedSetRangeByValue( + RedisKey key, RedisValue min, RedisValue max, Exclude exclude, @@ -1357,22 +2342,27 @@ RedisValue[] SortedSetRangeByValue(RedisKey key, CommandFlags flags = CommandFlags.None); // defaults removed to avoid ambiguity with overload with order /// - /// When all the elements in a sorted set are inserted with the same score, in order to force lexicographical ordering, this command returns all the elements in the sorted set at key with a value between min and max. + /// When all the elements in a sorted set are inserted with the same score, in order to force lexicographical ordering. + /// This command returns all the elements in the sorted set at key with a value between min and max. /// /// The key of the sorted set. 
/// The min value to filter by. /// The max value to filter by. /// Which of and to exclude (defaults to both inclusive). - /// Whether to order the data ascending or descending + /// Whether to order the data ascending or descending. /// How many items to skip. /// How many items to take. /// The flags to use for this operation. - /// https://redis.io/commands/zrangebylex - /// https://redis.io/commands/zrevrangebylex - /// list of elements in the specified score range. - RedisValue[] SortedSetRangeByValue(RedisKey key, - RedisValue min = default(RedisValue), - RedisValue max = default(RedisValue), + /// List of elements in the specified score range. + /// + /// See + /// , + /// . + /// + RedisValue[] SortedSetRangeByValue( + RedisKey key, + RedisValue min = default, + RedisValue max = default, Exclude exclude = Exclude.None, Order order = Order.Ascending, long skip = 0, @@ -1380,15 +2370,19 @@ RedisValue[] SortedSetRangeByValue(RedisKey key, CommandFlags flags = CommandFlags.None); /// - /// Returns the rank of member in the sorted set stored at key, by default with the scores ordered from low to high. The rank (or index) is 0-based, which means that the member with the lowest score has rank 0. + /// Returns the rank of member in the sorted set stored at key, by default with the scores ordered from low to high. + /// The rank (or index) is 0-based, which means that the member with the lowest score has rank 0. /// /// The key of the sorted set. /// The member to get the rank of. /// The order to sort by (defaults to ascending). /// The flags to use for this operation. - /// If member exists in the sorted set, the rank of member; If member does not exist in the sorted set or key does not exist, null - /// https://redis.io/commands/zrank - /// https://redis.io/commands/zrevrank + /// If member exists in the sorted set, the rank of member. If member does not exist in the sorted set or key does not exist, . + /// + /// See + /// , + /// . + /// long? 
SortedSetRank(RedisKey key, RedisValue member, Order order = Order.Ascending, CommandFlags flags = CommandFlags.None); /// @@ -1397,8 +2391,8 @@ RedisValue[] SortedSetRangeByValue(RedisKey key, /// The key of the sorted set. /// The member to remove. /// The flags to use for this operation. - /// True if the member existed in the sorted set and was removed; False otherwise. - /// https://redis.io/commands/zrem + /// if the member existed in the sorted set and was removed. otherwise. + /// bool SortedSetRemove(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None); /// @@ -1408,18 +2402,21 @@ RedisValue[] SortedSetRangeByValue(RedisKey key, /// The members to remove. /// The flags to use for this operation. /// The number of members removed from the sorted set, not including non existing members. - /// https://redis.io/commands/zrem + /// long SortedSetRemove(RedisKey key, RedisValue[] members, CommandFlags flags = CommandFlags.None); /// - /// Removes all elements in the sorted set stored at key with rank between start and stop. Both start and stop are 0 -based indexes with 0 being the element with the lowest score. These indexes can be negative numbers, where they indicate offsets starting at the element with the highest score. For example: -1 is the element with the highest score, -2 the element with the second highest score and so forth. + /// Removes all elements in the sorted set stored at key with rank between start and stop. + /// Both start and stop are 0 -based indexes with 0 being the element with the lowest score. + /// These indexes can be negative numbers, where they indicate offsets starting at the element with the highest score. + /// For example: -1 is the element with the highest score, -2 the element with the second highest score and so forth. /// /// The key of the sorted set. /// The minimum rank to remove. /// The maximum rank to remove. /// The flags to use for this operation. /// The number of elements removed. 
- /// https://redis.io/commands/zremrangebyrank + /// long SortedSetRemoveRangeByRank(RedisKey key, long start, long stop, CommandFlags flags = CommandFlags.None); /// @@ -1431,34 +2428,36 @@ RedisValue[] SortedSetRangeByValue(RedisKey key, /// Which of and to exclude (defaults to both inclusive). /// The flags to use for this operation. /// The number of elements removed. - /// https://redis.io/commands/zremrangebyscore + /// long SortedSetRemoveRangeByScore(RedisKey key, double start, double stop, Exclude exclude = Exclude.None, CommandFlags flags = CommandFlags.None); /// - /// When all the elements in a sorted set are inserted with the same score, in order to force lexicographical ordering, this command removes all elements in the sorted set stored at key between the lexicographical range specified by min and max. + /// When all the elements in a sorted set are inserted with the same score, in order to force lexicographical ordering. + /// This command removes all elements in the sorted set stored at key between the lexicographical range specified by min and max. /// /// The key of the sorted set. /// The minimum value to remove. /// The maximum value to remove. /// Which of and to exclude (defaults to both inclusive). /// The flags to use for this operation. - /// the number of elements removed. - /// https://redis.io/commands/zremrangebylex + /// The number of elements removed. + /// long SortedSetRemoveRangeByValue(RedisKey key, RedisValue min, RedisValue max, Exclude exclude = Exclude.None, CommandFlags flags = CommandFlags.None); /// - /// The ZSCAN command is used to incrementally iterate over a sorted set + /// The ZSCAN command is used to incrementally iterate over a sorted set. /// /// The key of the sorted set. /// The pattern to match. /// The page size to iterate by. /// The flags to use for this operation. /// Yields all matching elements of the sorted set. 
- /// https://redis.io/commands/zscan + /// IEnumerable SortedSetScan(RedisKey key, RedisValue pattern, int pageSize, CommandFlags flags); /// - /// The ZSCAN command is used to incrementally iterate over a sorted set; note: to resume an iteration via cursor, cast the original enumerable or enumerator to IScanningCursor. + /// The ZSCAN command is used to incrementally iterate over a sorted set + /// Note: to resume an iteration via cursor, cast the original enumerable or enumerator to IScanningCursor. /// /// The key of the sorted set. /// The pattern to match. @@ -1467,33 +2466,52 @@ RedisValue[] SortedSetRangeByValue(RedisKey key, /// The page offset to start at. /// The flags to use for this operation. /// Yields all matching elements of the sorted set. - /// https://redis.io/commands/zscan - IEnumerable SortedSetScan(RedisKey key, - RedisValue pattern = default(RedisValue), + /// + IEnumerable SortedSetScan( + RedisKey key, + RedisValue pattern = default, int pageSize = RedisBase.CursorUtils.DefaultLibraryPageSize, long cursor = RedisBase.CursorUtils.Origin, int pageOffset = 0, CommandFlags flags = CommandFlags.None); /// - /// Returns the score of member in the sorted set at key; If member does not exist in the sorted set, or key does not exist, nil is returned. + /// Returns the score of member in the sorted set at key. + /// If member does not exist in the sorted set, or key does not exist, is returned. /// /// The key of the sorted set. /// The member to get a score for. /// The flags to use for this operation. /// The score of the member. - /// https://redis.io/commands/zscore + /// double? SortedSetScore(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None); + /// + /// Returns the scores of members in the sorted set at . + /// If a member does not exist in the sorted set, or key does not exist, is returned. + /// + /// The key of the sorted set. + /// The members to get a score for. + /// The flags to use for this operation. 
+ /// + /// The scores of the members in the same order as the array. + /// If a member does not exist in the set, is returned. + /// + /// + double?[] SortedSetScores(RedisKey key, RedisValue[] members, CommandFlags flags = CommandFlags.None); + /// /// Removes and returns the first element from the sorted set stored at key, by default with the scores ordered from low to high. /// /// The key of the sorted set. /// The order to sort by (defaults to ascending). /// The flags to use for this operation. - /// The removed element, or nil when key does not exist. - /// https://redis.io/commands/zpopmin - /// https://redis.io/commands/zpopmax + /// The removed element, or when key does not exist. + /// + /// See + /// , + /// . + /// SortedSetEntry? SortedSetPop(RedisKey key, Order order = Order.Ascending, CommandFlags flags = CommandFlags.None); /// @@ -1504,10 +2522,48 @@ IEnumerable SortedSetScan(RedisKey key, /// The order to sort by (defaults to ascending). /// The flags to use for this operation. /// An array of elements, or an empty array when key does not exist. - /// https://redis.io/commands/zpopmin - /// https://redis.io/commands/zpopmax + /// + /// See + /// , + /// . + /// SortedSetEntry[] SortedSetPop(RedisKey key, long count, Order order = Order.Ascending, CommandFlags flags = CommandFlags.None); + /// + /// Removes and returns up to entries from the first non-empty sorted set in . + /// Returns if none of the sets exist or contain any elements. + /// + /// The keys to check. + /// The maximum number of records to pop out of the sorted set. + /// The order to sort by when popping items out of the set. + /// The flags to use for the operation. + /// A contiguous collection of sorted set entries with the key they were popped from, or if no non-empty sorted sets are found. 
+ /// + SortedSetPopResult SortedSetPop(RedisKey[] keys, long count, Order order = Order.Ascending, CommandFlags flags = CommandFlags.None); + + /// + /// Same as but return the number of the elements changed. + /// + /// The key of the sorted set. + /// The member to add/update to the sorted set. + /// The score for the member to add/update to the sorted set. + /// What conditions to add the element under (defaults to always). + /// The flags to use for this operation. + /// The number of elements changed. + /// + bool SortedSetUpdate(RedisKey key, RedisValue member, double score, SortedSetWhen when = SortedSetWhen.Always, CommandFlags flags = CommandFlags.None); + + /// + /// Same as but return the number of the elements changed. + /// + /// The key of the sorted set. + /// The members and values to add/update to the sorted set. + /// What conditions to add the element under (defaults to always). + /// The flags to use for this operation. + /// The number of elements changed. + /// + long SortedSetUpdate(RedisKey key, SortedSetEntry[] values, SortedSetWhen when = SortedSetWhen.Always, CommandFlags flags = CommandFlags.None); + /// /// Allow the consumer to mark a pending message as correctly processed. Returns the number of messages acknowledged. /// @@ -1516,7 +2572,7 @@ IEnumerable SortedSetScan(RedisKey key, /// The ID of the message to acknowledge. /// The flags to use for this operation. /// The number of messages acknowledged. - /// https://redis.io/topics/streams-intro + /// long StreamAcknowledge(RedisKey key, RedisValue groupName, RedisValue messageId, CommandFlags flags = CommandFlags.None); /// @@ -1527,11 +2583,41 @@ IEnumerable SortedSetScan(RedisKey key, /// The IDs of the messages to acknowledge. /// The flags to use for this operation. /// The number of messages acknowledged. 
- /// https://redis.io/topics/streams-intro + /// long StreamAcknowledge(RedisKey key, RedisValue groupName, RedisValue[] messageIds, CommandFlags flags = CommandFlags.None); /// - /// Adds an entry using the specified values to the given stream key. If key does not exist, a new key holding a stream is created. The command returns the ID of the newly created stream entry. + /// Allow the consumer to mark a pending message as correctly processed. Returns the number of messages acknowledged. + /// + /// The key of the stream. + /// The name of the consumer group that received the message. + /// The delete mode to use when acknowledging the message. + /// The ID of the message to acknowledge. + /// The flags to use for this operation. + /// The outcome of the delete operation. + /// +#pragma warning disable RS0026 // similar overloads + StreamTrimResult StreamAcknowledgeAndDelete(RedisKey key, RedisValue groupName, StreamTrimMode mode, RedisValue messageId, CommandFlags flags = CommandFlags.None); +#pragma warning restore RS0026 + + /// + /// Allow the consumer to mark a pending message as correctly processed. Returns the number of messages acknowledged. + /// + /// The key of the stream. + /// The name of the consumer group that received the message. + /// /// The delete mode to use when acknowledging the message. + /// The IDs of the messages to acknowledge. + /// The flags to use for this operation. + /// The outcome of each delete operation. + /// +#pragma warning disable RS0026 // similar overloads + StreamTrimResult[] StreamAcknowledgeAndDelete(RedisKey key, RedisValue groupName, StreamTrimMode mode, RedisValue[] messageIds, CommandFlags flags = CommandFlags.None); +#pragma warning restore RS0026 + + /// + /// Adds an entry using the specified values to the given stream key. + /// If key does not exist, a new key holding a stream is created. + /// The command returns the ID of the newly created stream entry. /// /// The key of the stream. 
/// The field name for the stream entry. @@ -1541,11 +2627,13 @@ IEnumerable SortedSetScan(RedisKey key, /// If true, the "~" argument is used to allow the stream to exceed max length by a small number. This improves performance when removing messages. /// The flags to use for this operation. /// The ID of the newly created message. - /// https://redis.io/commands/xadd - RedisValue StreamAdd(RedisKey key, RedisValue streamField, RedisValue streamValue, RedisValue? messageId = null, int? maxLength = null, bool useApproximateMaxLength = false, CommandFlags flags = CommandFlags.None); + /// + RedisValue StreamAdd(RedisKey key, RedisValue streamField, RedisValue streamValue, RedisValue? messageId, int? maxLength, bool useApproximateMaxLength, CommandFlags flags); /// - /// Adds an entry using the specified values to the given stream key. If key does not exist, a new key holding a stream is created. The command returns the ID of the newly created stream entry. + /// Adds an entry using the specified values to the given stream key. + /// If key does not exist, a new key holding a stream is created. + /// The command returns the ID of the newly created stream entry. /// /// The key of the stream. /// The fields and their associated values to set in the stream entry. @@ -1554,11 +2642,130 @@ IEnumerable SortedSetScan(RedisKey key, /// If true, the "~" argument is used to allow the stream to exceed max length by a small number. This improves performance when removing messages. /// The flags to use for this operation. /// The ID of the newly created message. - /// https://redis.io/commands/xadd - RedisValue StreamAdd(RedisKey key, NameValueEntry[] streamPairs, RedisValue? messageId = null, int? maxLength = null, bool useApproximateMaxLength = false, CommandFlags flags = CommandFlags.None); + /// + RedisValue StreamAdd(RedisKey key, NameValueEntry[] streamPairs, RedisValue? messageId, int? 
maxLength, bool useApproximateMaxLength, CommandFlags flags); /// - /// Change ownership of messages consumed, but not yet acknowledged, by a different consumer. This method returns the complete message for the claimed message(s). + /// Adds an entry using the specified values to the given stream key. + /// If key does not exist, a new key holding a stream is created. + /// The command returns the ID of the newly created stream entry. + /// + /// The key of the stream. + /// The field name for the stream entry. + /// The value to set in the stream entry. + /// The ID to assign to the stream entry, defaults to an auto-generated ID ("*"). + /// The maximum length of the stream. + /// If true, the "~" argument is used to allow the stream to exceed max length by a small number. This improves performance when removing messages. + /// Specifies the maximal count of entries that will be evicted. + /// Determines how stream trimming should be performed. + /// The flags to use for this operation. + /// The ID of the newly created message. + /// +#pragma warning disable RS0026 // different shape + RedisValue StreamAdd(RedisKey key, RedisValue streamField, RedisValue streamValue, RedisValue? messageId = null, long? maxLength = null, bool useApproximateMaxLength = false, long? limit = null, StreamTrimMode trimMode = StreamTrimMode.KeepReferences, CommandFlags flags = CommandFlags.None); + + /// + /// Adds an entry using the specified values to the given stream key. + /// If key does not exist, a new key holding a stream is created. + /// The command returns the ID of the newly created stream entry, using + /// the idempotent id (pid/iid) mechanism to ensure at-most-once production. + /// See for more information of the idempotent API. + /// + /// The key of the stream. + /// The field name for the stream entry. + /// The value to set in the stream entry. + /// The idempotent producer (pid) and optionally id (iid) to use for this entry. + /// The maximum length of the stream. 
+ /// If true, the "~" argument is used to allow the stream to exceed max length by a small number. This improves performance when removing messages. + /// Specifies the maximal count of entries that will be evicted. + /// Determines how stream trimming should be performed. + /// The flags to use for this operation. + /// The ID of the newly created message. + /// + [Experimental(Experiments.Server_8_6, UrlFormat = Experiments.UrlFormat)] + RedisValue StreamAdd(RedisKey key, RedisValue streamField, RedisValue streamValue, StreamIdempotentId idempotentId, long? maxLength = null, bool useApproximateMaxLength = false, long? limit = null, StreamTrimMode trimMode = StreamTrimMode.KeepReferences, CommandFlags flags = CommandFlags.None); + + /// + /// Adds an entry using the specified values to the given stream key. + /// If key does not exist, a new key holding a stream is created. + /// The command returns the ID of the newly created stream entry. + /// + /// The key of the stream. + /// The fields and their associated values to set in the stream entry. + /// The ID to assign to the stream entry, defaults to an auto-generated ID ("*"). + /// The maximum length of the stream. + /// If true, the "~" argument is used to allow the stream to exceed max length by a small number. This improves performance when removing messages. + /// Specifies the maximal count of entries that will be evicted. + /// Determines how stream trimming should be performed. + /// The flags to use for this operation. + /// The ID of the newly created message. + /// + RedisValue StreamAdd(RedisKey key, NameValueEntry[] streamPairs, RedisValue? messageId = null, long? maxLength = null, bool useApproximateMaxLength = false, long? limit = null, StreamTrimMode trimMode = StreamTrimMode.KeepReferences, CommandFlags flags = CommandFlags.None); + + /// + /// Adds an entry using the specified values to the given stream key. + /// If key does not exist, a new key holding a stream is created. 
+ /// The command returns the ID of the newly created stream entry, using + /// the idempotent id (pid/iid) mechanism to ensure at-most-once production. + /// See for more information of the idempotent API. + /// + /// The key of the stream. + /// The fields and their associated values to set in the stream entry. + /// The idempotent producer (pid) and optionally id (iid) to use for this entry. + /// The maximum length of the stream. + /// If true, the "~" argument is used to allow the stream to exceed max length by a small number. This improves performance when removing messages. + /// Specifies the maximal count of entries that will be evicted. + /// Determines how stream trimming should be performed. + /// The flags to use for this operation. + /// The ID of the newly created message. + /// + [Experimental(Experiments.Server_8_6, UrlFormat = Experiments.UrlFormat)] + RedisValue StreamAdd(RedisKey key, NameValueEntry[] streamPairs, StreamIdempotentId idempotentId, long? maxLength = null, bool useApproximateMaxLength = false, long? limit = null, StreamTrimMode trimMode = StreamTrimMode.KeepReferences, CommandFlags flags = CommandFlags.None); +#pragma warning restore RS0026 + + /// + /// Configures a stream, in particular the IDMP map. + /// + /// The key of the stream. + /// The configuration to apply. + /// The flags to use for this operation. + [Experimental(Experiments.Server_8_6, UrlFormat = Experiments.UrlFormat)] + void StreamConfigure(RedisKey key, StreamConfiguration configuration, CommandFlags flags = CommandFlags.None); + + /// + /// Change ownership of messages consumed, but not yet acknowledged, by a different consumer. + /// Messages that have been idle for more than will be claimed. + /// + /// The key of the stream. + /// The consumer group. + /// The consumer claiming the messages(s). + /// The minimum idle time threshold for pending messages to be claimed. + /// The starting ID to scan for pending messages that have an idle time greater than . 
+ /// The upper limit of the number of entries that the command attempts to claim. If , Redis will default the value to 100. + /// The flags to use for this operation. + /// An instance of . + /// + StreamAutoClaimResult StreamAutoClaim(RedisKey key, RedisValue consumerGroup, RedisValue claimingConsumer, long minIdleTimeInMs, RedisValue startAtId, int? count = null, CommandFlags flags = CommandFlags.None); + + /// + /// Change ownership of messages consumed, but not yet acknowledged, by a different consumer. + /// Messages that have been idle for more than will be claimed. + /// The result will contain the claimed message IDs instead of a instance. + /// + /// The key of the stream. + /// The consumer group. + /// The consumer claiming the messages(s). + /// The minimum idle time threshold for pending messages to be claimed. + /// The starting ID to scan for pending messages that have an idle time greater than . + /// The upper limit of the number of entries that the command attempts to claim. If , Redis will default the value to 100. + /// The flags to use for this operation. + /// An instance of . + /// + StreamAutoClaimIdsOnlyResult StreamAutoClaimIdsOnly(RedisKey key, RedisValue consumerGroup, RedisValue claimingConsumer, long minIdleTimeInMs, RedisValue startAtId, int? count = null, CommandFlags flags = CommandFlags.None); + + /// + /// Change ownership of messages consumed, but not yet acknowledged, by a different consumer. + /// This method returns the complete message for the claimed message(s). /// /// The key of the stream. /// The consumer group. @@ -1567,11 +2774,12 @@ IEnumerable SortedSetScan(RedisKey key, /// The IDs of the messages to claim for the given consumer. /// The flags to use for this operation. /// The messages successfully claimed by the given consumer. 
- /// https://redis.io/topics/streams-intro + /// StreamEntry[] StreamClaim(RedisKey key, RedisValue consumerGroup, RedisValue claimingConsumer, long minIdleTimeInMs, RedisValue[] messageIds, CommandFlags flags = CommandFlags.None); /// - /// Change ownership of messages consumed, but not yet acknowledged, by a different consumer. This method returns the IDs for the claimed message(s). + /// Change ownership of messages consumed, but not yet acknowledged, by a different consumer. + /// This method returns the IDs for the claimed message(s). /// /// The key of the stream. /// The consumer group. @@ -1580,7 +2788,7 @@ IEnumerable SortedSetScan(RedisKey key, /// The IDs of the messages to claim for the given consumer. /// The flags to use for this operation. /// The message IDs for the messages successfully claimed by the given consumer. - /// https://redis.io/topics/streams-intro + /// RedisValue[] StreamClaimIdsOnly(RedisKey key, RedisValue consumerGroup, RedisValue claimingConsumer, long minIdleTimeInMs, RedisValue[] messageIds, CommandFlags flags = CommandFlags.None); /// @@ -1590,17 +2798,19 @@ IEnumerable SortedSetScan(RedisKey key, /// The name of the consumer group. /// The position from which to read for the consumer group. /// The flags to use for this operation. - /// True if successful, otherwise false. + /// if successful, otherwise. + /// bool StreamConsumerGroupSetPosition(RedisKey key, RedisValue groupName, RedisValue position, CommandFlags flags = CommandFlags.None); /// - /// Retrieve information about the consumers for the given consumer group. This is the equivalent of calling "XINFO GROUPS key group". + /// Retrieve information about the consumers for the given consumer group. + /// This is the equivalent of calling "XINFO GROUPS key group". /// /// The key of the stream. /// The consumer group name. /// The flags to use for this operation. - /// An instance of for each of the consumer group's consumers. 
- /// https://redis.io/topics/streams-intro + /// An instance of for each of the consumer group's consumers. + /// StreamConsumerInfo[] StreamConsumerInfo(RedisKey key, RedisValue groupName, CommandFlags flags = CommandFlags.None); /// @@ -1610,8 +2820,8 @@ IEnumerable SortedSetScan(RedisKey key, /// The name of the group to create. /// The position to begin reading the stream. Defaults to . /// The flags to use for this operation. - /// True if the group was created. - /// https://redis.io/topics/streams-intro + /// if the group was created, otherwise. + /// bool StreamCreateConsumerGroup(RedisKey key, RedisValue groupName, RedisValue? position, CommandFlags flags); /// @@ -1622,8 +2832,8 @@ IEnumerable SortedSetScan(RedisKey key, /// The position to begin reading the stream. Defaults to . /// Create the stream if it does not already exist. /// The flags to use for this operation. - /// True if the group was created. - /// https://redis.io/topics/streams-intro + /// if the group was created, otherwise. + /// bool StreamCreateConsumerGroup(RedisKey key, RedisValue groupName, RedisValue? position = null, bool createStream = true, CommandFlags flags = CommandFlags.None); /// @@ -1633,8 +2843,23 @@ IEnumerable SortedSetScan(RedisKey key, /// The IDs of the messages to delete. /// The flags to use for this operation. /// Returns the number of messages successfully deleted from the stream. - /// https://redis.io/topics/streams-intro + /// +#pragma warning disable RS0026 // similar overloads long StreamDelete(RedisKey key, RedisValue[] messageIds, CommandFlags flags = CommandFlags.None); +#pragma warning restore RS0026 + + /// + /// Delete messages in the stream. This method does not delete the stream. + /// + /// The key of the stream. + /// The IDs of the messages to delete. + /// Determines how stream trimming should be performed. + /// The flags to use for this operation. + /// Returns the number of messages successfully deleted from the stream. 
+ /// +#pragma warning disable RS0026 // similar overloads + StreamTrimResult[] StreamDelete(RedisKey key, RedisValue[] messageIds, StreamTrimMode mode, CommandFlags flags = CommandFlags.None); +#pragma warning restore RS0026 /// /// Delete a consumer from a consumer group. @@ -1644,6 +2869,7 @@ IEnumerable SortedSetScan(RedisKey key, /// The name of the consumer. /// The flags to use for this operation. /// The number of messages that were pending for the deleted consumer. + /// long StreamDeleteConsumer(RedisKey key, RedisValue groupName, RedisValue consumerName, CommandFlags flags = CommandFlags.None); /// @@ -1652,7 +2878,8 @@ IEnumerable SortedSetScan(RedisKey key, /// The key of the stream. /// The name of the consumer group. /// The flags to use for this operation. - /// True if deleted, otherwise false. + /// if deleted, otherwise. + /// bool StreamDeleteConsumerGroup(RedisKey key, RedisValue groupName, CommandFlags flags = CommandFlags.None); /// @@ -1660,8 +2887,8 @@ IEnumerable SortedSetScan(RedisKey key, /// /// The key of the stream. /// The flags to use for this operation. - /// An instance of for each of the stream's groups. - /// https://redis.io/topics/streams-intro + /// An instance of for each of the stream's groups. + /// StreamGroupInfo[] StreamGroupInfo(RedisKey key, CommandFlags flags = CommandFlags.None); /// @@ -1669,8 +2896,8 @@ IEnumerable SortedSetScan(RedisKey key, /// /// The key of the stream. /// The flags to use for this operation. - /// A instance with information about the stream. - /// https://redis.io/topics/streams-intro + /// A instance with information about the stream. + /// StreamInfo StreamInfo(RedisKey key, CommandFlags flags = CommandFlags.None); /// @@ -1679,20 +2906,40 @@ IEnumerable SortedSetScan(RedisKey key, /// The key of the stream. /// The flags to use for this operation. /// The number of entries inside the given stream. 
- /// https://redis.io/commands/xlen + /// long StreamLength(RedisKey key, CommandFlags flags = CommandFlags.None); /// /// View information about pending messages for a stream. + /// A pending message is a message read using StreamReadGroup (XREADGROUP) but not yet acknowledged. /// /// The key of the stream. - /// The name of the consumer group + /// The name of the consumer group. /// The flags to use for this operation. - /// An instance of . contains the number of pending messages, the highest and lowest ID of the pending messages, and the consumers with their pending message count. + /// + /// An instance of . + /// contains the number of pending messages. + /// The highest and lowest ID of the pending messages, and the consumers with their pending message count. + /// /// The equivalent of calling XPENDING key group. - /// https://redis.io/commands/xpending + /// StreamPendingInfo StreamPending(RedisKey key, RedisValue groupName, CommandFlags flags = CommandFlags.None); + /// + /// View information about each pending message. + /// + /// The key of the stream. + /// The name of the consumer group. + /// The maximum number of pending messages to return. + /// The consumer name for the pending messages. Pass RedisValue.Null to include pending messages for all consumers. + /// The minimum ID from which to read the stream of pending messages. Pass null to read from the beginning of the stream. + /// The maximum ID to read to within the stream of pending messages. Pass null to read to the end of the stream. + /// The flags to use for this operation. + /// An instance of for each pending message. + /// Equivalent of calling XPENDING key group start-id end-id count consumer-name. + /// + StreamPendingMessageInfo[] StreamPendingMessages(RedisKey key, RedisValue groupName, int count, RedisValue consumerName, RedisValue? minId, RedisValue? maxId, CommandFlags flags); + /// /// View information about each pending message. 
/// @@ -1702,11 +2949,12 @@ IEnumerable SortedSetScan(RedisKey key, /// The consumer name for the pending messages. Pass RedisValue.Null to include pending messages for all consumers. /// The minimum ID from which to read the stream of pending messages. The method will default to reading from the beginning of the stream. /// The maximum ID to read to within the stream of pending messages. The method will default to reading to the end of the stream. + /// The minimum idle time threshold for pending messages to be claimed. /// The flags to use for this operation. /// An instance of for each pending message. - /// Equivalent of calling XPENDING key group start-id end-id count consumer-name. - /// https://redis.io/commands/xpending - StreamPendingMessageInfo[] StreamPendingMessages(RedisKey key, RedisValue groupName, int count, RedisValue consumerName, RedisValue? minId = null, RedisValue? maxId = null, CommandFlags flags = CommandFlags.None); + /// Equivalent of calling XPENDING key group IDLE min-idle-time start-id end-id count consumer-name. + /// + StreamPendingMessageInfo[] StreamPendingMessages(RedisKey key, RedisValue groupName, int count, RedisValue consumerName, RedisValue? minId = null, RedisValue? maxId = null, long? minIdleTimeInMs = null, CommandFlags flags = CommandFlags.None); /// /// Read a stream using the given range of IDs. @@ -1715,10 +2963,10 @@ IEnumerable SortedSetScan(RedisKey key, /// The minimum ID from which to read the stream. The method will default to reading from the beginning of the stream. /// The maximum ID to read to within the stream. The method will default to reading to the end of the stream. /// The maximum number of messages to return. - /// The order of the messages. will execute XRANGE and wil execute XREVRANGE. + /// The order of the messages. will execute XRANGE and will execute XREVRANGE. /// The flags to use for this operation. /// Returns an instance of for each message returned. 
- /// https://redis.io/commands/xrange + /// StreamEntry[] StreamRange(RedisKey key, RedisValue? minId = null, RedisValue? maxId = null, int? count = null, Order messageOrder = Order.Ascending, CommandFlags flags = CommandFlags.None); /// @@ -1728,9 +2976,11 @@ IEnumerable SortedSetScan(RedisKey key, /// The position from which to read the stream. /// The maximum number of messages to return. /// The flags to use for this operation. - /// Returns a value of for each message returned. - /// Equivalent of calling XREAD COUNT num STREAMS key id. - /// https://redis.io/commands/xread + /// Returns an instance of for each message returned. + /// + /// Equivalent of calling XREAD COUNT num STREAMS key id. + /// + /// StreamEntry[] StreamRead(RedisKey key, RedisValue position, int? count = null, CommandFlags flags = CommandFlags.None); /// @@ -1740,8 +2990,10 @@ IEnumerable SortedSetScan(RedisKey key, /// The maximum number of messages to return from each stream. /// The flags to use for this operation. /// A value of for each stream. - /// Equivalent of calling XREAD COUNT num STREAMS key1 key2 id1 id2. - /// https://redis.io/commands/xread + /// + /// Equivalent of calling XREAD COUNT num STREAMS key1 key2 id1 id2. + /// + /// RedisStream[] StreamRead(StreamPosition[] streamPositions, int? countPerStream = null, CommandFlags flags = CommandFlags.None); /// @@ -1750,11 +3002,11 @@ IEnumerable SortedSetScan(RedisKey key, /// The key of the stream. /// The name of the consumer group. /// The consumer name. - /// The position from which to read the stream. Defaults to when null. + /// The position from which to read the stream. Defaults to when . /// The maximum number of messages to return. /// The flags to use for this operation. /// Returns a value of for each message returned. - /// https://redis.io/commands/xreadgroup + /// StreamEntry[] StreamReadGroup(RedisKey key, RedisValue groupName, RedisValue consumerName, RedisValue? position, int? 
count, CommandFlags flags); /// @@ -1763,42 +3015,79 @@ IEnumerable SortedSetScan(RedisKey key, /// The key of the stream. /// The name of the consumer group. /// The consumer name. - /// The position from which to read the stream. Defaults to when null. + /// The position from which to read the stream. Defaults to when . + /// The maximum number of messages to return. + /// When true, the message will not be added to the pending message list. + /// The flags to use for this operation. + /// Returns a value of for each message returned. + /// + StreamEntry[] StreamReadGroup(RedisKey key, RedisValue groupName, RedisValue consumerName, RedisValue? position, int? count, bool noAck, CommandFlags flags); + + /// + /// Read messages from a stream into an associated consumer group. + /// + /// The key of the stream. + /// The name of the consumer group. + /// The consumer name. + /// The position from which to read the stream. Defaults to when . /// The maximum number of messages to return. /// When true, the message will not be added to the pending message list. + /// Auto-claim messages that have been idle for at least this long. /// The flags to use for this operation. /// Returns a value of for each message returned. - /// https://redis.io/commands/xreadgroup - StreamEntry[] StreamReadGroup(RedisKey key, RedisValue groupName, RedisValue consumerName, RedisValue? position = null, int? count = null, bool noAck = false, CommandFlags flags = CommandFlags.None); + /// + StreamEntry[] StreamReadGroup(RedisKey key, RedisValue groupName, RedisValue consumerName, RedisValue? position = null, int? count = null, bool noAck = false, TimeSpan? claimMinIdleTime = null, CommandFlags flags = CommandFlags.None); /// - /// Read from multiple streams into the given consumer group. The consumer group with the given - /// will need to have been created for each stream prior to calling this method. + /// Read from multiple streams into the given consumer group. 
+ /// The consumer group with the given will need to have been created for each stream prior to calling this method. /// /// Array of streams and the positions from which to begin reading for each stream. /// The name of the consumer group. - /// + /// The name of the consumer. /// The maximum number of messages to return from each stream. /// The flags to use for this operation. /// A value of for each stream. - /// Equivalent of calling XREADGROUP GROUP groupName consumerName COUNT countPerStream STREAMS stream1 stream2 id1 id2 - /// https://redis.io/commands/xreadgroup + /// + /// Equivalent of calling XREADGROUP GROUP groupName consumerName COUNT countPerStream STREAMS stream1 stream2 id1 id2. + /// + /// RedisStream[] StreamReadGroup(StreamPosition[] streamPositions, RedisValue groupName, RedisValue consumerName, int? countPerStream, CommandFlags flags); /// - /// Read from multiple streams into the given consumer group. The consumer group with the given - /// will need to have been created for each stream prior to calling this method. + /// Read from multiple streams into the given consumer group. + /// The consumer group with the given will need to have been created for each stream prior to calling this method. /// /// Array of streams and the positions from which to begin reading for each stream. /// The name of the consumer group. - /// + /// The name of the consumer. /// The maximum number of messages to return from each stream. /// When true, the message will not be added to the pending message list. /// The flags to use for this operation. /// A value of for each stream. - /// Equivalent of calling XREADGROUP GROUP groupName consumerName COUNT countPerStream STREAMS stream1 stream2 id1 id2 - /// https://redis.io/commands/xreadgroup - RedisStream[] StreamReadGroup(StreamPosition[] streamPositions, RedisValue groupName, RedisValue consumerName, int? 
countPerStream = null, bool noAck = false, CommandFlags flags = CommandFlags.None); + /// + /// Equivalent of calling XREADGROUP GROUP groupName consumerName COUNT countPerStream STREAMS stream1 stream2 id1 id2. + /// + /// + RedisStream[] StreamReadGroup(StreamPosition[] streamPositions, RedisValue groupName, RedisValue consumerName, int? countPerStream, bool noAck, CommandFlags flags); + + /// + /// Read from multiple streams into the given consumer group. + /// The consumer group with the given will need to have been created for each stream prior to calling this method. + /// + /// Array of streams and the positions from which to begin reading for each stream. + /// The name of the consumer group. + /// The name of the consumer. + /// The maximum number of messages to return from each stream. + /// When true, the message will not be added to the pending message list. + /// Auto-claim messages that have been idle for at least this long. + /// The flags to use for this operation. + /// A value of for each stream. + /// + /// Equivalent of calling XREADGROUP GROUP groupName consumerName COUNT countPerStream STREAMS stream1 stream2 id1 id2. + /// + /// + RedisStream[] StreamReadGroup(StreamPosition[] streamPositions, RedisValue groupName, RedisValue consumerName, int? countPerStream = null, bool noAck = false, TimeSpan? claimMinIdleTime = null, CommandFlags flags = CommandFlags.None); /// /// Trim the stream to a specified maximum length. @@ -1808,38 +3097,70 @@ IEnumerable SortedSetScan(RedisKey key, /// If true, the "~" argument is used to allow the stream to exceed max length by a small number. This improves performance when removing messages. /// The flags to use for this operation. /// The number of messages removed from the stream. 
- /// https://redis.io/topics/streams-intro - long StreamTrim(RedisKey key, int maxLength, bool useApproximateMaxLength = false, CommandFlags flags = CommandFlags.None); + /// + long StreamTrim(RedisKey key, int maxLength, bool useApproximateMaxLength, CommandFlags flags); /// - /// If key already exists and is a string, this command appends the value at the end of the string. If key does not exist it is created and set as an empty string, - /// so APPEND will be similar to SET in this special case. + /// Trim the stream to a specified maximum length. + /// + /// The key of the stream. + /// The maximum length of the stream. + /// If true, the "~" argument is used to allow the stream to exceed max length by a small number. This improves performance when removing messages. + /// Specifies the maximal count of entries that will be evicted. + /// Determines how stream trimming should be performed. + /// The flags to use for this operation. + /// The number of messages removed from the stream. + /// + long StreamTrim(RedisKey key, long maxLength, bool useApproximateMaxLength = false, long? limit = null, StreamTrimMode mode = StreamTrimMode.KeepReferences, CommandFlags flags = CommandFlags.None); + + /// + /// Trim the stream to a specified minimum timestamp. + /// + /// The key of the stream. + /// All entries with an id (timestamp) earlier minId will be removed. + /// If true, the "~" argument is used to allow the stream to exceed minId by a small number. This improves performance when removing messages. + /// The maximum number of entries to remove per call when useApproximateMaxLength = true. If 0, the limiting mechanism is disabled entirely. + /// Determines how stream trimming should be performed. + /// The flags to use for this operation. + /// The number of messages removed from the stream. + /// + long StreamTrimByMinId(RedisKey key, RedisValue minId, bool useApproximateMaxLength = false, long? 
limit = null, StreamTrimMode mode = StreamTrimMode.KeepReferences, CommandFlags flags = CommandFlags.None); + + /// + /// If key already exists and is a string, this command appends the value at the end of the string. + /// If key does not exist it is created and set as an empty string, so APPEND will be similar to SET in this special case. /// /// The key of the string. /// The value to append to the string. /// The flags to use for this operation. /// The length of the string after the append operation. - /// https://redis.io/commands/append + /// long StringAppend(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None); + /// + [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] + long StringBitCount(RedisKey key, long start, long end, CommandFlags flags); + /// /// Count the number of set bits (population counting) in a string. - /// By default all the bytes contained in the string are examined. It is possible to specify the counting operation only in an interval passing the additional arguments start and end. + /// By default all the bytes contained in the string are examined. + /// It is possible to specify the counting operation only in an interval passing the additional arguments start and end. /// Like for the GETRANGE command start and end can contain negative values in order to index bytes starting from the end of the string, where -1 is the last byte, -2 is the penultimate, and so forth. /// /// The key of the string. /// The start byte to count at. /// The end byte to count at. + /// In Redis 7+, we can choose if and specify a bit index or byte index (defaults to ). /// The flags to use for this operation. /// The number of bits set to 1. 
- /// https://redis.io/commands/bitcount - long StringBitCount(RedisKey key, long start = 0, long end = -1, CommandFlags flags = CommandFlags.None); + /// + long StringBitCount(RedisKey key, long start = 0, long end = -1, StringIndexType indexType = StringIndexType.Byte, CommandFlags flags = CommandFlags.None); /// /// Perform a bitwise operation between multiple keys (containing string values) and store the result in the destination key. /// The BITOP command supports four bitwise operations; note that NOT is a unary operator: the second key should be omitted in this case /// and only the first key will be considered. - /// The result of the operation is always stored at destkey. + /// The result of the operation is always stored at . /// /// The operation to perform. /// The destination key to store the result in. @@ -1847,85 +3168,121 @@ IEnumerable SortedSetScan(RedisKey key, /// The second key to get the bit value from. /// The flags to use for this operation. /// The size of the string stored in the destination key, that is equal to the size of the longest input string. - /// https://redis.io/commands/bitop - long StringBitOperation(Bitwise operation, RedisKey destination, RedisKey first, RedisKey second = default(RedisKey), CommandFlags flags = CommandFlags.None); + /// + long StringBitOperation(Bitwise operation, RedisKey destination, RedisKey first, RedisKey second = default, CommandFlags flags = CommandFlags.None); /// /// Perform a bitwise operation between multiple keys (containing string values) and store the result in the destination key. /// The BITOP command supports four bitwise operations; note that NOT is a unary operator. - /// The result of the operation is always stored at destkey. + /// The result of the operation is always stored at . /// /// The operation to perform. /// The destination key to store the result in. /// The keys to get the bit values from. /// The flags to use for this operation. 
/// The size of the string stored in the destination key, that is equal to the size of the longest input string. - /// https://redis.io/commands/bitop + /// long StringBitOperation(Bitwise operation, RedisKey destination, RedisKey[] keys, CommandFlags flags = CommandFlags.None); + /// + [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] + long StringBitPosition(RedisKey key, bool bit, long start, long end, CommandFlags flags); + /// /// Return the position of the first bit set to 1 or 0 in a string. /// The position is returned thinking at the string as an array of bits from left to right where the first byte most significant bit is at position 0, the second byte most significant bit is at position 8 and so forth. - /// An start and end may be specified; these are in bytes, not bits; start and end can contain negative values in order to index bytes starting from the end of the string, where -1 is the last byte, -2 is the penultimate, and so forth. + /// A and may be specified - these are in bytes, not bits. + /// and can contain negative values in order to index bytes starting from the end of the string, where -1 is the last byte, -2 is the penultimate, and so forth. /// /// The key of the string. /// True to check for the first 1 bit, false to check for the first 0 bit. /// The position to start looking (defaults to 0). /// The position to stop looking (defaults to -1, unlimited). + /// In Redis 7+, we can choose if and specify a bit index or byte index (defaults to ). /// The flags to use for this operation. - /// The command returns the position of the first bit set to 1 or 0 according to the request. - /// If we look for set bits(the bit argument is 1) and the string is empty or composed of just zero bytes, -1 is returned. 
- /// https://redis.io/commands/bitpos - long StringBitPosition(RedisKey key, bool bit, long start = 0, long end = -1, CommandFlags flags = CommandFlags.None); + /// + /// The command returns the position of the first bit set to 1 or 0 according to the request. + /// If we look for set bits(the bit argument is 1) and the string is empty or composed of just zero bytes, -1 is returned. + /// + /// + long StringBitPosition(RedisKey key, bool bit, long start = 0, long end = -1, StringIndexType indexType = StringIndexType.Byte, CommandFlags flags = CommandFlags.None); /// - /// Decrements the number stored at key by decrement. If the key does not exist, it is set to 0 before performing the operation. - /// An error is returned if the key contains a value of the wrong type or contains a string that is not representable as integer. This operation is limited to 64 bit signed integers. + /// Decrements the number stored at key by decrement. + /// If the key does not exist, it is set to 0 before performing the operation. + /// An error is returned if the key contains a value of the wrong type or contains a string that is not representable as integer. + /// This operation is limited to 64 bit signed integers. /// /// The key of the string. /// The amount to decrement by (defaults to 1). /// The flags to use for this operation. /// The value of key after the decrement. - /// https://redis.io/commands/decrby - /// https://redis.io/commands/decr + /// + /// See + /// , + /// . + /// long StringDecrement(RedisKey key, long value = 1, CommandFlags flags = CommandFlags.None); /// - /// Decrements the string representing a floating point number stored at key by the specified decrement. If the key does not exist, it is set to 0 before performing the operation. The precision of the output is fixed at 17 digits after the decimal point regardless of the actual internal precision of the computation. + /// Deletes if it matches the given condition. + /// + /// The key of the string. 
+ /// The condition to enforce. + /// The flags to use for this operation. + /// See . + bool StringDelete(RedisKey key, ValueCondition when, CommandFlags flags = CommandFlags.None); + + /// + /// Decrements the string representing a floating point number stored at key by the specified decrement. + /// If the key does not exist, it is set to 0 before performing the operation. + /// The precision of the output is fixed at 17 digits after the decimal point regardless of the actual internal precision of the computation. /// /// The key of the string. /// The amount to decrement by (defaults to 1). /// The flags to use for this operation. /// The value of key after the decrement. - /// https://redis.io/commands/incrbyfloat + /// double StringDecrement(RedisKey key, double value, CommandFlags flags = CommandFlags.None); /// - /// Get the value of key. If the key does not exist the special value nil is returned. An error is returned if the value stored at key is not a string, because GET only handles string values. + /// Gets the digest (hash) value of the specified key, represented as a digest equality . + /// + /// The key of the string. + /// The flags to use for this operation. + /// + [Experimental(Experiments.Server_8_4, UrlFormat = Experiments.UrlFormat)] + ValueCondition? StringDigest(RedisKey key, CommandFlags flags = CommandFlags.None); + + /// + /// Get the value of key. If the key does not exist the special value is returned. + /// An error is returned if the value stored at key is not a string, because GET only handles string values. /// /// The key of the string. /// The flags to use for this operation. - /// The value of key, or nil when key does not exist. - /// https://redis.io/commands/get + /// The value of key, or when key does not exist. + /// RedisValue StringGet(RedisKey key, CommandFlags flags = CommandFlags.None); /// - /// Returns the values of all specified keys. 
For every key that does not hold a string value or does not exist, the special value nil is returned. + /// Returns the values of all specified keys. + /// For every key that does not hold a string value or does not exist, the special value is returned. /// /// The keys of the strings. /// The flags to use for this operation. - /// The values of the strings with nil for keys do not exist. - /// https://redis.io/commands/mget + /// The values of the strings with for keys do not exist. + /// RedisValue[] StringGet(RedisKey[] keys, CommandFlags flags = CommandFlags.None); /// - /// Get the value of key. If the key does not exist the special value nil is returned. An error is returned if the value stored at key is not a string, because GET only handles string values. + /// Get the value of key. If the key does not exist the special value is returned. + /// An error is returned if the value stored at key is not a string, because GET only handles string values. /// /// The key of the string. /// The flags to use for this operation. - /// The value of key, or nil when key does not exist. - /// https://redis.io/commands/get - Lease StringGetLease(RedisKey key, CommandFlags flags = CommandFlags.None); + /// The value of key, or when key does not exist. + /// + Lease? StringGetLease(RedisKey key, CommandFlags flags = CommandFlags.None); /// /// Returns the bit value at offset in the string value stored at key. @@ -1935,18 +3292,20 @@ IEnumerable SortedSetScan(RedisKey key, /// The offset in the string to get a bit at. /// The flags to use for this operation. /// The bit value stored at offset. - /// https://redis.io/commands/getbit + /// bool StringGetBit(RedisKey key, long offset, CommandFlags flags = CommandFlags.None); /// - /// Returns the substring of the string value stored at key, determined by the offsets start and end (both are inclusive). Negative offsets can be used in order to provide an offset starting from the end of the string. 
So -1 means the last character, -2 the penultimate and so forth. + /// Returns the substring of the string value stored at key, determined by the offsets start and end (both are inclusive). + /// Negative offsets can be used in order to provide an offset starting from the end of the string. + /// So -1 means the last character, -2 the penultimate and so forth. /// /// The key of the string. /// The start index of the substring to get. /// The end index of the substring to get. /// The flags to use for this operation. /// The substring of the string value stored at key. - /// https://redis.io/commands/getrange + /// RedisValue StringGetRange(RedisKey key, long start, long end, CommandFlags flags = CommandFlags.None); /// @@ -1955,38 +3314,81 @@ IEnumerable SortedSetScan(RedisKey key, /// The key of the string. /// The value to replace the existing value with. /// The flags to use for this operation. - /// The old value stored at key, or nil when key did not exist. - /// https://redis.io/commands/getset + /// The old value stored at key, or when key did not exist. + /// RedisValue StringGetSet(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None); /// - /// Get the value of key. If the key does not exist the special value nil is returned. An error is returned if the value stored at key is not a string, because GET only handles string values. + /// Gets the value of and update its (relative) expiry. + /// If the key does not exist, the result will be . + /// + /// The key of the string. + /// The expiry to set. will remove expiry. + /// The flags to use for this operation. + /// The value of key, or when key does not exist. + /// + RedisValue StringGetSetExpiry(RedisKey key, TimeSpan? expiry, CommandFlags flags = CommandFlags.None); + + /// + /// Gets the value of and update its (absolute) expiry. + /// If the key does not exist, the result will be . /// /// The key of the string. + /// The exact date and time to expire at. will remove expiry. 
/// The flags to use for this operation. - /// The value of key and its expiry, or nil when key does not exist. - /// https://redis.io/commands/get + /// The value of key, or when key does not exist. + /// + RedisValue StringGetSetExpiry(RedisKey key, DateTime expiry, CommandFlags flags = CommandFlags.None); + + /// + /// Get the value of key and delete the key. + /// If the key does not exist the special value is returned. + /// An error is returned if the value stored at key is not a string, because GET only handles string values. + /// + /// The key of the string. + /// The flags to use for this operation. + /// The value of key, or when key does not exist. + /// + RedisValue StringGetDelete(RedisKey key, CommandFlags flags = CommandFlags.None); + + /// + /// Get the value of key. + /// If the key does not exist the special value is returned. + /// An error is returned if the value stored at key is not a string, because GET only handles string values. + /// + /// The key of the string. + /// The flags to use for this operation. + /// The value of key and its expiry, or when key does not exist. + /// RedisValueWithExpiry StringGetWithExpiry(RedisKey key, CommandFlags flags = CommandFlags.None); /// - /// Increments the number stored at key by increment. If the key does not exist, it is set to 0 before performing the operation. An error is returned if the key contains a value of the wrong type or contains a string that is not representable as integer. This operation is limited to 64 bit signed integers. + /// Increments the number stored at key by increment. + /// If the key does not exist, it is set to 0 before performing the operation. + /// An error is returned if the key contains a value of the wrong type or contains a string that is not representable as integer. + /// This operation is limited to 64 bit signed integers. /// /// The key of the string. /// The amount to increment by (defaults to 1). /// The flags to use for this operation. 
/// The value of key after the increment. - /// https://redis.io/commands/incrby - /// https://redis.io/commands/incr + /// + /// See + /// , + /// . + /// long StringIncrement(RedisKey key, long value = 1, CommandFlags flags = CommandFlags.None); /// - /// Increments the string representing a floating point number stored at key by the specified increment. If the key does not exist, it is set to 0 before performing the operation. The precision of the output is fixed at 17 digits after the decimal point regardless of the actual internal precision of the computation. + /// Increments the string representing a floating point number stored at key by the specified increment. + /// If the key does not exist, it is set to 0 before performing the operation. + /// The precision of the output is fixed at 17 digits after the decimal point regardless of the actual internal precision of the computation. /// /// The key of the string. /// The amount to increment by (defaults to 1). /// The flags to use for this operation. /// The value of key after the increment. - /// https://redis.io/commands/incrbyfloat + /// double StringIncrement(RedisKey key, double value, CommandFlags flags = CommandFlags.None); /// @@ -1994,72 +3396,170 @@ IEnumerable SortedSetScan(RedisKey key, /// /// The key of the string. /// The flags to use for this operation. - /// the length of the string at key, or 0 when key does not exist. - /// https://redis.io/commands/strlen + /// The length of the string at key, or 0 when key does not exist. + /// long StringLength(RedisKey key, CommandFlags flags = CommandFlags.None); + /// + /// Implements the longest common subsequence algorithm between the values at and , + /// returning a string containing the common sequence. + /// Note that this is different than the longest common string algorithm, + /// since matching characters in the string does not need to be contiguous. + /// + /// The key of the first string. + /// The key of the second string. 
+ /// The flags to use for this operation. + /// A string (sequence of characters) of the LCS match. + /// + string? StringLongestCommonSubsequence(RedisKey first, RedisKey second, CommandFlags flags = CommandFlags.None); + + /// + /// Implements the longest common subsequence algorithm between the values at and , + /// returning the length of the common sequence. + /// Note that this is different to the longest common string algorithm, + /// since matching characters in the string does not need to be contiguous. + /// + /// The key of the first string. + /// The key of the second string. + /// The flags to use for this operation. + /// The length of the LCS match. + /// + long StringLongestCommonSubsequenceLength(RedisKey first, RedisKey second, CommandFlags flags = CommandFlags.None); + + /// + /// Implements the longest common subsequence algorithm between the values at and , + /// returning a list of all common sequences. + /// Note that this is different than the longest common string algorithm, + /// since matching characters in the string does not need to be contiguous. + /// + /// The key of the first string. + /// The key of the second string. + /// Can be used to restrict the list of matches to the ones of a given minimum length. + /// The flags to use for this operation. + /// The result of LCS algorithm, based on the given parameters. + /// + LCSMatchResult StringLongestCommonSubsequenceWithMatches(RedisKey first, RedisKey second, long minLength = 0, CommandFlags flags = CommandFlags.None); + + /// + [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] + bool StringSet(RedisKey key, RedisValue value, TimeSpan? expiry, When when); + + /// + [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] + bool StringSet(RedisKey key, RedisValue value, TimeSpan? expiry, When when, CommandFlags flags); + /// /// Set key to hold the string value. If key already holds a value, it is overwritten, regardless of its type. 
/// /// The key of the string. /// The value to set. /// The expiry to set. + /// Whether to maintain the existing key's TTL (KEEPTTL flag). + /// Which condition to set the value under (defaults to always). + /// The flags to use for this operation. + /// if the string was set, otherwise. + /// + bool StringSet(RedisKey key, RedisValue value, TimeSpan? expiry, bool keepTtl, When when = When.Always, CommandFlags flags = CommandFlags.None); + + /// + /// Set to hold the string , if it matches the given condition. + /// + /// The key of the string. + /// The value to set. + /// The expiry to set. + /// The condition to enforce. + /// The flags to use for this operation. + /// See . +#pragma warning disable RS0027 // Public API with optional parameter(s) should have the most parameters amongst its public overloads + bool StringSet(RedisKey key, RedisValue value, Expiration expiry = default, ValueCondition when = default, CommandFlags flags = CommandFlags.None); +#pragma warning restore RS0027 + + /// + /// Sets the given keys to their respective values. + /// If is specified, this will not perform any operation at all even if just a single key already exists. + /// + /// The keys and values to set. /// Which condition to set the value under (defaults to always). /// The flags to use for this operation. - /// True if the string was set, false otherwise. - /// https://redis.io/commands/set - bool StringSet(RedisKey key, RedisValue value, TimeSpan? expiry = null, When when = When.Always, CommandFlags flags = CommandFlags.None); + /// if the keys were set, otherwise. + /// + /// See + /// , + /// . + /// . + /// + bool StringSet(KeyValuePair[] values, When when, CommandFlags flags); /// - /// Sets the given keys to their respective values. If "not exists" is specified, this will not perform any operation at all even if just a single key already exists. + /// Sets the given keys to their respective values, optionally including expiration. 
+ /// If is specified, this will not perform any operation at all even if just a single key already exists. /// /// The keys and values to set. /// Which condition to set the value under (defaults to always). + /// The expiry to set. /// The flags to use for this operation. - /// True if the keys were set, else False - /// https://redis.io/commands/mset - /// https://redis.io/commands/msetnx - bool StringSet(KeyValuePair[] values, When when = When.Always, CommandFlags flags = CommandFlags.None); + /// if the keys were set, otherwise. + /// + /// See + /// , + /// . + /// . + /// + bool StringSet(KeyValuePair[] values, When when = When.Always, Expiration expiry = default, CommandFlags flags = CommandFlags.None); + + /// + /// Atomically sets key to value and returns the previous value (if any) stored at . + /// + /// The key of the string. + /// The value to set. + /// The expiry to set. + /// Which condition to set the value under (defaults to ). + /// The flags to use for this operation. + /// The previous value stored at , or when key did not exist. + /// + /// This method uses the SET command with the GET option introduced in Redis 6.2.0 instead of the deprecated GETSET command. + /// + /// + RedisValue StringSetAndGet(RedisKey key, RedisValue value, TimeSpan? expiry, When when, CommandFlags flags); + + /// + /// Atomically sets key to value and returns the previous value (if any) stored at . + /// + /// The key of the string. + /// The value to set. + /// The expiry to set. + /// Whether to maintain the existing key's TTL (KEEPTTL flag). + /// Which condition to set the value under (defaults to ). + /// The flags to use for this operation. + /// The previous value stored at , or when key did not exist. + /// This method uses the SET command with the GET option introduced in Redis 6.2.0 instead of the deprecated GETSET command. + /// + RedisValue StringSetAndGet(RedisKey key, RedisValue value, TimeSpan? 
expiry = null, bool keepTtl = false, When when = When.Always, CommandFlags flags = CommandFlags.None); /// /// Sets or clears the bit at offset in the string value stored at key. - /// The bit is either set or cleared depending on value, which can be either 0 or 1. When key does not exist, a new string value is created.The string is grown to make sure it can hold a bit at offset. + /// The bit is either set or cleared depending on value, which can be either 0 or 1. + /// When key does not exist, a new string value is created.The string is grown to make sure it can hold a bit at offset. /// /// The key of the string. /// The offset in the string to set . /// The bit value to set, true for 1, false for 0. /// The flags to use for this operation. /// The original bit value stored at offset. - /// https://redis.io/commands/setbit + /// bool StringSetBit(RedisKey key, long offset, bool bit, CommandFlags flags = CommandFlags.None); /// - /// Overwrites part of the string stored at key, starting at the specified offset, for the entire length of value. If the offset is larger than the current length of the string at key, the string is padded with zero-bytes to make offset fit. Non-existing keys are considered as empty strings, so this command will make sure it holds a string large enough to be able to set value at offset. + /// Overwrites part of the string stored at key, starting at the specified offset, for the entire length of value. + /// If the offset is larger than the current length of the string at key, the string is padded with zero-bytes to make offset fit. + /// Non-existing keys are considered as empty strings, so this command will make sure it holds a string large enough to be able to set value at offset. /// /// The key of the string. /// The offset in the string to overwrite. /// The value to overwrite with. /// The flags to use for this operation. /// The length of the string after it was modified by the command. 
- /// https://redis.io/commands/setrange + /// RedisValue StringSetRange(RedisKey key, long offset, RedisValue value, CommandFlags flags = CommandFlags.None); - - /// - /// Alters the last access time of a key. - /// - /// The key to touch. - /// The flags to use for this operation. - /// True if the key was touched. - /// https://redis.io/commands/touch - bool KeyTouch(RedisKey key, CommandFlags flags = CommandFlags.None); - - /// - /// Alters the last access time of a keys. A key is ignored if it does not exist. - /// - /// The keys to touch. - /// The flags to use for this operation. - /// The number of keys that were touched. - /// https://redis.io/commands/touch - long KeyTouch(RedisKey[] keys, CommandFlags flags = CommandFlags.None); } } diff --git a/src/StackExchange.Redis/Interfaces/IDatabaseAsync.VectorSets.cs b/src/StackExchange.Redis/Interfaces/IDatabaseAsync.VectorSets.cs new file mode 100644 index 000000000..a2d9b4058 --- /dev/null +++ b/src/StackExchange.Redis/Interfaces/IDatabaseAsync.VectorSets.cs @@ -0,0 +1,119 @@ +using System; +using System.Diagnostics.CodeAnalysis; +using System.Threading.Tasks; +using RESPite; + +// ReSharper disable once CheckNamespace +namespace StackExchange.Redis; + +/// +/// Describes functionality that is common to both standalone redis servers and redis clusters. 
+/// +public partial interface IDatabaseAsync +{ + // Vector Set operations + + /// + [Experimental(Experiments.VectorSets, UrlFormat = Experiments.UrlFormat)] + Task VectorSetAddAsync( + RedisKey key, + VectorSetAddRequest request, + CommandFlags flags = CommandFlags.None); + + /// + [Experimental(Experiments.VectorSets, UrlFormat = Experiments.UrlFormat)] + Task VectorSetLengthAsync(RedisKey key, CommandFlags flags = CommandFlags.None); + + /// + [Experimental(Experiments.VectorSets, UrlFormat = Experiments.UrlFormat)] + Task VectorSetDimensionAsync(RedisKey key, CommandFlags flags = CommandFlags.None); + + /// + [Experimental(Experiments.VectorSets, UrlFormat = Experiments.UrlFormat)] + Task?> VectorSetGetApproximateVectorAsync( + RedisKey key, + RedisValue member, + CommandFlags flags = CommandFlags.None); + + /// + [Experimental(Experiments.VectorSets, UrlFormat = Experiments.UrlFormat)] + Task VectorSetGetAttributesJsonAsync( + RedisKey key, + RedisValue member, + CommandFlags flags = CommandFlags.None); + + /// + [Experimental(Experiments.VectorSets, UrlFormat = Experiments.UrlFormat)] + Task VectorSetInfoAsync(RedisKey key, CommandFlags flags = CommandFlags.None); + + /// + [Experimental(Experiments.VectorSets, UrlFormat = Experiments.UrlFormat)] + Task VectorSetContainsAsync(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None); + + /// + [Experimental(Experiments.VectorSets, UrlFormat = Experiments.UrlFormat)] + Task?> VectorSetGetLinksAsync( + RedisKey key, + RedisValue member, + CommandFlags flags = CommandFlags.None); + + /// + [Experimental(Experiments.VectorSets, UrlFormat = Experiments.UrlFormat)] + Task?> VectorSetGetLinksWithScoresAsync( + RedisKey key, + RedisValue member, + CommandFlags flags = CommandFlags.None); + + /// + [Experimental(Experiments.VectorSets, UrlFormat = Experiments.UrlFormat)] + Task VectorSetRandomMemberAsync(RedisKey key, CommandFlags flags = CommandFlags.None); + + /// + 
[Experimental(Experiments.VectorSets, UrlFormat = Experiments.UrlFormat)] + Task VectorSetRandomMembersAsync(RedisKey key, long count, CommandFlags flags = CommandFlags.None); + + /// + [Experimental(Experiments.VectorSets, UrlFormat = Experiments.UrlFormat)] + Task VectorSetRemoveAsync(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None); + + /// + [Experimental(Experiments.VectorSets, UrlFormat = Experiments.UrlFormat)] + Task VectorSetSetAttributesJsonAsync( + RedisKey key, + RedisValue member, +#if NET8_0_OR_GREATER + [StringSyntax(StringSyntaxAttribute.Json)] +#endif + string attributesJson, + CommandFlags flags = CommandFlags.None); + + /// + [Experimental(Experiments.VectorSets, UrlFormat = Experiments.UrlFormat)] + Task?> VectorSetSimilaritySearchAsync( + RedisKey key, + VectorSetSimilaritySearchRequest query, + CommandFlags flags = CommandFlags.None); + + /// + [Experimental(Experiments.VectorSets, UrlFormat = Experiments.UrlFormat)] + Task?> VectorSetRangeAsync( + RedisKey key, + RedisValue start = default, + RedisValue end = default, + long count = -1, + Exclude exclude = Exclude.None, + CommandFlags flags = CommandFlags.None); + + /// + [Experimental(Experiments.VectorSets, UrlFormat = Experiments.UrlFormat)] + System.Collections.Generic.IAsyncEnumerable VectorSetRangeEnumerateAsync( + RedisKey key, + RedisValue start = default, + RedisValue end = default, + long count = 100, + Exclude exclude = Exclude.None, + CommandFlags flags = CommandFlags.None); + + /// + Task StreamReadGroupAsync(StreamPosition[] streamPositions, RedisValue groupName, RedisValue consumerName, int? countPerStream = null, bool noAck = false, TimeSpan? 
claimMinIdleTime = null, CommandFlags flags = CommandFlags.None); +} diff --git a/src/StackExchange.Redis/Interfaces/IDatabaseAsync.cs b/src/StackExchange.Redis/Interfaces/IDatabaseAsync.cs index 496722667..c581470ca 100644 --- a/src/StackExchange.Redis/Interfaces/IDatabaseAsync.cs +++ b/src/StackExchange.Redis/Interfaces/IDatabaseAsync.cs @@ -1,1262 +1,548 @@ using System; using System.Collections.Generic; +using System.ComponentModel; +using System.Diagnostics.CodeAnalysis; +using System.IO; using System.Net; using System.Threading.Tasks; +using RESPite; +// ReSharper disable once CheckNamespace namespace StackExchange.Redis { /// - /// Describes functionality that is common to both standalone redis servers and redis clusters + /// Describes functionality that is common to both standalone redis servers and redis clusters. /// - public interface IDatabaseAsync : IRedisAsync + public partial interface IDatabaseAsync : IRedisAsync { /// - /// Indicates whether the instance can communicate with the server (resolved - /// using the supplied key and optional flags) + /// Indicates whether the instance can communicate with the server (resolved using the supplied key and optional flags). /// /// The key to check for. /// The flags to use for this operation. bool IsConnected(RedisKey key, CommandFlags flags = CommandFlags.None); - /// - /// Atomically transfer a key from a source Redis instance to a destination Redis instance. On success the key is deleted from the original instance by default, and is guaranteed to exist in the target instance. - /// - /// The key to migrate. - /// The server to migrate the key to. - /// The database to migrate the key to. - /// The timeout to use for the transfer. - /// The options to use for this migration. - /// The flags to use for this operation. 
- /// https://redis.io/commands/MIGRATE + /// Task KeyMigrateAsync(RedisKey key, EndPoint toServer, int toDatabase = 0, int timeoutMilliseconds = 0, MigrateOptions migrateOptions = MigrateOptions.None, CommandFlags flags = CommandFlags.None); - /// - /// Returns the raw DEBUG OBJECT output for a key; this command is not fully documented and should be avoided unless you have good reason, and then avoided anyway. - /// - /// The key to debug. - /// The flags to use for this migration. - /// The raw output from DEBUG OBJECT. - /// https://redis.io/commands/debug-object + /// " Task DebugObjectAsync(RedisKey key, CommandFlags flags = CommandFlags.None); - /// - /// Add the specified member to the set stored at key. Specified members that are already a member of this set are ignored. If key does not exist, a new set is created before adding the specified members. - /// - /// The key of the set. - /// The longitude of geo entry. - /// The latitude of the geo entry. - /// The value to set at this entry. - /// The flags to use for this operation. - /// True if the specified member was not already present in the set, else False. - /// https://redis.io/commands/geoadd + /// Task GeoAddAsync(RedisKey key, double longitude, double latitude, RedisValue member, CommandFlags flags = CommandFlags.None); - /// - /// Add the specified member to the set stored at key. Specified members that are already a member of this set are ignored. If key does not exist, a new set is created before adding the specified members. - /// - /// The key of the set. - /// The geo value to store. - /// The flags to use for this operation. 
- /// True if the specified member was not already present in the set, else False - /// https://redis.io/commands/geoadd - Task GeoAddAsync(RedisKey key, StackExchange.Redis.GeoEntry value, CommandFlags flags = CommandFlags.None); + /// + Task GeoAddAsync(RedisKey key, GeoEntry value, CommandFlags flags = CommandFlags.None); - /// - /// Add the specified members to the set stored at key. Specified members that are already a member of this set are ignored. If key does not exist, a new set is created before adding the specified members. - /// - /// The key of the set. - /// The geo values add to the set. - /// The flags to use for this operation. - /// The number of elements that were added to the set, not including all the elements already present into the set. - /// https://redis.io/commands/geoadd + /// Task GeoAddAsync(RedisKey key, GeoEntry[] values, CommandFlags flags = CommandFlags.None); - /// - /// Removes the specified member from the geo sorted set stored at key. Non existing members are ignored. - /// - /// The key of the set. - /// The geo value to remove. - /// The flags to use for this operation. - /// True if the member existed in the sorted set and was removed; False otherwise. - /// https://redis.io/commands/zrem + /// Task GeoRemoveAsync(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None); - /// - /// Return the distance between two members in the geospatial index represented by the sorted set. - /// - /// The key of the set. - /// The first member to check. - /// The second member to check. - /// The unit of distance to return (defaults to meters). - /// The flags to use for this operation. - /// The command returns the distance as a double (represented as a string) in the specified unit, or NULL if one or both the elements are missing. 
- /// https://redis.io/commands/geodist + /// Task GeoDistanceAsync(RedisKey key, RedisValue member1, RedisValue member2, GeoUnit unit = GeoUnit.Meters, CommandFlags flags = CommandFlags.None); - /// - /// Return valid Geohash strings representing the position of one or more elements in a sorted set value representing a geospatial index (where elements were added using GEOADD). - /// - /// The key of the set. - /// The members to get. - /// The flags to use for this operation. - /// The command returns an array where each element is the Geohash corresponding to each member name passed as argument to the command. - /// https://redis.io/commands/geohash - Task GeoHashAsync(RedisKey key, RedisValue[] members, CommandFlags flags = CommandFlags.None); + /// + Task GeoHashAsync(RedisKey key, RedisValue[] members, CommandFlags flags = CommandFlags.None); - /// - /// Return valid Geohash strings representing the position of one or more elements in a sorted set value representing a geospatial index (where elements were added using GEOADD). - /// - /// The key of the set. - /// The member to get. - /// The flags to use for this operation. - /// The command returns an array where each element is the Geohash corresponding to each member name passed as argument to the command. - /// https://redis.io/commands/geohash - Task GeoHashAsync(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None); + /// + Task GeoHashAsync(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None); - /// - /// Return the positions (longitude,latitude) of all the specified members of the geospatial index represented by the sorted set at key. - /// - /// The key of the set. - /// The members to get. - /// The flags to use for this operation. 
- /// The command returns an array where each element is a two elements array representing longitude and latitude (x,y) of each member name passed as argument to the command.Non existing elements are reported as NULL elements of the array. - /// https://redis.io/commands/geopos + /// Task GeoPositionAsync(RedisKey key, RedisValue[] members, CommandFlags flags = CommandFlags.None); - /// - /// Return the positions (longitude,latitude) of all the specified members of the geospatial index represented by the sorted set at key. - /// - /// The key of the set. - /// The member to get. - /// The flags to use for this operation. - /// The command returns an array where each element is a two elements array representing longitude and latitude (x,y) of each member name passed as argument to the command.Non existing elements are reported as NULL elements of the array. - /// https://redis.io/commands/geopos + /// Task GeoPositionAsync(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None); - /// - /// Return the members of a sorted set populated with geospatial information using GEOADD, which are within the borders of the area specified with the center location and the maximum distance from the center (the radius). - /// - /// The key of the set. - /// The member to get a radius of results from. - /// The radius to check. - /// The unit of (defaults to meters). - /// The count of results to get, -1 for unlimited. - /// The order of the results. - /// The search options to use. - /// The flags to use for this operation. - /// The results found within the radius, if any. - /// https://redis.io/commands/georadius + /// Task GeoRadiusAsync(RedisKey key, RedisValue member, double radius, GeoUnit unit = GeoUnit.Meters, int count = -1, Order? 
order = null, GeoRadiusOptions options = GeoRadiusOptions.Default, CommandFlags flags = CommandFlags.None); - /// - /// Return the members of a sorted set populated with geospatial information using GEOADD, which are within the borders of the area specified with the center location and the maximum distance from the center (the radius). - /// - /// The key of the set. - /// The longitude of the point to get a radius of results from. - /// The latitude of the point to get a radius of results from. - /// The radius to check. - /// The unit of (defaults to meters). - /// The count of results to get, -1 for unlimited. - /// The order of the results. - /// The search options to use. - /// The flags to use for this operation. - /// The results found within the radius, if any. - /// https://redis.io/commands/georadius + /// Task GeoRadiusAsync(RedisKey key, double longitude, double latitude, double radius, GeoUnit unit = GeoUnit.Meters, int count = -1, Order? order = null, GeoRadiusOptions options = GeoRadiusOptions.Default, CommandFlags flags = CommandFlags.None); - /// - /// Decrements the number stored at field in the hash stored at key by decrement. If key does not exist, a new key holding a hash is created. If field does not exist the value is set to 0 before the operation is performed. - /// - /// The key of the hash. - /// The field in the hash to decrement. - /// The amount to decrement by. - /// The flags to use for this operation. - /// The value at field after the decrement operation. - /// The range of values supported by HINCRBY is limited to 64 bit signed integers. - /// https://redis.io/commands/hincrby + /// + Task GeoSearchAsync(RedisKey key, RedisValue member, GeoSearchShape shape, int count = -1, bool demandClosest = true, Order? 
order = null, GeoRadiusOptions options = GeoRadiusOptions.Default, CommandFlags flags = CommandFlags.None); + + /// + Task GeoSearchAsync(RedisKey key, double longitude, double latitude, GeoSearchShape shape, int count = -1, bool demandClosest = true, Order? order = null, GeoRadiusOptions options = GeoRadiusOptions.Default, CommandFlags flags = CommandFlags.None); + + /// + Task GeoSearchAndStoreAsync(RedisKey sourceKey, RedisKey destinationKey, RedisValue member, GeoSearchShape shape, int count = -1, bool demandClosest = true, Order? order = null, bool storeDistances = false, CommandFlags flags = CommandFlags.None); + + /// + Task GeoSearchAndStoreAsync(RedisKey sourceKey, RedisKey destinationKey, double longitude, double latitude, GeoSearchShape shape, int count = -1, bool demandClosest = true, Order? order = null, bool storeDistances = false, CommandFlags flags = CommandFlags.None); + + /// Task HashDecrementAsync(RedisKey key, RedisValue hashField, long value = 1, CommandFlags flags = CommandFlags.None); - /// - /// Decrement the specified field of an hash stored at key, and representing a floating point number, by the specified decrement. If the field does not exist, it is set to 0 before performing the operation. - /// - /// The key of the hash. - /// The field in the hash to decrement. - /// The amount to decrement by. - /// The flags to use for this operation. - /// The value at field after the decrement operation. - /// The precision of the output is fixed at 17 digits after the decimal point regardless of the actual internal precision of the computation. - /// https://redis.io/commands/hincrbyfloat + /// Task HashDecrementAsync(RedisKey key, RedisValue hashField, double value, CommandFlags flags = CommandFlags.None); - /// - /// Removes the specified fields from the hash stored at key. Non-existing fields are ignored. Non-existing keys are treated as empty hashes and this command returns 0. - /// - /// The key of the hash. 
- /// The field in the hash to delete. - /// The flags to use for this operation. - /// The number of fields that were removed. - /// https://redis.io/commands/hdel + /// Task HashDeleteAsync(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None); - /// - /// Removes the specified fields from the hash stored at key. Non-existing fields are ignored. Non-existing keys are treated as empty hashes and this command returns 0. - /// - /// The key of the hash. - /// The fields in the hash to delete. - /// The flags to use for this operation. - /// The number of fields that were removed. - /// https://redis.io/commands/hdel + /// Task HashDeleteAsync(RedisKey key, RedisValue[] hashFields, CommandFlags flags = CommandFlags.None); - /// - /// Returns if field is an existing field in the hash stored at key. - /// - /// The key of the hash. - /// The field in the hash to check. - /// The flags to use for this operation. - /// 1 if the hash contains field. 0 if the hash does not contain field, or key does not exist. - /// https://redis.io/commands/hexists + /// Task HashExistsAsync(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None); - /// - /// Returns the value associated with field in the hash stored at key. - /// - /// The key of the hash. - /// The field in the hash to get. - /// The flags to use for this operation. - /// The value associated with field, or nil when field is not present in the hash or key does not exist. - /// https://redis.io/commands/hget + /// + Task HashFieldGetAndDeleteAsync(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None); + + /// + Task?> HashFieldGetLeaseAndDeleteAsync(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None); + + /// + Task HashFieldGetAndDeleteAsync(RedisKey key, RedisValue[] hashFields, CommandFlags flags = CommandFlags.None); + + /// + Task HashFieldGetAndSetExpiryAsync(RedisKey key, RedisValue hashField, TimeSpan? 
expiry = null, bool persist = false, CommandFlags flags = CommandFlags.None); + + /// + Task HashFieldGetAndSetExpiryAsync(RedisKey key, RedisValue hashField, DateTime expiry, CommandFlags flags = CommandFlags.None); + + /// + Task?> HashFieldGetLeaseAndSetExpiryAsync(RedisKey key, RedisValue hashField, TimeSpan? expiry = null, bool persist = false, CommandFlags flags = CommandFlags.None); + + /// + Task?> HashFieldGetLeaseAndSetExpiryAsync(RedisKey key, RedisValue hashField, DateTime expiry, CommandFlags flags = CommandFlags.None); + + /// + Task HashFieldGetAndSetExpiryAsync(RedisKey key, RedisValue[] hashFields, TimeSpan? expiry = null, bool persist = false, CommandFlags flags = CommandFlags.None); + + /// + Task HashFieldGetAndSetExpiryAsync(RedisKey key, RedisValue[] hashFields, DateTime expiry, CommandFlags flags = CommandFlags.None); + + /// + Task HashFieldSetAndSetExpiryAsync(RedisKey key, RedisValue field, RedisValue value, TimeSpan? expiry = null, bool keepTtl = false, When when = When.Always, CommandFlags flags = CommandFlags.None); + + /// + Task HashFieldSetAndSetExpiryAsync(RedisKey key, RedisValue field, RedisValue value, DateTime expiry, When when = When.Always, CommandFlags flags = CommandFlags.None); + + /// + Task HashFieldSetAndSetExpiryAsync(RedisKey key, HashEntry[] hashFields, TimeSpan? 
expiry = null, bool keepTtl = false, When when = When.Always, CommandFlags flags = CommandFlags.None); + + /// + Task HashFieldSetAndSetExpiryAsync(RedisKey key, HashEntry[] hashFields, DateTime expiry, When when = When.Always, CommandFlags flags = CommandFlags.None); + + /// + Task HashFieldExpireAsync(RedisKey key, RedisValue[] hashFields, TimeSpan expiry, ExpireWhen when = ExpireWhen.Always, CommandFlags flags = CommandFlags.None); + + /// + Task HashFieldExpireAsync(RedisKey key, RedisValue[] hashFields, DateTime expiry, ExpireWhen when = ExpireWhen.Always, CommandFlags flags = CommandFlags.None); + + /// + Task HashFieldGetExpireDateTimeAsync(RedisKey key, RedisValue[] hashFields, CommandFlags flags = CommandFlags.None); + + /// + Task HashFieldPersistAsync(RedisKey key, RedisValue[] hashFields, CommandFlags flags = CommandFlags.None); + + /// + Task HashFieldGetTimeToLiveAsync(RedisKey key, RedisValue[] hashFields, CommandFlags flags = CommandFlags.None); + + /// Task HashGetAsync(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None); - /// - /// Returns the value associated with field in the hash stored at key. - /// - /// The key of the hash. - /// The field in the hash to get. - /// The flags to use for this operation. - /// The value associated with field, or nil when field is not present in the hash or key does not exist. - /// https://redis.io/commands/hget - Task> HashGetLeaseAsync(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None); + /// + Task?> HashGetLeaseAsync(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None); - /// - /// Returns the values associated with the specified fields in the hash stored at key. - /// For every field that does not exist in the hash, a nil value is returned.Because a non-existing keys are treated as empty hashes, running HMGET against a non-existing key will return a list of nil values. - /// - /// The key of the hash. 
- /// The fields in the hash to get. - /// The flags to use for this operation. - /// List of values associated with the given fields, in the same order as they are requested. - /// https://redis.io/commands/hmget + /// Task HashGetAsync(RedisKey key, RedisValue[] hashFields, CommandFlags flags = CommandFlags.None); - /// - /// Returns all fields and values of the hash stored at key. - /// - /// The key of the hash to get all entries from. - /// The flags to use for this operation. - /// List of fields and their values stored in the hash, or an empty list when key does not exist. - /// https://redis.io/commands/hgetall + /// Task HashGetAllAsync(RedisKey key, CommandFlags flags = CommandFlags.None); - /// - /// Increments the number stored at field in the hash stored at key by increment. If key does not exist, a new key holding a hash is created. If field does not exist the value is set to 0 before the operation is performed. - /// - /// The key of the hash. - /// The field in the hash to increment. - /// The amount to increment by. - /// The flags to use for this operation. - /// The value at field after the increment operation. - /// The range of values supported by HINCRBY is limited to 64 bit signed integers. - /// https://redis.io/commands/hincrby + /// Task HashIncrementAsync(RedisKey key, RedisValue hashField, long value = 1, CommandFlags flags = CommandFlags.None); - /// - /// Increment the specified field of an hash stored at key, and representing a floating point number, by the specified increment. If the field does not exist, it is set to 0 before performing the operation. - /// - /// The key of the hash. - /// The field in the hash to increment. - /// The amount to increment by. - /// The flags to use for this operation. - /// The value at field after the increment operation. - /// The precision of the output is fixed at 17 digits after the decimal point regardless of the actual internal precision of the computation. 
- /// https://redis.io/commands/hincrbyfloat + /// Task HashIncrementAsync(RedisKey key, RedisValue hashField, double value, CommandFlags flags = CommandFlags.None); - /// - /// Returns all field names in the hash stored at key. - /// - /// The key of the hash. - /// The flags to use for this operation. - /// List of fields in the hash, or an empty list when key does not exist. - /// https://redis.io/commands/hkeys + /// Task HashKeysAsync(RedisKey key, CommandFlags flags = CommandFlags.None); - /// - /// Returns the number of fields contained in the hash stored at key. - /// - /// The key of the hash. - /// The flags to use for this operation. - /// The number of fields in the hash, or 0 when key does not exist. - /// https://redis.io/commands/hlen + /// Task HashLengthAsync(RedisKey key, CommandFlags flags = CommandFlags.None); - /// - /// The HSCAN command is used to incrementally iterate over a hash; note: to resume an iteration via cursor, cast the original enumerable or enumerator to IScanningCursor. - /// - /// The key of the hash. - /// The pattern of keys to get entries for. - /// The page size to iterate by. - /// The cursor position to start at. - /// The page offset to start at. - /// The flags to use for this operation. - /// Yields all elements of the hash matching the pattern. - /// https://redis.io/commands/hscan - IAsyncEnumerable HashScanAsync(RedisKey key, RedisValue pattern = default(RedisValue), int pageSize = RedisBase.CursorUtils.DefaultLibraryPageSize, long cursor = RedisBase.CursorUtils.Origin, int pageOffset = 0, CommandFlags flags = CommandFlags.None); + /// + Task HashRandomFieldAsync(RedisKey key, CommandFlags flags = CommandFlags.None); - /// - /// Sets the specified fields to their respective values in the hash stored at key. This command overwrites any specified fields that already exist in the hash, leaving other unspecified fields untouched. If key does not exist, a new key holding a hash is created. 
- /// - /// The key of the hash. - /// The entries to set in the hash. - /// The flags to use for this operation. - /// https://redis.io/commands/hmset + /// + Task HashRandomFieldsAsync(RedisKey key, long count, CommandFlags flags = CommandFlags.None); + + /// + Task HashRandomFieldsWithValuesAsync(RedisKey key, long count, CommandFlags flags = CommandFlags.None); + + /// + IAsyncEnumerable HashScanAsync(RedisKey key, RedisValue pattern = default, int pageSize = RedisBase.CursorUtils.DefaultLibraryPageSize, long cursor = RedisBase.CursorUtils.Origin, int pageOffset = 0, CommandFlags flags = CommandFlags.None); + + /// + IAsyncEnumerable HashScanNoValuesAsync(RedisKey key, RedisValue pattern = default, int pageSize = RedisBase.CursorUtils.DefaultLibraryPageSize, long cursor = RedisBase.CursorUtils.Origin, int pageOffset = 0, CommandFlags flags = CommandFlags.None); + + /// Task HashSetAsync(RedisKey key, HashEntry[] hashFields, CommandFlags flags = CommandFlags.None); - /// - /// Sets field in the hash stored at key to value. If key does not exist, a new key holding a hash is created. If field already exists in the hash, it is overwritten. - /// - /// The key of the hash. - /// The field to set in the hash. - /// The value to set. - /// Which conditions under which to set the field value (defaults to always). - /// The flags to use for this operation. - /// 1 if field is a new field in the hash and value was set. 0 if field already exists in the hash and the value was updated. - /// https://redis.io/commands/hset - /// https://redis.io/commands/hsetnx + /// Task HashSetAsync(RedisKey key, RedisValue hashField, RedisValue value, When when = When.Always, CommandFlags flags = CommandFlags.None); - /// - /// Returns the string length of the value associated with field in the hash stored at key. - /// - /// The key of the hash. - /// The field containing the string - /// The flags to use for this operation. 
- /// the length of the string at field, or 0 when key does not exist. - /// https://redis.io/commands/hstrlen + /// Task HashStringLengthAsync(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None); - /// - /// Returns all values in the hash stored at key. - /// - /// The key of the hash. - /// The flags to use for this operation. - /// List of values in the hash, or an empty list when key does not exist. - /// https://redis.io/commands/hvals + /// Task HashValuesAsync(RedisKey key, CommandFlags flags = CommandFlags.None); - /// - /// Adds the element to the HyperLogLog data structure stored at the variable name specified as first argument. - /// - /// The key of the hyperloglog. - /// The value to add. - /// The flags to use for this operation. - /// True if at least 1 HyperLogLog internal register was altered, false otherwise. - /// https://redis.io/commands/pfadd + /// Task HyperLogLogAddAsync(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None); - /// - /// Adds all the element arguments to the HyperLogLog data structure stored at the variable name specified as first argument. - /// - /// The key of the hyperloglog. - /// The values to add. - /// The flags to use for this operation. - /// True if at least 1 HyperLogLog internal register was altered, false otherwise. - /// https://redis.io/commands/pfadd + /// Task HyperLogLogAddAsync(RedisKey key, RedisValue[] values, CommandFlags flags = CommandFlags.None); - /// - /// Returns the approximated cardinality computed by the HyperLogLog data structure stored at the specified variable, or 0 if the variable does not exist. - /// - /// The key of the hyperloglog. - /// The flags to use for this operation. - /// The approximated number of unique elements observed via HyperLogLogAdd. 
- /// https://redis.io/commands/pfcount + /// Task HyperLogLogLengthAsync(RedisKey key, CommandFlags flags = CommandFlags.None); - /// - /// Returns the approximated cardinality of the union of the HyperLogLogs passed, by internally merging the HyperLogLogs stored at the provided keys into a temporary hyperLogLog, or 0 if the variable does not exist. - /// - /// The keys of the hyperloglogs. - /// The flags to use for this operation. - /// The approximated number of unique elements observed via HyperLogLogAdd. - /// https://redis.io/commands/pfcount + /// Task HyperLogLogLengthAsync(RedisKey[] keys, CommandFlags flags = CommandFlags.None); - /// - /// Merge multiple HyperLogLog values into an unique value that will approximate the cardinality of the union of the observed Sets of the source HyperLogLog structures. - /// - /// The key of the merged hyperloglog. - /// The key of the first hyperloglog to merge. - /// The key of the first hyperloglog to merge. - /// The flags to use for this operation. - /// https://redis.io/commands/pfmerge + /// Task HyperLogLogMergeAsync(RedisKey destination, RedisKey first, RedisKey second, CommandFlags flags = CommandFlags.None); - /// - /// Merge multiple HyperLogLog values into an unique value that will approximate the cardinality of the union of the observed Sets of the source HyperLogLog structures. - /// - /// The key of the merged hyperloglog. - /// The keys of the hyperloglogs to merge. - /// The flags to use for this operation. - /// https://redis.io/commands/pfmerge + /// Task HyperLogLogMergeAsync(RedisKey destination, RedisKey[] sourceKeys, CommandFlags flags = CommandFlags.None); - /// - /// Indicate exactly which redis server we are talking to. - /// - /// The key to check. - /// The flags to use for this operation. - /// The endpoint serving the key. 
- Task IdentifyEndpointAsync(RedisKey key = default(RedisKey), CommandFlags flags = CommandFlags.None); + /// + Task IdentifyEndpointAsync(RedisKey key = default, CommandFlags flags = CommandFlags.None); - /// - /// Removes the specified key. A key is ignored if it does not exist. - /// If UNLINK is available (Redis 4.0+), it will be used. - /// - /// The key to delete. - /// The flags to use for this operation. - /// True if the key was removed. - /// https://redis.io/commands/del - /// https://redis.io/commands/unlink + /// + Task KeyCopyAsync(RedisKey sourceKey, RedisKey destinationKey, int destinationDatabase = -1, bool replace = false, CommandFlags flags = CommandFlags.None); + + /// Task KeyDeleteAsync(RedisKey key, CommandFlags flags = CommandFlags.None); - /// - /// Removes the specified keys. A key is ignored if it does not exist. - /// If UNLINK is available (Redis 4.0+), it will be used. - /// - /// The keys to delete. - /// The flags to use for this operation. - /// The number of keys that were removed. - /// https://redis.io/commands/del - /// https://redis.io/commands/unlink + /// Task KeyDeleteAsync(RedisKey[] keys, CommandFlags flags = CommandFlags.None); - /// - /// Serialize the value stored at key in a Redis-specific format and return it to the user. The returned value can be synthesized back into a Redis key using the RESTORE command. - /// - /// The key to dump. - /// The flags to use for this operation. - /// the serialized value. - /// https://redis.io/commands/dump - Task KeyDumpAsync(RedisKey key, CommandFlags flags = CommandFlags.None); + /// + Task KeyDumpAsync(RedisKey key, CommandFlags flags = CommandFlags.None); - /// - /// Returns if key exists. - /// - /// The key to check. - /// The flags to use for this operation. - /// 1 if the key exists. 0 if the key does not exist. 
- /// https://redis.io/commands/exists + /// + Task KeyEncodingAsync(RedisKey key, CommandFlags flags = CommandFlags.None); + + /// Task KeyExistsAsync(RedisKey key, CommandFlags flags = CommandFlags.None); - /// - /// Indicates how many of the supplied keys exists. - /// - /// The keys to check. - /// The flags to use for this operation. - /// The number of keys that existed. - /// https://redis.io/commands/exists + /// Task KeyExistsAsync(RedisKey[] keys, CommandFlags flags = CommandFlags.None); - /// - /// Set a timeout on key. After the timeout has expired, the key will automatically be deleted. A key with an associated timeout is said to be volatile in Redis terminology. - /// - /// The key to set the expiration for. - /// The timeout to set. - /// The flags to use for this operation. - /// 1 if the timeout was set. 0 if key does not exist or the timeout could not be set. - /// If key is updated before the timeout has expired, then the timeout is removed as if the PERSIST command was invoked on key. - /// For Redis versions < 2.1.3, existing timeouts cannot be overwritten. So, if key already has an associated timeout, it will do nothing and return 0. Since Redis 2.1.3, you can update the timeout of a key. It is also possible to remove the timeout using the PERSIST command. See the page on key expiry for more information. - /// https://redis.io/commands/expire - /// https://redis.io/commands/pexpire - /// https://redis.io/commands/persist - Task KeyExpireAsync(RedisKey key, TimeSpan? expiry, CommandFlags flags = CommandFlags.None); + /// + [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] + Task KeyExpireAsync(RedisKey key, TimeSpan? expiry, CommandFlags flags); - /// - /// Set a timeout on key. After the timeout has expired, the key will automatically be deleted. A key with an associated timeout is said to be volatile in Redis terminology. - /// - /// The key to set the expiration for. - /// The exact date to expiry to set. 
- /// The flags to use for this operation. - /// 1 if the timeout was set. 0 if key does not exist or the timeout could not be set. - /// If key is updated before the timeout has expired, then the timeout is removed as if the PERSIST command was invoked on key. - /// For Redis versions < 2.1.3, existing timeouts cannot be overwritten. So, if key already has an associated timeout, it will do nothing and return 0. Since Redis 2.1.3, you can update the timeout of a key. It is also possible to remove the timeout using the PERSIST command. See the page on key expiry for more information. - /// https://redis.io/commands/expireat - /// https://redis.io/commands/pexpireat - /// https://redis.io/commands/persist - Task KeyExpireAsync(RedisKey key, DateTime? expiry, CommandFlags flags = CommandFlags.None); + /// + Task KeyExpireAsync(RedisKey key, TimeSpan? expiry, ExpireWhen when = ExpireWhen.Always, CommandFlags flags = CommandFlags.None); - /// - /// Returns the time since the object stored at the specified key is idle (not requested by read or write operations) - /// - /// The key to get the time of. - /// The flags to use for this operation. - /// The time since the object stored at the specified key is idle - /// https://redis.io/commands/object + /// + [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] + Task KeyExpireAsync(RedisKey key, DateTime? expiry, CommandFlags flags); + + /// + Task KeyExpireAsync(RedisKey key, DateTime? expiry, ExpireWhen when = ExpireWhen.Always, CommandFlags flags = CommandFlags.None); + + /// + Task KeyExpireTimeAsync(RedisKey key, CommandFlags flags = CommandFlags.None); + + /// + Task KeyFrequencyAsync(RedisKey key, CommandFlags flags = CommandFlags.None); + + /// Task KeyIdleTimeAsync(RedisKey key, CommandFlags flags = CommandFlags.None); - /// - /// Move key from the currently selected database (see SELECT) to the specified destination database. 
When key already exists in the destination database, or it does not exist in the source database, it does nothing. It is possible to use MOVE as a locking primitive because of this. - /// - /// The key to move. - /// The database to move the key to. - /// The flags to use for this operation. - /// 1 if key was moved; 0 if key was not moved. - /// https://redis.io/commands/move + /// Task KeyMoveAsync(RedisKey key, int database, CommandFlags flags = CommandFlags.None); - /// - /// Remove the existing timeout on key, turning the key from volatile (a key with an expire set) to persistent (a key that will never expire as no timeout is associated). - /// - /// The key to persist. - /// The flags to use for this operation. - /// 1 if the timeout was removed. 0 if key does not exist or does not have an associated timeout. - /// https://redis.io/commands/persist + /// Task KeyPersistAsync(RedisKey key, CommandFlags flags = CommandFlags.None); - /// - /// Return a random key from the currently selected database. - /// - /// The flags to use for this operation. - /// The random key, or nil when the database is empty. - /// https://redis.io/commands/randomkey + /// Task KeyRandomAsync(CommandFlags flags = CommandFlags.None); - /// - /// Renames key to newkey. It returns an error when the source and destination names are the same, or when key does not exist. - /// - /// The key to rename. - /// The key to rename to. - /// What conditions to rename under (defaults to always). - /// The flags to use for this operation. - /// True if the key was renamed, false otherwise. 
- /// https://redis.io/commands/rename - /// https://redis.io/commands/renamenx + /// + Task KeyRefCountAsync(RedisKey key, CommandFlags flags = CommandFlags.None); + + /// Task KeyRenameAsync(RedisKey key, RedisKey newKey, When when = When.Always, CommandFlags flags = CommandFlags.None); - /// - /// Create a key associated with a value that is obtained by deserializing the provided serialized value (obtained via DUMP). - /// If ttl is 0 the key is created without any expire, otherwise the specified expire time(in milliseconds) is set. - /// - /// The key to restore. - /// The value of the key. - /// The expiry to set. - /// The flags to use for this operation. - /// https://redis.io/commands/restore + /// Task KeyRestoreAsync(RedisKey key, byte[] value, TimeSpan? expiry = null, CommandFlags flags = CommandFlags.None); - /// - /// Returns the remaining time to live of a key that has a timeout. This introspection capability allows a Redis client to check how many seconds a given key will continue to be part of the dataset. - /// - /// The key to check. - /// The flags to use for this operation. - /// TTL, or nil when key does not exist or does not have a timeout. - /// https://redis.io/commands/ttl + /// Task KeyTimeToLiveAsync(RedisKey key, CommandFlags flags = CommandFlags.None); - /// - /// Returns the string representation of the type of the value stored at key. The different types that can be returned are: string, list, set, zset and hash. - /// - /// The key to get the type of. - /// The flags to use for this operation. - /// Type of key, or none when key does not exist. - /// https://redis.io/commands/type + /// + Task KeyTouchAsync(RedisKey key, CommandFlags flags = CommandFlags.None); + + /// + Task KeyTouchAsync(RedisKey[] keys, CommandFlags flags = CommandFlags.None); + + /// Task KeyTypeAsync(RedisKey key, CommandFlags flags = CommandFlags.None); - /// - /// Returns the element at index index in the list stored at key. 
The index is zero-based, so 0 means the first element, 1 the second element and so on. Negative indices can be used to designate elements starting at the tail of the list. Here, -1 means the last element, -2 means the penultimate and so forth. - /// - /// The key of the list. - /// The index position to ge the value at. - /// The flags to use for this operation. - /// The requested element, or nil when index is out of range. - /// https://redis.io/commands/lindex + /// Task ListGetByIndexAsync(RedisKey key, long index, CommandFlags flags = CommandFlags.None); - /// - /// Inserts value in the list stored at key either before or after the reference value pivot. - /// When key does not exist, it is considered an empty list and no operation is performed. - /// - /// The key of the list. - /// The value to insert after. - /// The value to insert. - /// The flags to use for this operation. - /// The length of the list after the insert operation, or -1 when the value pivot was not found. - /// https://redis.io/commands/linsert + /// Task ListInsertAfterAsync(RedisKey key, RedisValue pivot, RedisValue value, CommandFlags flags = CommandFlags.None); - /// - /// Inserts value in the list stored at key either before or after the reference value pivot. - /// When key does not exist, it is considered an empty list and no operation is performed. - /// - /// The key of the list. - /// The value to insert before. - /// The value to insert. - /// The flags to use for this operation. - /// The length of the list after the insert operation, or -1 when the value pivot was not found. - /// https://redis.io/commands/linsert + /// Task ListInsertBeforeAsync(RedisKey key, RedisValue pivot, RedisValue value, CommandFlags flags = CommandFlags.None); - /// - /// Removes and returns the first element of the list stored at key. - /// - /// The key of the list. - /// The flags to use for this operation. - /// The value of the first element, or nil when key does not exist. 
- /// https://redis.io/commands/lpop + /// Task ListLeftPopAsync(RedisKey key, CommandFlags flags = CommandFlags.None); - /// - /// Insert the specified value at the head of the list stored at key. If key does not exist, it is created as empty list before performing the push operations. - /// - /// The key of the list. - /// The value to add to the head of the list. - /// Which conditions to add to the list under (defaults to always). - /// The flags to use for this operation. - /// The length of the list after the push operations. - /// https://redis.io/commands/lpush - /// https://redis.io/commands/lpushx + /// + Task ListLeftPopAsync(RedisKey key, long count, CommandFlags flags = CommandFlags.None); + + /// + Task ListLeftPopAsync(RedisKey[] keys, long count, CommandFlags flags = CommandFlags.None); + + /// + Task ListPositionAsync(RedisKey key, RedisValue element, long rank = 1, long maxLength = 0, CommandFlags flags = CommandFlags.None); + + /// + Task ListPositionsAsync(RedisKey key, RedisValue element, long count, long rank = 1, long maxLength = 0, CommandFlags flags = CommandFlags.None); + + /// Task ListLeftPushAsync(RedisKey key, RedisValue value, When when = When.Always, CommandFlags flags = CommandFlags.None); - /// - /// Insert the specified value at the head of the list stored at key. If key does not exist, it is created as empty list before performing the push operations. - /// - /// The key of the list. - /// The value to add to the head of the list. - /// Which conditions to add to the list under (defaults to always). - /// The flags to use for this operation. - /// The length of the list after the push operations. - /// https://redis.io/commands/lpush - /// https://redis.io/commands/lpushx + /// Task ListLeftPushAsync(RedisKey key, RedisValue[] values, When when = When.Always, CommandFlags flags = CommandFlags.None); - /// - /// Insert all the specified values at the head of the list stored at key. 
If key does not exist, it is created as empty list before performing the push operations. - /// Elements are inserted one after the other to the head of the list, from the leftmost element to the rightmost element. So for instance the command LPUSH mylist a b c will result into a list containing c as first element, b as second element and a as third element. - /// - /// The key of the list. - /// The values to add to the head of the list. - /// The flags to use for this operation. - /// The length of the list after the push operations. - /// https://redis.io/commands/lpush + /// Task ListLeftPushAsync(RedisKey key, RedisValue[] values, CommandFlags flags); - /// - /// Returns the length of the list stored at key. If key does not exist, it is interpreted as an empty list and 0 is returned. - /// - /// The key of the list. - /// The flags to use for this operation. - /// The length of the list at key. - /// https://redis.io/commands/llen + /// Task ListLengthAsync(RedisKey key, CommandFlags flags = CommandFlags.None); - /// - /// Returns the specified elements of the list stored at key. The offsets start and stop are zero-based indexes, with 0 being the first element of the list (the head of the list), 1 being the next element and so on. - /// These offsets can also be negative numbers indicating offsets starting at the end of the list.For example, -1 is the last element of the list, -2 the penultimate, and so on. - /// Note that if you have a list of numbers from 0 to 100, LRANGE list 0 10 will return 11 elements, that is, the rightmost item is included. - /// - /// The key of the list. - /// The start index of the list. - /// The stop index of the list. - /// The flags to use for this operation. - /// List of elements in the specified range. 
- /// https://redis.io/commands/lrange + /// + Task ListMoveAsync(RedisKey sourceKey, RedisKey destinationKey, ListSide sourceSide, ListSide destinationSide, CommandFlags flags = CommandFlags.None); + + /// Task ListRangeAsync(RedisKey key, long start = 0, long stop = -1, CommandFlags flags = CommandFlags.None); - /// - /// Removes the first count occurrences of elements equal to value from the list stored at key. The count argument influences the operation in the following ways: - /// count > 0: Remove elements equal to value moving from head to tail. - /// count < 0: Remove elements equal to value moving from tail to head. - /// count = 0: Remove all elements equal to value. - /// - /// The key of the list. - /// The value to remove from the list. - /// The count behavior (see method summary). - /// The flags to use for this operation. - /// The number of removed elements. - /// https://redis.io/commands/lrem + /// Task ListRemoveAsync(RedisKey key, RedisValue value, long count = 0, CommandFlags flags = CommandFlags.None); - /// - /// Removes and returns the last element of the list stored at key. - /// - /// The key of the list. - /// The flags to use for this operation. - /// The element being popped. - /// https://redis.io/commands/rpop + /// Task ListRightPopAsync(RedisKey key, CommandFlags flags = CommandFlags.None); - /// - /// Atomically returns and removes the last element (tail) of the list stored at source, and pushes the element at the first element (head) of the list stored at destination. - /// - /// The key of the source list. - /// The key of the destination list. - /// The flags to use for this operation. - /// The element being popped and pushed. 
- /// https://redis.io/commands/rpoplpush + /// + Task ListRightPopAsync(RedisKey key, long count, CommandFlags flags = CommandFlags.None); + + /// + Task ListRightPopAsync(RedisKey[] keys, long count, CommandFlags flags = CommandFlags.None); + + /// Task ListRightPopLeftPushAsync(RedisKey source, RedisKey destination, CommandFlags flags = CommandFlags.None); - /// - /// Insert the specified value at the tail of the list stored at key. If key does not exist, it is created as empty list before performing the push operation. - /// - /// The key of the list. - /// The value to add to the tail of the list. - /// Which conditions to add to the list under. - /// The flags to use for this operation. - /// The length of the list after the push operation. - /// https://redis.io/commands/rpush - /// https://redis.io/commands/rpushx + /// Task ListRightPushAsync(RedisKey key, RedisValue value, When when = When.Always, CommandFlags flags = CommandFlags.None); - /// - /// Insert the specified value at the tail of the list stored at key. If key does not exist, it is created as empty list before performing the push operation. - /// - /// The key of the list. - /// The values to add to the tail of the list. - /// Which conditions to add to the list under. - /// The flags to use for this operation. - /// The length of the list after the push operation. - /// https://redis.io/commands/rpush - /// https://redis.io/commands/rpushx + /// Task ListRightPushAsync(RedisKey key, RedisValue[] values, When when = When.Always, CommandFlags flags = CommandFlags.None); - /// - /// Insert all the specified values at the tail of the list stored at key. If key does not exist, it is created as empty list before performing the push operation. - /// Elements are inserted one after the other to the tail of the list, from the leftmost element to the rightmost element. 
So for instance the command RPUSH mylist a b c will result into a list containing a as first element, b as second element and c as third element. - /// - /// The key of the list. - /// The values to add to the tail of the list. - /// The flags to use for this operation. - /// The length of the list after the push operation. - /// https://redis.io/commands/rpush + /// Task ListRightPushAsync(RedisKey key, RedisValue[] values, CommandFlags flags); - /// - /// Sets the list element at index to value. For more information on the index argument, see ListGetByIndex. An error is returned for out of range indexes. - /// - /// The key of the list. - /// The index to set the value at. - /// The values to add to the list. - /// The flags to use for this operation. - /// https://redis.io/commands/lset + /// Task ListSetByIndexAsync(RedisKey key, long index, RedisValue value, CommandFlags flags = CommandFlags.None); - /// - /// Trim an existing list so that it will contain only the specified range of elements specified. Both start and stop are zero-based indexes, where 0 is the first element of the list (the head), 1 the next element and so on. - /// For example: LTRIM foobar 0 2 will modify the list stored at foobar so that only the first three elements of the list will remain. - /// start and end can also be negative numbers indicating offsets from the end of the list, where -1 is the last element of the list, -2 the penultimate element and so on. - /// - /// The key of the list. - /// The start index of the list to trim to. - /// The end index of the list to trim to. - /// The flags to use for this operation. - /// https://redis.io/commands/ltrim + /// Task ListTrimAsync(RedisKey key, long start, long stop, CommandFlags flags = CommandFlags.None); - /// - /// Extends a lock, if the token value is correct. - /// - /// The key of the lock. - /// The value to set at the key. - /// The expiration of the lock key. - /// The flags to use for this operation. 
- /// True if the lock was successfully extended. + /// Task LockExtendAsync(RedisKey key, RedisValue value, TimeSpan expiry, CommandFlags flags = CommandFlags.None); - /// - /// Queries the token held against a lock. - /// - /// The key of the lock. - /// The flags to use for this operation. - /// The current value of the lock, if any. + /// Task LockQueryAsync(RedisKey key, CommandFlags flags = CommandFlags.None); - /// - /// Releases a lock, if the token value is correct. - /// - /// The key of the lock. - /// The value at the key tht must match. - /// The flags to use for this operation. - /// True if the lock was successfully released, false otherwise. + /// Task LockReleaseAsync(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None); - /// - /// Takes a lock (specifying a token value) if it is not already taken. - /// - /// The key of the lock. - /// The value to set at the key. - /// The expiration of the lock key. - /// The flags to use for this operation. - /// True if the lock was successfully taken, false otherwise. + /// Task LockTakeAsync(RedisKey key, RedisValue value, TimeSpan expiry, CommandFlags flags = CommandFlags.None); - /// - /// Posts a message to the given channel. - /// - /// The channel to publish to. - /// The message to send. - /// The flags to use for this operation. - /// The number of clients that received the message. - /// https://redis.io/commands/publish + /// Task PublishAsync(RedisChannel channel, RedisValue message, CommandFlags flags = CommandFlags.None); - /// - /// Execute an arbitrary command against the server; this is primarily intended for - /// executing modules, but may also be used to provide access to new features that lack - /// a direct API. - /// - /// The command to run. - /// The arguments to pass for the command. 
- /// This API should be considered an advanced feature; inappropriate use can be harmful - /// A dynamic representation of the command's result + /// Task ExecuteAsync(string command, params object[] args); - /// - /// Execute an arbitrary command against the server; this is primarily intended for - /// executing modules, but may also be used to provide access to new features that lack - /// a direct API. - /// - /// The command to run. - /// The arguments to pass for the command. - /// The flags to use for this operation. - /// This API should be considered an advanced feature; inappropriate use can be harmful - /// A dynamic representation of the command's result - Task ExecuteAsync(string command, ICollection args, CommandFlags flags = CommandFlags.None); + /// + Task ExecuteAsync(string command, ICollection? args, CommandFlags flags = CommandFlags.None); - /// - /// Execute a Lua script against the server. - /// - /// The script to execute. - /// The keys to execute against. - /// The values to execute against. - /// The flags to use for this operation. - /// A dynamic representation of the script's result - /// https://redis.io/commands/eval - /// https://redis.io/commands/evalsha - Task ScriptEvaluateAsync(string script, RedisKey[] keys = null, RedisValue[] values = null, CommandFlags flags = CommandFlags.None); + /// + Task ScriptEvaluateAsync(string script, RedisKey[]? keys = null, RedisValue[]? values = null, CommandFlags flags = CommandFlags.None); - /// - /// Execute a Lua script against the server using just the SHA1 hash - /// - /// The hash of the script to execute. - /// The keys to execute against. - /// The values to execute against. - /// The flags to use for this operation. 
- /// A dynamic representation of the script's result - /// https://redis.io/commands/evalsha - Task ScriptEvaluateAsync(byte[] hash, RedisKey[] keys = null, RedisValue[] values = null, CommandFlags flags = CommandFlags.None); + /// + [EditorBrowsable(EditorBrowsableState.Never)] + Task ScriptEvaluateAsync(byte[] hash, RedisKey[]? keys = null, RedisValue[]? values = null, CommandFlags flags = CommandFlags.None); - /// - /// Execute a lua script against the server, using previously prepared script. - /// Named parameters, if any, are provided by the `parameters` object. - /// - /// The script to execute. - /// The parameters to pass to the script. - /// The flags to use for this operation. - /// A dynamic representation of the script's result - /// https://redis.io/commands/eval - Task ScriptEvaluateAsync(LuaScript script, object parameters = null, CommandFlags flags = CommandFlags.None); + /// + Task ScriptEvaluateAsync(LuaScript script, object? parameters = null, CommandFlags flags = CommandFlags.None); - /// - /// Execute a lua script against the server, using previously prepared and loaded script. - /// This method sends only the SHA1 hash of the lua script to Redis. - /// Named parameters, if any, are provided by the `parameters` object. - /// - /// The already-loaded script to execute. - /// The parameters to pass to the script. - /// The flags to use for this operation. - /// A dynamic representation of the script's result - /// https://redis.io/commands/eval - Task ScriptEvaluateAsync(LoadedLuaScript script, object parameters = null, CommandFlags flags = CommandFlags.None); + /// + Task ScriptEvaluateAsync(LoadedLuaScript script, object? parameters = null, CommandFlags flags = CommandFlags.None); - /// - /// Add the specified member to the set stored at key. - /// Specified members that are already a member of this set are ignored. - /// If key does not exist, a new set is created before adding the specified members. - /// - /// The key of the set. 
- /// The value to add to the set. - /// The flags to use for this operation. - /// True if the specified member was not already present in the set, else False - /// https://redis.io/commands/sadd + /// + Task ScriptEvaluateReadOnlyAsync(string script, RedisKey[]? keys = null, RedisValue[]? values = null, CommandFlags flags = CommandFlags.None); + + /// + Task ScriptEvaluateReadOnlyAsync(byte[] hash, RedisKey[]? keys = null, RedisValue[]? values = null, CommandFlags flags = CommandFlags.None); + + /// Task SetAddAsync(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None); - /// - /// Add the specified members to the set stored at key. - /// Specified members that are already a member of this set are ignored. - /// If key does not exist, a new set is created before adding the specified members. - /// - /// The key of the set. - /// The values to add to the set. - /// The flags to use for this operation. - /// The number of elements that were added to the set, not including all the elements already present into the set. - /// https://redis.io/commands/sadd + /// Task SetAddAsync(RedisKey key, RedisValue[] values, CommandFlags flags = CommandFlags.None); - /// - /// Returns the members of the set resulting from the specified operation against the given sets. - /// - /// The operation to perform. - /// The key of the first set. - /// The key of the second set. - /// The flags to use for this operation. - /// List with members of the resulting set. - /// https://redis.io/commands/sunion - /// https://redis.io/commands/sinter - /// https://redis.io/commands/sdiff + /// Task SetCombineAsync(SetOperation operation, RedisKey first, RedisKey second, CommandFlags flags = CommandFlags.None); - /// - /// Returns the members of the set resulting from the specified operation against the given sets. - /// - /// The operation to perform. - /// The keys of the sets to operate on. - /// The flags to use for this operation. 
- /// List with members of the resulting set. - /// https://redis.io/commands/sunion - /// https://redis.io/commands/sinter - /// https://redis.io/commands/sdiff + /// Task SetCombineAsync(SetOperation operation, RedisKey[] keys, CommandFlags flags = CommandFlags.None); - /// - /// This command is equal to SetCombine, but instead of returning the resulting set, it is stored in destination. If destination already exists, it is overwritten. - /// - /// The operation to perform. - /// The key of the destination set. - /// The key of the first set. - /// The key of the second set. - /// The flags to use for this operation. - /// The number of elements in the resulting set. - /// https://redis.io/commands/sunionstore - /// https://redis.io/commands/sinterstore - /// https://redis.io/commands/sdiffstore + /// Task SetCombineAndStoreAsync(SetOperation operation, RedisKey destination, RedisKey first, RedisKey second, CommandFlags flags = CommandFlags.None); - /// - /// This command is equal to SetCombine, but instead of returning the resulting set, it is stored in destination. If destination already exists, it is overwritten. - /// - /// The operation to perform. - /// The key of the destination set. - /// The keys of the sets to operate on. - /// The flags to use for this operation. - /// The number of elements in the resulting set. - /// https://redis.io/commands/sunionstore - /// https://redis.io/commands/sinterstore - /// https://redis.io/commands/sdiffstore + /// Task SetCombineAndStoreAsync(SetOperation operation, RedisKey destination, RedisKey[] keys, CommandFlags flags = CommandFlags.None); - /// - /// Returns if member is a member of the set stored at key. - /// - /// The key of the set. - /// The value to check for . - /// The flags to use for this operation. - /// 1 if the element is a member of the set. 0 if the element is not a member of the set, or if key does not exist. 
- /// https://redis.io/commands/sismember + /// Task SetContainsAsync(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None); - /// - /// Returns the set cardinality (number of elements) of the set stored at key. - /// - /// The key of the set. - /// The flags to use for this operation. - /// The cardinality (number of elements) of the set, or 0 if key does not exist. - /// https://redis.io/commands/scard + /// + Task SetContainsAsync(RedisKey key, RedisValue[] values, CommandFlags flags = CommandFlags.None); + + /// + Task SetIntersectionLengthAsync(RedisKey[] keys, long limit = 0, CommandFlags flags = CommandFlags.None); + + /// Task SetLengthAsync(RedisKey key, CommandFlags flags = CommandFlags.None); - /// - /// Returns all the members of the set value stored at key. - /// - /// The key of the set. - /// The flags to use for this operation. - /// All elements of the set. - /// https://redis.io/commands/smembers + /// Task SetMembersAsync(RedisKey key, CommandFlags flags = CommandFlags.None); - /// - /// Move member from the set at source to the set at destination. This operation is atomic. In every given moment the element will appear to be a member of source or destination for other clients. - /// When the specified element already exists in the destination set, it is only removed from the source set. - /// - /// The key of the source set. - /// The key of the destination set. - /// The value to move. - /// The flags to use for this operation. - /// 1 if the element is moved. 0 if the element is not a member of source and no operation was performed. - /// https://redis.io/commands/smove + /// Task SetMoveAsync(RedisKey source, RedisKey destination, RedisValue value, CommandFlags flags = CommandFlags.None); - /// - /// Removes and returns a random element from the set value stored at key. - /// - /// The key of the set. - /// The flags to use for this operation. - /// The removed element, or nil when key does not exist. 
- /// https://redis.io/commands/spop + /// Task SetPopAsync(RedisKey key, CommandFlags flags = CommandFlags.None); - /// - /// Removes and returns the specified number of random elements from the set value stored at key. - /// - /// The key of the set. - /// The number of elements to return. - /// The flags to use for this operation. - /// An array of elements, or an empty array when key does not exist. - /// https://redis.io/commands/spop + /// Task SetPopAsync(RedisKey key, long count, CommandFlags flags = CommandFlags.None); - /// - /// Return a random element from the set value stored at key. - /// - /// The key of the set. - /// The flags to use for this operation. - /// The randomly selected element, or nil when key does not exist - /// https://redis.io/commands/srandmember + /// Task SetRandomMemberAsync(RedisKey key, CommandFlags flags = CommandFlags.None); - /// - /// Return an array of count distinct elements if count is positive. If called with a negative count the behavior changes and the command is allowed to return the same element multiple times. - /// In this case the number of returned elements is the absolute value of the specified count. - /// - /// The key of the set. - /// The count of members to get. - /// The flags to use for this operation. - /// An array of elements, or an empty array when key does not exist - /// https://redis.io/commands/srandmember + /// Task SetRandomMembersAsync(RedisKey key, long count, CommandFlags flags = CommandFlags.None); - /// - /// Remove the specified member from the set stored at key. Specified members that are not a member of this set are ignored. - /// - /// The key of the set. - /// The value to remove. - /// The flags to use for this operation. 
- /// True if the specified member was already present in the set, else False - /// https://redis.io/commands/srem + /// Task SetRemoveAsync(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None); - /// - /// Remove the specified members from the set stored at key. Specified members that are not a member of this set are ignored. - /// - /// The key of the set. - /// The values to remove. - /// The flags to use for this operation. - /// The number of members that were removed from the set, not including non existing members. - /// https://redis.io/commands/srem + /// Task SetRemoveAsync(RedisKey key, RedisValue[] values, CommandFlags flags = CommandFlags.None); - /// - /// Sorts a list, set or sorted set (numerically or alphabetically, ascending by default); By default, the elements themselves are compared, but the values can also be - /// used to perform external key-lookups using the by parameter. By default, the elements themselves are returned, but external key-lookups (one or many) can - /// be performed instead by specifying the get parameter (note that # specifies the element itself, when used in get). - /// Referring to the redis SORT documentation for examples is recommended. When used in hashes, by and get - /// can be used to specify fields using -> notation (again, refer to redis documentation). - /// - /// The key of the list, set, or sorted set. - /// How many entries to skip on the return. - /// How many entries to take on the return. - /// The ascending or descending order (defaults to ascending). - /// The sorting method (defaults to numeric). - /// The key pattern to sort by, if any. e.g. ExternalKey_* would sort by ExternalKey_{listvalue} as a lookup. - /// The key pattern to sort by, if any e.g. ExternalKey_* would return the value of ExternalKey_{listvalue} for each entry. - /// The flags to use for this operation. - /// The sorted elements, or the external values if get is specified. 
- /// https://redis.io/commands/sort - Task SortAsync(RedisKey key, long skip = 0, long take = -1, Order order = Order.Ascending, SortType sortType = SortType.Numeric, RedisValue by = default(RedisValue), RedisValue[] get = null, CommandFlags flags = CommandFlags.None); + /// + IAsyncEnumerable SetScanAsync(RedisKey key, RedisValue pattern = default, int pageSize = RedisBase.CursorUtils.DefaultLibraryPageSize, long cursor = RedisBase.CursorUtils.Origin, int pageOffset = 0, CommandFlags flags = CommandFlags.None); - /// - /// Sorts a list, set or sorted set (numerically or alphabetically, ascending by default); By default, the elements themselves are compared, but the values can also be - /// used to perform external key-lookups using the by parameter. By default, the elements themselves are returned, but external key-lookups (one or many) can - /// be performed instead by specifying the get parameter (note that # specifies the element itself, when used in get). - /// Referring to the redis SORT documentation for examples is recommended. When used in hashes, by and get - /// can be used to specify fields using -> notation (again, refer to redis documentation). - /// - /// The destination key to store results in. - /// The key of the list, set, or sorted set. - /// How many entries to skip on the return. - /// How many entries to take on the return. - /// The ascending or descending order (defaults to ascending). - /// The sorting method (defaults to numeric). - /// The key pattern to sort by, if any. e.g. ExternalKey_* would sort by ExternalKey_{listvalue} as a lookup. - /// The key pattern to sort by, if any e.g. ExternalKey_* would return the value of ExternalKey_{listvalue} for each entry. - /// The flags to use for this operation. - /// The number of elements stored in the new list. 
- /// https://redis.io/commands/sort - Task SortAndStoreAsync(RedisKey destination, RedisKey key, long skip = 0, long take = -1, Order order = Order.Ascending, SortType sortType = SortType.Numeric, RedisValue by = default(RedisValue), RedisValue[] get = null, CommandFlags flags = CommandFlags.None); + /// + Task SortAsync(RedisKey key, long skip = 0, long take = -1, Order order = Order.Ascending, SortType sortType = SortType.Numeric, RedisValue by = default, RedisValue[]? get = null, CommandFlags flags = CommandFlags.None); - /// - /// Adds the specified member with the specified score to the sorted set stored at key. If the specified member is already a member of the sorted set, the score is updated and the element reinserted at the right position to ensure the correct ordering. - /// - /// The key of the sorted set. - /// The member to add to the sorted set. - /// The score for the member to add to the sorted set. - /// The flags to use for this operation. - /// True if the value was added, False if it already existed (the score is still updated) - /// https://redis.io/commands/zadd + /// + Task SortAndStoreAsync(RedisKey destination, RedisKey key, long skip = 0, long take = -1, Order order = Order.Ascending, SortType sortType = SortType.Numeric, RedisValue by = default, RedisValue[]? get = null, CommandFlags flags = CommandFlags.None); + + /// + [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] Task SortedSetAddAsync(RedisKey key, RedisValue member, double score, CommandFlags flags); - /// - /// Adds the specified member with the specified score to the sorted set stored at key. If the specified member is already a member of the sorted set, the score is updated and the element reinserted at the right position to ensure the correct ordering. - /// - /// The key of the sorted set. - /// The member to add to the sorted set. - /// The score for the member to add to the sorted set. - /// What conditions to add the element under (defaults to always). 
- /// The flags to use for this operation. - /// True if the value was added, False if it already existed (the score is still updated) - /// https://redis.io/commands/zadd - Task SortedSetAddAsync(RedisKey key, RedisValue member, double score, When when = When.Always, CommandFlags flags = CommandFlags.None); + /// + [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] + Task SortedSetAddAsync(RedisKey key, RedisValue member, double score, When when, CommandFlags flags = CommandFlags.None); - /// - /// Adds all the specified members with the specified scores to the sorted set stored at key. If a specified member is already a member of the sorted set, the score is updated and the element reinserted at the right position to ensure the correct ordering. - /// - /// The key of the sorted set. - /// The members and values to add to the sorted set. - /// The flags to use for this operation. - /// The number of elements added to the sorted sets, not including elements already existing for which the score was updated. - /// https://redis.io/commands/zadd + /// + Task SortedSetAddAsync(RedisKey key, RedisValue member, double score, SortedSetWhen when = SortedSetWhen.Always, CommandFlags flags = CommandFlags.None); + + /// + [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] Task SortedSetAddAsync(RedisKey key, SortedSetEntry[] values, CommandFlags flags); - /// - /// Adds all the specified members with the specified scores to the sorted set stored at key. If a specified member is already a member of the sorted set, the score is updated and the element reinserted at the right position to ensure the correct ordering. - /// - /// The key of the sorted set. - /// The members and values to add to the sorted set. - /// What conditions to add the element under (defaults to always). - /// The flags to use for this operation. - /// The number of elements added to the sorted sets, not including elements already existing for which the score was updated. 
- /// https://redis.io/commands/zadd - Task SortedSetAddAsync(RedisKey key, SortedSetEntry[] values, When when = When.Always, CommandFlags flags = CommandFlags.None); + /// + [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] + Task SortedSetAddAsync(RedisKey key, SortedSetEntry[] values, When when, CommandFlags flags = CommandFlags.None); - /// - /// Computes a set operation over two sorted sets, and stores the result in destination, optionally performing - /// a specific aggregation (defaults to sum). - /// - /// The operation to perform. - /// The key to store the results in. - /// The key of the first sorted set. - /// The key of the second sorted set. - /// The aggregation method (defaults to sum). - /// The flags to use for this operation. - /// https://redis.io/commands/zunionstore - /// https://redis.io/commands/zinterstore - /// the number of elements in the resulting sorted set at destination - Task SortedSetCombineAndStoreAsync(SetOperation operation, RedisKey destination, RedisKey first, RedisKey second, Aggregate aggregate = Aggregate.Sum, CommandFlags flags = CommandFlags.None); + /// + Task SortedSetAddAsync(RedisKey key, SortedSetEntry[] values, SortedSetWhen when = SortedSetWhen.Always, CommandFlags flags = CommandFlags.None); - /// - /// Computes a set operation over multiple sorted sets (optionally using per-set weights), and stores the result in destination, optionally performing - /// a specific aggregation (defaults to sum). - /// - /// The operation to perform. - /// The key to store the results in. - /// The keys of the sorted sets. - /// The optional weights per set that correspond to . - /// The aggregation method (defaults to sum). - /// The flags to use for this operation. 
- /// https://redis.io/commands/zunionstore - /// https://redis.io/commands/zinterstore - /// the number of elements in the resulting sorted set at destination - Task SortedSetCombineAndStoreAsync(SetOperation operation, RedisKey destination, RedisKey[] keys, double[] weights = null, Aggregate aggregate = Aggregate.Sum, CommandFlags flags = CommandFlags.None); + /// + Task SortedSetCombineAsync(SetOperation operation, RedisKey[] keys, double[]? weights = null, Aggregate aggregate = Aggregate.Sum, CommandFlags flags = CommandFlags.None); - /// - /// Decrements the score of member in the sorted set stored at key by decrement. If member does not exist in the sorted set, it is added with -decrement as its score (as if its previous score was 0.0). - /// - /// The key of the sorted set. - /// The member to decrement. - /// The amount to decrement by. - /// The flags to use for this operation. - /// The new score of member. - /// https://redis.io/commands/zincrby + /// + Task SortedSetCombineWithScoresAsync(SetOperation operation, RedisKey[] keys, double[]? weights = null, Aggregate aggregate = Aggregate.Sum, CommandFlags flags = CommandFlags.None); + + /// + Task SortedSetCombineAndStoreAsync(SetOperation operation, RedisKey destination, RedisKey first, RedisKey second, Aggregate aggregate = Aggregate.Sum, CommandFlags flags = CommandFlags.None); + + /// + Task SortedSetCombineAndStoreAsync(SetOperation operation, RedisKey destination, RedisKey[] keys, double[]? weights = null, Aggregate aggregate = Aggregate.Sum, CommandFlags flags = CommandFlags.None); + + /// Task SortedSetDecrementAsync(RedisKey key, RedisValue member, double value, CommandFlags flags = CommandFlags.None); - /// - /// Increments the score of member in the sorted set stored at key by increment. If member does not exist in the sorted set, it is added with increment as its score (as if its previous score was 0.0). - /// - /// The key of the sorted set. - /// The member to increment. 
- /// The amount to increment by. - /// The flags to use for this operation. - /// The new score of member. - /// https://redis.io/commands/zincrby + /// Task SortedSetIncrementAsync(RedisKey key, RedisValue member, double value, CommandFlags flags = CommandFlags.None); - /// - /// Returns the sorted set cardinality (number of elements) of the sorted set stored at key. - /// - /// The key of the sorted set. - /// The min score to filter by (defaults to negative infinity). - /// The max score to filter by (defaults to positive infinity). - /// Whether to exclude and from the range check (defaults to both inclusive). - /// The flags to use for this operation. - /// The cardinality (number of elements) of the sorted set, or 0 if key does not exist. - /// https://redis.io/commands/zcard + /// + Task SortedSetIntersectionLengthAsync(RedisKey[] keys, long limit = 0, CommandFlags flags = CommandFlags.None); + + /// Task SortedSetLengthAsync(RedisKey key, double min = double.NegativeInfinity, double max = double.PositiveInfinity, Exclude exclude = Exclude.None, CommandFlags flags = CommandFlags.None); - /// - /// When all the elements in a sorted set are inserted with the same score, in order to force lexicographical ordering, this command returns the number of elements in the sorted set at key with a value between min and max. - /// - /// The key of the sorted set. - /// The min value to filter by. - /// The max value to filter by. - /// Whether to exclude and from the range check (defaults to both inclusive). - /// The flags to use for this operation. - /// The number of elements in the specified score range. - /// https://redis.io/commands/zlexcount + /// Task SortedSetLengthByValueAsync(RedisKey key, RedisValue min, RedisValue max, Exclude exclude = Exclude.None, CommandFlags flags = CommandFlags.None); - /// - /// Returns the specified range of elements in the sorted set stored at key. 
By default the elements are considered to be ordered from the lowest to the highest score. Lexicographical order is used for elements with equal score. - /// Both start and stop are zero-based indexes, where 0 is the first element, 1 is the next element and so on. They can also be negative numbers indicating offsets from the end of the sorted set, with -1 being the last element of the sorted set, -2 the penultimate element and so on. - /// - /// The key of the sorted set. - /// The start index to get. - /// The stop index to get. - /// The order to sort by (defaults to ascending). - /// The flags to use for this operation. - /// List of elements in the specified range. - /// https://redis.io/commands/zrange - /// https://redis.io/commands/zrevrange + /// + Task SortedSetRandomMemberAsync(RedisKey key, CommandFlags flags = CommandFlags.None); + + /// + Task SortedSetRandomMembersAsync(RedisKey key, long count, CommandFlags flags = CommandFlags.None); + + /// + Task SortedSetRandomMembersWithScoresAsync(RedisKey key, long count, CommandFlags flags = CommandFlags.None); + + /// Task SortedSetRangeByRankAsync(RedisKey key, long start = 0, long stop = -1, Order order = Order.Ascending, CommandFlags flags = CommandFlags.None); - /// - /// Returns the specified range of elements in the sorted set stored at key. By default the elements are considered to be ordered from the lowest to the highest score. Lexicographical order is used for elements with equal score. - /// Both start and stop are zero-based indexes, where 0 is the first element, 1 is the next element and so on. They can also be negative numbers indicating offsets from the end of the sorted set, with -1 being the last element of the sorted set, -2 the penultimate element and so on. - /// - /// The key of the sorted set. - /// The start index to get. - /// The stop index to get. - /// The order to sort by (defaults to ascending). - /// The flags to use for this operation. 
- /// List of elements in the specified range. - /// https://redis.io/commands/zrange - /// https://redis.io/commands/zrevrange + /// + Task SortedSetRangeAndStoreAsync( + RedisKey sourceKey, + RedisKey destinationKey, + RedisValue start, + RedisValue stop, + SortedSetOrder sortedSetOrder = SortedSetOrder.ByRank, + Exclude exclude = Exclude.None, + Order order = Order.Ascending, + long skip = 0, + long? take = null, + CommandFlags flags = CommandFlags.None); + + /// Task SortedSetRangeByRankWithScoresAsync(RedisKey key, long start = 0, long stop = -1, Order order = Order.Ascending, CommandFlags flags = CommandFlags.None); - /// - /// Returns the specified range of elements in the sorted set stored at key. By default the elements are considered to be ordered from the lowest to the highest score. Lexicographical order is used for elements with equal score. - /// Start and stop are used to specify the min and max range for score values. Similar to other range methods the values are inclusive. - /// - /// The key of the sorted set. - /// The minimum score to filter by. - /// The maximum score to filter by. - /// Which of and to exclude (defaults to both inclusive). - /// The order to sort by (defaults to ascending). - /// How many items to skip. - /// How many items to take. - /// The flags to use for this operation. - /// List of elements in the specified score range. - /// https://redis.io/commands/zrangebyscore - /// https://redis.io/commands/zrevrangebyscore - Task SortedSetRangeByScoreAsync(RedisKey key, + /// + Task SortedSetRangeByScoreAsync( + RedisKey key, double start = double.NegativeInfinity, double stop = double.PositiveInfinity, Exclude exclude = Exclude.None, @@ -1265,22 +551,9 @@ Task SortedSetRangeByScoreAsync(RedisKey key, long take = -1, CommandFlags flags = CommandFlags.None); - /// - /// Returns the specified range of elements in the sorted set stored at key. By default the elements are considered to be ordered from the lowest to the highest score. 
Lexicographical order is used for elements with equal score. - /// Start and stop are used to specify the min and max range for score values. Similar to other range methods the values are inclusive. - /// - /// The key of the sorted set. - /// The minimum score to filter by. - /// The maximum score to filter by. - /// Which of and to exclude (defaults to both inclusive). - /// The order to sort by (defaults to ascending). - /// How many items to skip. - /// How many items to take. - /// The flags to use for this operation. - /// List of elements in the specified score range. - /// https://redis.io/commands/zrangebyscore - /// https://redis.io/commands/zrevrangebyscore - Task SortedSetRangeByScoreWithScoresAsync(RedisKey key, + /// + Task SortedSetRangeByScoreWithScoresAsync( + RedisKey key, double start = double.NegativeInfinity, double stop = double.PositiveInfinity, Exclude exclude = Exclude.None, @@ -1289,19 +562,9 @@ Task SortedSetRangeByScoreWithScoresAsync(RedisKey key, long take = -1, CommandFlags flags = CommandFlags.None); - /// - /// When all the elements in a sorted set are inserted with the same score, in order to force lexicographical ordering, this command returns all the elements in the sorted set at key with a value between min and max. - /// - /// The key of the sorted set. - /// The min value to filter by. - /// The max value to filter by. - /// Which of and to exclude (defaults to both inclusive). - /// How many items to skip. - /// How many items to take. - /// The flags to use for this operation. - /// https://redis.io/commands/zrangebylex - /// list of elements in the specified score range. 
- Task SortedSetRangeByValueAsync(RedisKey key, + /// + Task SortedSetRangeByValueAsync( + RedisKey key, RedisValue min, RedisValue max, Exclude exclude, @@ -1309,708 +572,310 @@ Task SortedSetRangeByValueAsync(RedisKey key, long take = -1, CommandFlags flags = CommandFlags.None); // defaults removed to avoid ambiguity with overload with order - /// - /// When all the elements in a sorted set are inserted with the same score, in order to force lexicographical ordering, this command returns all the elements in the sorted set at key with a value between min and max. - /// - /// The key of the sorted set. - /// The min value to filter by. - /// The max value to filter by. - /// Which of and to exclude (defaults to both inclusive). - /// Whether to order the data ascending or descending - /// How many items to skip. - /// How many items to take. - /// The flags to use for this operation. - /// https://redis.io/commands/zrangebylex - /// https://redis.io/commands/zrevrangebylex - /// list of elements in the specified score range. - Task SortedSetRangeByValueAsync(RedisKey key, - RedisValue min = default(RedisValue), - RedisValue max = default(RedisValue), + /// + Task SortedSetRangeByValueAsync( + RedisKey key, + RedisValue min = default, + RedisValue max = default, Exclude exclude = Exclude.None, Order order = Order.Ascending, long skip = 0, long take = -1, CommandFlags flags = CommandFlags.None); - /// - /// Returns the rank of member in the sorted set stored at key, by default with the scores ordered from low to high. The rank (or index) is 0-based, which means that the member with the lowest score has rank 0. - /// - /// The key of the sorted set. - /// The member to get the rank of. - /// The order to sort by (defaults to ascending). - /// The flags to use for this operation. 
- /// If member exists in the sorted set, the rank of member; If member does not exist in the sorted set or key does not exist, null - /// https://redis.io/commands/zrank - /// https://redis.io/commands/zrevrank + /// Task SortedSetRankAsync(RedisKey key, RedisValue member, Order order = Order.Ascending, CommandFlags flags = CommandFlags.None); - /// - /// Removes the specified member from the sorted set stored at key. Non existing members are ignored. - /// - /// The key of the sorted set. - /// The member to remove. - /// The flags to use for this operation. - /// True if the member existed in the sorted set and was removed; False otherwise. - /// https://redis.io/commands/zrem + /// Task SortedSetRemoveAsync(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None); - /// - /// Removes the specified members from the sorted set stored at key. Non existing members are ignored. - /// - /// The key of the sorted set. - /// The members to remove. - /// The flags to use for this operation. - /// The number of members removed from the sorted set, not including non existing members. - /// https://redis.io/commands/zrem + /// Task SortedSetRemoveAsync(RedisKey key, RedisValue[] members, CommandFlags flags = CommandFlags.None); - /// - /// Removes all elements in the sorted set stored at key with rank between start and stop. Both start and stop are 0 -based indexes with 0 being the element with the lowest score. These indexes can be negative numbers, where they indicate offsets starting at the element with the highest score. For example: -1 is the element with the highest score, -2 the element with the second highest score and so forth. - /// - /// The key of the sorted set. - /// The minimum rank to remove. - /// The maximum rank to remove. - /// The flags to use for this operation. - /// The number of elements removed. 
- /// https://redis.io/commands/zremrangebyrank + /// Task SortedSetRemoveRangeByRankAsync(RedisKey key, long start, long stop, CommandFlags flags = CommandFlags.None); - /// - /// Removes all elements in the sorted set stored at key with a score between min and max (inclusive by default). - /// - /// The key of the sorted set. - /// The minimum score to remove. - /// The maximum score to remove. - /// Which of and to exclude (defaults to both inclusive). - /// The flags to use for this operation. - /// The number of elements removed. - /// https://redis.io/commands/zremrangebyscore + /// Task SortedSetRemoveRangeByScoreAsync(RedisKey key, double start, double stop, Exclude exclude = Exclude.None, CommandFlags flags = CommandFlags.None); - /// - /// When all the elements in a sorted set are inserted with the same score, in order to force lexicographical ordering, this command removes all elements in the sorted set stored at key between the lexicographical range specified by min and max. - /// - /// The key of the sorted set. - /// The minimum value to remove. - /// The maximum value to remove. - /// Which of and to exclude (defaults to both inclusive). - /// The flags to use for this operation. - /// the number of elements removed. - /// https://redis.io/commands/zremrangebylex + /// Task SortedSetRemoveRangeByValueAsync(RedisKey key, RedisValue min, RedisValue max, Exclude exclude = Exclude.None, CommandFlags flags = CommandFlags.None); - /// - /// The SSCAN command is used to incrementally iterate over set; note: to resume an iteration via cursor, cast the original enumerable or enumerator to IScanningCursor. - /// - /// The key of the set. - /// The pattern to match. - /// The page size to iterate by. - /// The cursor position to start at. - /// The page offset to start at. - /// The flags to use for this operation. - /// Yields all matching elements of the set. 
- /// https://redis.io/commands/sscan - IAsyncEnumerable SetScanAsync(RedisKey key, RedisValue pattern = default(RedisValue), int pageSize = RedisBase.CursorUtils.DefaultLibraryPageSize, long cursor = RedisBase.CursorUtils.Origin, int pageOffset = 0, CommandFlags flags = CommandFlags.None); - - /// - /// The ZSCAN command is used to incrementally iterate over a sorted set - /// - /// The key of the sorted set. - /// The pattern to match. - /// The page size to iterate by. - /// The flags to use for this operation. - /// The cursor position to start at. - /// The page offset to start at. - /// Yields all matching elements of the sorted set. - /// https://redis.io/commands/zscan - IAsyncEnumerable SortedSetScanAsync(RedisKey key, RedisValue pattern = default(RedisValue), int pageSize = RedisBase.CursorUtils.DefaultLibraryPageSize, long cursor = RedisBase.CursorUtils.Origin, int pageOffset = 0, CommandFlags flags = CommandFlags.None); + /// + IAsyncEnumerable SortedSetScanAsync( + RedisKey key, + RedisValue pattern = default, + int pageSize = RedisBase.CursorUtils.DefaultLibraryPageSize, + long cursor = RedisBase.CursorUtils.Origin, + int pageOffset = 0, + CommandFlags flags = CommandFlags.None); - /// - /// Returns the score of member in the sorted set at key; If member does not exist in the sorted set, or key does not exist, nil is returned. - /// - /// The key of the sorted set. - /// The member to get a score for. - /// The flags to use for this operation. - /// The score of the member. - /// https://redis.io/commands/zscore + /// Task SortedSetScoreAsync(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None); - /// - /// Removes and returns the first element from the sorted set stored at key, by default with the scores ordered from low to high. - /// - /// The key of the sorted set. - /// The order to sort by (defaults to ascending). - /// The flags to use for this operation. - /// The removed element, or nil when key does not exist. 
- /// https://redis.io/commands/zpopmin - /// https://redis.io/commands/zpopmax + /// + Task SortedSetScoresAsync(RedisKey key, RedisValue[] members, CommandFlags flags = CommandFlags.None); + + /// + Task SortedSetUpdateAsync(RedisKey key, RedisValue member, double score, SortedSetWhen when = SortedSetWhen.Always, CommandFlags flags = CommandFlags.None); + + /// + Task SortedSetUpdateAsync(RedisKey key, SortedSetEntry[] values, SortedSetWhen when = SortedSetWhen.Always, CommandFlags flags = CommandFlags.None); + + /// Task SortedSetPopAsync(RedisKey key, Order order = Order.Ascending, CommandFlags flags = CommandFlags.None); - /// - /// Removes and returns the specified number of first elements from the sorted set stored at key, by default with the scores ordered from low to high. - /// - /// The key of the sorted set. - /// The number of elements to return. - /// The order to sort by (defaults to ascending). - /// The flags to use for this operation. - /// An array of elements, or an empty array when key does not exist. - /// https://redis.io/commands/zpopmin - /// https://redis.io/commands/zpopmax + /// Task SortedSetPopAsync(RedisKey key, long count, Order order = Order.Ascending, CommandFlags flags = CommandFlags.None); - /// - /// Allow the consumer to mark a pending message as correctly processed. Returns the number of messages acknowledged. - /// - /// The key of the stream. - /// The name of the consumer group that received the message. - /// The ID of the message to acknowledge. - /// The flags to use for this operation. - /// The number of messages acknowledged. - /// https://redis.io/topics/streams-intro + /// + Task SortedSetPopAsync(RedisKey[] keys, long count, Order order = Order.Ascending, CommandFlags flags = CommandFlags.None); + + /// Task StreamAcknowledgeAsync(RedisKey key, RedisValue groupName, RedisValue messageId, CommandFlags flags = CommandFlags.None); - /// - /// Allow the consumer to mark a pending message as correctly processed. 
Returns the number of messages acknowledged. - /// - /// The key of the stream. - /// The name of the consumer group that received the message. - /// The IDs of the messages to acknowledge. - /// The flags to use for this operation. - /// The number of messages acknowledged. - /// https://redis.io/topics/streams-intro + /// Task StreamAcknowledgeAsync(RedisKey key, RedisValue groupName, RedisValue[] messageIds, CommandFlags flags = CommandFlags.None); - /// - /// Adds an entry using the specified values to the given stream key. If key does not exist, a new key holding a stream is created. The command returns the ID of the newly created stream entry. - /// - /// The key of the stream. - /// The field name for the stream entry. - /// The value to set in the stream entry. - /// The ID to assign to the stream entry, defaults to an auto-generated ID ("*"). - /// The maximum length of the stream. - /// If true, the "~" argument is used to allow the stream to exceed max length by a small number. This improves performance when removing messages. - /// The flags to use for this operation. - /// The ID of the newly created message. - /// https://redis.io/commands/xadd - Task StreamAddAsync(RedisKey key, RedisValue streamField, RedisValue streamValue, RedisValue? messageId = null, int? maxLength = null, bool useApproximateMaxLength = false, CommandFlags flags = CommandFlags.None); +#pragma warning disable RS0026 // similar overloads + /// + Task StreamAcknowledgeAndDeleteAsync(RedisKey key, RedisValue groupName, StreamTrimMode mode, RedisValue messageId, CommandFlags flags = CommandFlags.None); - /// - /// Adds an entry using the specified values to the given stream key. If key does not exist, a new key holding a stream is created. The command returns the ID of the newly created stream entry. - /// - /// The key of the stream. - /// The fields and their associated values to set in the stream entry. 
- /// The ID to assign to the stream entry, defaults to an auto-generated ID ("*"). - /// The maximum length of the stream. - /// If true, the "~" argument is used to allow the stream to exceed max length by a small number. This improves performance when removing messages. - /// The flags to use for this operation. - /// The ID of the newly created message. - /// https://redis.io/commands/xadd - Task StreamAddAsync(RedisKey key, NameValueEntry[] streamPairs, RedisValue? messageId = null, int? maxLength = null, bool useApproximateMaxLength = false, CommandFlags flags = CommandFlags.None); + /// + Task StreamAcknowledgeAndDeleteAsync(RedisKey key, RedisValue groupName, StreamTrimMode mode, RedisValue[] messageIds, CommandFlags flags = CommandFlags.None); +#pragma warning restore RS0026 - /// - /// Change ownership of messages consumed, but not yet acknowledged, by a different consumer. This method returns the complete message for the claimed message(s). - /// - /// The key of the stream. - /// The consumer group. - /// The consumer claiming the given messages. - /// The minimum message idle time to allow the reassignment of the message(s). - /// The IDs of the messages to claim for the given consumer. - /// The flags to use for this operation. - /// The messages successfully claimed by the given consumer. - /// https://redis.io/topics/streams-intro + /// + Task StreamAddAsync(RedisKey key, RedisValue streamField, RedisValue streamValue, RedisValue? messageId, int? maxLength, bool useApproximateMaxLength, CommandFlags flags); + + /// + Task StreamAddAsync(RedisKey key, NameValueEntry[] streamPairs, RedisValue? messageId, int? maxLength, bool useApproximateMaxLength, CommandFlags flags); + +#pragma warning disable RS0026 // similar overloads + /// + Task StreamAddAsync(RedisKey key, RedisValue streamField, RedisValue streamValue, RedisValue? messageId = null, long? maxLength = null, bool useApproximateMaxLength = false, long? 
limit = null, StreamTrimMode trimMode = StreamTrimMode.KeepReferences, CommandFlags flags = CommandFlags.None); + + /// + Task StreamAddAsync(RedisKey key, NameValueEntry[] streamPairs, RedisValue? messageId = null, long? maxLength = null, bool useApproximateMaxLength = false, long? limit = null, StreamTrimMode trimMode = StreamTrimMode.KeepReferences, CommandFlags flags = CommandFlags.None); + + /// + [Experimental(Experiments.Server_8_6, UrlFormat = Experiments.UrlFormat)] + Task StreamAddAsync(RedisKey key, RedisValue streamField, RedisValue streamValue, StreamIdempotentId idempotentId, long? maxLength = null, bool useApproximateMaxLength = false, long? limit = null, StreamTrimMode trimMode = StreamTrimMode.KeepReferences, CommandFlags flags = CommandFlags.None); + + /// + [Experimental(Experiments.Server_8_6, UrlFormat = Experiments.UrlFormat)] + Task StreamAddAsync(RedisKey key, NameValueEntry[] streamPairs, StreamIdempotentId idempotentId, long? maxLength = null, bool useApproximateMaxLength = false, long? limit = null, StreamTrimMode trimMode = StreamTrimMode.KeepReferences, CommandFlags flags = CommandFlags.None); +#pragma warning restore RS0026 + + /// + [Experimental(Experiments.Server_8_6, UrlFormat = Experiments.UrlFormat)] + Task StreamConfigureAsync(RedisKey key, StreamConfiguration configuration, CommandFlags flags = CommandFlags.None); + + /// + Task StreamAutoClaimAsync(RedisKey key, RedisValue consumerGroup, RedisValue claimingConsumer, long minIdleTimeInMs, RedisValue startAtId, int? count = null, CommandFlags flags = CommandFlags.None); + + /// + Task StreamAutoClaimIdsOnlyAsync(RedisKey key, RedisValue consumerGroup, RedisValue claimingConsumer, long minIdleTimeInMs, RedisValue startAtId, int? 
count = null, CommandFlags flags = CommandFlags.None); + + /// Task StreamClaimAsync(RedisKey key, RedisValue consumerGroup, RedisValue claimingConsumer, long minIdleTimeInMs, RedisValue[] messageIds, CommandFlags flags = CommandFlags.None); - /// - /// Change ownership of messages consumed, but not yet acknowledged, by a different consumer. This method returns the IDs for the claimed message(s). - /// - /// The key of the stream. - /// The consumer group. - /// The consumer claiming the given message(s). - /// The minimum message idle time to allow the reassignment of the message(s). - /// The IDs of the messages to claim for the given consumer. - /// The flags to use for this operation. - /// The message IDs for the messages successfully claimed by the given consumer. - /// https://redis.io/topics/streams-intro + /// Task StreamClaimIdsOnlyAsync(RedisKey key, RedisValue consumerGroup, RedisValue claimingConsumer, long minIdleTimeInMs, RedisValue[] messageIds, CommandFlags flags = CommandFlags.None); - /// - /// Set the position from which to read a stream for a consumer group. - /// - /// The key of the stream. - /// The name of the consumer group. - /// The position from which to read for the consumer group. - /// The flags to use for this operation. - /// True if successful, otherwise false. + /// Task StreamConsumerGroupSetPositionAsync(RedisKey key, RedisValue groupName, RedisValue position, CommandFlags flags = CommandFlags.None); - /// - /// Retrieve information about the consumers for the given consumer group. This is the equivalent of calling "XINFO GROUPS key group". - /// - /// The key of the stream. - /// The consumer group name. - /// The flags to use for this operation. - /// An instance of for each of the consumer group's consumers. - /// https://redis.io/topics/streams-intro + /// Task StreamConsumerInfoAsync(RedisKey key, RedisValue groupName, CommandFlags flags = CommandFlags.None); - /// - /// Create a consumer group for the given stream. 
- /// - /// The key of the stream. - /// The name of the group to create. - /// The position to begin reading the stream. Defaults to . - /// The flags to use for this operation. - /// True if the group was created. - /// https://redis.io/topics/streams-intro + /// Task StreamCreateConsumerGroupAsync(RedisKey key, RedisValue groupName, RedisValue? position, CommandFlags flags); - /// - /// Create a consumer group for the given stream. - /// - /// The key of the stream. - /// The name of the group to create. - /// The position to begin reading the stream. Defaults to . - /// Create the stream if it does not already exist. - /// The flags to use for this operation. - /// True if the group was created. - /// https://redis.io/topics/streams-intro + /// Task StreamCreateConsumerGroupAsync(RedisKey key, RedisValue groupName, RedisValue? position = null, bool createStream = true, CommandFlags flags = CommandFlags.None); - /// - /// Delete messages in the stream. This method does not delete the stream. - /// - /// The key of the stream. - /// The IDs of the messages to delete. - /// The flags to use for this operation. - /// Returns the number of messages successfully deleted from the stream. - /// https://redis.io/topics/streams-intro +#pragma warning disable RS0026 + /// Task StreamDeleteAsync(RedisKey key, RedisValue[] messageIds, CommandFlags flags = CommandFlags.None); - /// - /// Delete a consumer from a consumer group. - /// - /// The key of the stream. - /// The name of the consumer group. - /// The name of the consumer. - /// The flags to use for this operation. - /// The number of messages that were pending for the deleted consumer. 
+ /// + Task StreamDeleteAsync(RedisKey key, RedisValue[] messageIds, StreamTrimMode mode, CommandFlags flags = CommandFlags.None); +#pragma warning restore RS0026 + + /// Task StreamDeleteConsumerAsync(RedisKey key, RedisValue groupName, RedisValue consumerName, CommandFlags flags = CommandFlags.None); - /// - /// Delete a consumer group. - /// - /// The key of the stream. - /// The name of the consumer group. - /// The flags to use for this operation. - /// True if deleted, otherwise false. + /// Task StreamDeleteConsumerGroupAsync(RedisKey key, RedisValue groupName, CommandFlags flags = CommandFlags.None); - /// - /// Retrieve information about the groups created for the given stream. This is the equivalent of calling "XINFO GROUPS key". - /// - /// The key of the stream. - /// The flags to use for this operation. - /// An instance of for each of the stream's groups. - /// https://redis.io/topics/streams-intro + /// Task StreamGroupInfoAsync(RedisKey key, CommandFlags flags = CommandFlags.None); - /// - /// Retrieve information about the given stream. This is the equivalent of calling "XINFO STREAM key". - /// - /// The key of the stream. - /// The flags to use for this operation. - /// A instance with information about the stream. - /// https://redis.io/topics/streams-intro + /// Task StreamInfoAsync(RedisKey key, CommandFlags flags = CommandFlags.None); - /// - /// Return the number of entries in a stream. - /// - /// The key of the stream. - /// The flags to use for this operation. - /// The number of entries inside the given stream. - /// https://redis.io/commands/xlen + /// Task StreamLengthAsync(RedisKey key, CommandFlags flags = CommandFlags.None); - /// - /// View information about pending messages for a stream. A pending message is a message read using StreamReadGroup (XREADGROUP) but not yet acknowledged. - /// - /// The key of the stream. - /// The name of the consumer group - /// The flags to use for this operation. - /// An instance of . 
contains the number of pending messages, the highest and lowest ID of the pending messages, and the consumers with their pending message count. - /// The equivalent of calling XPENDING key group. - /// https://redis.io/commands/xpending + /// Task StreamPendingAsync(RedisKey key, RedisValue groupName, CommandFlags flags = CommandFlags.None); - /// - /// View information about each pending message. - /// - /// The key of the stream. - /// The name of the consumer group. - /// The maximum number of pending messages to return. - /// The consumer name for the pending messages. Pass RedisValue.Null to include pending messages for all consumers. - /// The minimum ID from which to read the stream of pending messages. The method will default to reading from the beginning of the stream. - /// The maximum ID to read to within the stream of pending messages. The method will default to reading to the end of the stream. - /// The flags to use for this operation. - /// An instance of for each pending message. - /// Equivalent of calling XPENDING key group start-id end-id count consumer-name. - /// https://redis.io/commands/xpending - Task StreamPendingMessagesAsync(RedisKey key, RedisValue groupName, int count, RedisValue consumerName, RedisValue? minId = null, RedisValue? maxId = null, CommandFlags flags = CommandFlags.None); + /// + Task StreamPendingMessagesAsync(RedisKey key, RedisValue groupName, int count, RedisValue consumerName, RedisValue? minId, RedisValue? maxId, CommandFlags flags); - /// - /// Read a stream using the given range of IDs. - /// - /// The key of the stream. - /// The minimum ID from which to read the stream. The method will default to reading from the beginning of the stream. - /// The maximum ID to read to within the stream. The method will default to reading to the end of the stream. - /// The maximum number of messages to return. - /// The order of the messages. will execute XRANGE and wil execute XREVRANGE. 
- /// The flags to use for this operation. - /// Returns an instance of for each message returned. - /// https://redis.io/commands/xrange + /// + Task StreamPendingMessagesAsync(RedisKey key, RedisValue groupName, int count, RedisValue consumerName, RedisValue? minId = null, RedisValue? maxId = null, long? minIdleTimeInMs = null, CommandFlags flags = CommandFlags.None); + + /// Task StreamRangeAsync(RedisKey key, RedisValue? minId = null, RedisValue? maxId = null, int? count = null, Order messageOrder = Order.Ascending, CommandFlags flags = CommandFlags.None); - /// - /// Read from a single stream. - /// - /// The key of the stream. - /// The position from which to read the stream. - /// The maximum number of messages to return. - /// The flags to use for this operation. - /// Returns an instance of for each message returned. - /// Equivalent of calling XREAD COUNT num STREAMS key id. - /// https://redis.io/commands/xread + /// Task StreamReadAsync(RedisKey key, RedisValue position, int? count = null, CommandFlags flags = CommandFlags.None); - /// - /// Read from multiple streams. - /// - /// Array of streams and the positions from which to begin reading for each stream. - /// The maximum number of messages to return from each stream. - /// The flags to use for this operation. - /// A value of for each stream. - /// Equivalent of calling XREAD COUNT num STREAMS key1 key2 id1 id2. - /// https://redis.io/commands/xread + /// Task StreamReadAsync(StreamPosition[] streamPositions, int? countPerStream = null, CommandFlags flags = CommandFlags.None); - /// - /// Read messages from a stream into an associated consumer group. - /// - /// The key of the stream. - /// The name of the consumer group. - /// The consumer name. - /// The position from which to read the stream. Defaults to when null. - /// The maximum number of messages to return. - /// The flags to use for this operation. - /// Returns a value of for each message returned. 
- /// https://redis.io/commands/xreadgroup + /// Task StreamReadGroupAsync(RedisKey key, RedisValue groupName, RedisValue consumerName, RedisValue? position, int? count, CommandFlags flags); - /// - /// Read messages from a stream into an associated consumer group. - /// - /// The key of the stream. - /// The name of the consumer group. - /// The consumer name. - /// The position from which to read the stream. Defaults to when null. - /// The maximum number of messages to return. - /// When true, the message will not be added to the pending message list. - /// The flags to use for this operation. - /// Returns a value of for each message returned. - /// https://redis.io/commands/xreadgroup - Task StreamReadGroupAsync(RedisKey key, RedisValue groupName, RedisValue consumerName, RedisValue? position = null, int? count = null, bool noAck = false, CommandFlags flags = CommandFlags.None); + /// + Task StreamReadGroupAsync(RedisKey key, RedisValue groupName, RedisValue consumerName, RedisValue? position, int? count, bool noAck, CommandFlags flags); - /// - /// Read from multiple streams into the given consumer group. The consumer group with the given - /// will need to have been created for each stream prior to calling this method. - /// - /// Array of streams and the positions from which to begin reading for each stream. - /// The name of the consumer group. - /// - /// The maximum number of messages to return from each stream. - /// The flags to use for this operation. - /// A value of for each stream. - /// Equivalent of calling XREADGROUP GROUP groupName consumerName COUNT countPerStream STREAMS stream1 stream2 id1 id2 - /// https://redis.io/commands/xreadgroup + /// + Task StreamReadGroupAsync(RedisKey key, RedisValue groupName, RedisValue consumerName, RedisValue? position = null, int? count = null, bool noAck = false, TimeSpan? 
claimMinIdleTime = null, CommandFlags flags = CommandFlags.None); + + /// Task StreamReadGroupAsync(StreamPosition[] streamPositions, RedisValue groupName, RedisValue consumerName, int? countPerStream, CommandFlags flags); - /// - /// Read from multiple streams into the given consumer group. The consumer group with the given - /// will need to have been created for each stream prior to calling this method. - /// - /// Array of streams and the positions from which to begin reading for each stream. - /// The name of the consumer group. - /// - /// The maximum number of messages to return from each stream. - /// When true, the message will not be added to the pending message list. - /// The flags to use for this operation. - /// A value of for each stream. - /// Equivalent of calling XREADGROUP GROUP groupName consumerName COUNT countPerStream STREAMS stream1 stream2 id1 id2 - /// https://redis.io/commands/xreadgroup - Task StreamReadGroupAsync(StreamPosition[] streamPositions, RedisValue groupName, RedisValue consumerName, int? countPerStream = null, bool noAck = false, CommandFlags flags = CommandFlags.None); + /// + Task StreamReadGroupAsync(StreamPosition[] streamPositions, RedisValue groupName, RedisValue consumerName, int? countPerStream, bool noAck, CommandFlags flags); - /// - /// Trim the stream to a specified maximum length. - /// - /// The key of the stream. - /// The maximum length of the stream. - /// If true, the "~" argument is used to allow the stream to exceed max length by a small number. This improves performance when removing messages. - /// The flags to use for this operation. - /// The number of messages removed from the stream. 
- /// https://redis.io/topics/streams-intro - Task StreamTrimAsync(RedisKey key, int maxLength, bool useApproximateMaxLength = false, CommandFlags flags = CommandFlags.None); + /// + Task StreamTrimAsync(RedisKey key, int maxLength, bool useApproximateMaxLength, CommandFlags flags); - /// - /// If key already exists and is a string, this command appends the value at the end of the string. If key does not exist it is created and set as an empty string, - /// so APPEND will be similar to SET in this special case. - /// - /// The key of the string. - /// The value to append to the string. - /// The flags to use for this operation. - /// The length of the string after the append operation. - /// https://redis.io/commands/append + /// + Task StreamTrimAsync(RedisKey key, long maxLength, bool useApproximateMaxLength = false, long? limit = null, StreamTrimMode mode = StreamTrimMode.KeepReferences, CommandFlags flags = CommandFlags.None); + + /// + Task StreamTrimByMinIdAsync(RedisKey key, RedisValue minId, bool useApproximateMaxLength = false, long? limit = null, StreamTrimMode mode = StreamTrimMode.KeepReferences, CommandFlags flags = CommandFlags.None); + + /// Task StringAppendAsync(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None); - /// - /// Count the number of set bits (population counting) in a string. - /// By default all the bytes contained in the string are examined. It is possible to specify the counting operation only in an interval passing the additional arguments start and end. - /// Like for the GETRANGE command start and end can contain negative values in order to index bytes starting from the end of the string, where -1 is the last byte, -2 is the penultimate, and so forth. - /// - /// The key of the string. - /// The start byte to count at. - /// The end byte to count at. - /// The flags to use for this operation. - /// The number of bits set to 1. 
- /// https://redis.io/commands/bitcount - Task StringBitCountAsync(RedisKey key, long start = 0, long end = -1, CommandFlags flags = CommandFlags.None); + /// + [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] + Task StringBitCountAsync(RedisKey key, long start, long end, CommandFlags flags); - /// - /// Perform a bitwise operation between multiple keys (containing string values) and store the result in the destination key. - /// The BITOP command supports four bitwise operations; note that NOT is a unary operator: the second key should be omitted in this case - /// and only the first key will be considered. - /// The result of the operation is always stored at destkey. - /// - /// The operation to perform. - /// The destination key to store the result in. - /// The first key to get the bit value from. - /// The second key to get the bit value from. - /// The flags to use for this operation. - /// The size of the string stored in the destination key, that is equal to the size of the longest input string. - /// https://redis.io/commands/bitop - Task StringBitOperationAsync(Bitwise operation, RedisKey destination, RedisKey first, RedisKey second = default(RedisKey), CommandFlags flags = CommandFlags.None); + /// + Task StringBitCountAsync(RedisKey key, long start = 0, long end = -1, StringIndexType indexType = StringIndexType.Byte, CommandFlags flags = CommandFlags.None); - /// - /// Perform a bitwise operation between multiple keys (containing string values) and store the result in the destination key. - /// The BITOP command supports four bitwise operations; note that NOT is a unary operator. - /// The result of the operation is always stored at destkey. - /// - /// The operation to perform. - /// The destination key to store the result in. - /// The keys to get the bit values from. - /// The flags to use for this operation. - /// The size of the string stored in the destination key, that is equal to the size of the longest input string. 
- /// https://redis.io/commands/bitop + /// + Task StringBitOperationAsync(Bitwise operation, RedisKey destination, RedisKey first, RedisKey second = default, CommandFlags flags = CommandFlags.None); + + /// Task StringBitOperationAsync(Bitwise operation, RedisKey destination, RedisKey[] keys, CommandFlags flags = CommandFlags.None); - /// - /// Return the position of the first bit set to 1 or 0 in a string. - /// The position is returned thinking at the string as an array of bits from left to right where the first byte most significant bit is at position 0, the second byte most significant bit is at position 8 and so forth. - /// An start and end may be specified; these are in bytes, not bits; start and end can contain negative values in order to index bytes starting from the end of the string, where -1 is the last byte, -2 is the penultimate, and so forth. - /// - /// The key of the string. - /// True to check for the first 1 bit, false to check for the first 0 bit. - /// The position to start looking (defaults to 0). - /// The position to stop looking (defaults to -1, unlimited). - /// The flags to use for this operation. - /// The command returns the position of the first bit set to 1 or 0 according to the request. - /// If we look for set bits(the bit argument is 1) and the string is empty or composed of just zero bytes, -1 is returned. - /// https://redis.io/commands/bitpos - Task StringBitPositionAsync(RedisKey key, bool bit, long start = 0, long end = -1, CommandFlags flags = CommandFlags.None); + /// + [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] + Task StringBitPositionAsync(RedisKey key, bool bit, long start, long end, CommandFlags flags); - /// - /// Decrements the number stored at key by decrement. If the key does not exist, it is set to 0 before performing the operation. - /// An error is returned if the key contains a value of the wrong type or contains a string that is not representable as integer. 
This operation is limited to 64 bit signed integers. - /// - /// The key of the string. - /// The amount to decrement by (defaults to 1). - /// The flags to use for this operation. - /// The value of key after the decrement. - /// https://redis.io/commands/decrby - /// https://redis.io/commands/decr + /// + Task StringBitPositionAsync(RedisKey key, bool bit, long start = 0, long end = -1, StringIndexType indexType = StringIndexType.Byte, CommandFlags flags = CommandFlags.None); + + /// Task StringDecrementAsync(RedisKey key, long value = 1, CommandFlags flags = CommandFlags.None); - /// - /// Decrements the string representing a floating point number stored at key by the specified decrement. If the key does not exist, it is set to 0 before performing the operation. The precision of the output is fixed at 17 digits after the decimal point regardless of the actual internal precision of the computation. - /// - /// The key of the string. - /// The amount to decrement by (defaults to 1). - /// The flags to use for this operation. - /// The value of key after the decrement. - /// https://redis.io/commands/incrbyfloat + /// + Task StringDeleteAsync(RedisKey key, ValueCondition when, CommandFlags flags = CommandFlags.None); + + /// Task StringDecrementAsync(RedisKey key, double value, CommandFlags flags = CommandFlags.None); - /// - /// Get the value of key. If the key does not exist the special value nil is returned. An error is returned if the value stored at key is not a string, because GET only handles string values. - /// - /// The key of the string. - /// The flags to use for this operation. - /// The value of key, or nil when key does not exist. 
- /// https://redis.io/commands/get + /// + [Experimental(Experiments.Server_8_4, UrlFormat = Experiments.UrlFormat)] + Task StringDigestAsync(RedisKey key, CommandFlags flags = CommandFlags.None); + + /// Task StringGetAsync(RedisKey key, CommandFlags flags = CommandFlags.None); - /// - /// Returns the values of all specified keys. For every key that does not hold a string value or does not exist, the special value nil is returned. - /// - /// The keys of the strings. - /// The flags to use for this operation. - /// The values of the strings with nil for keys do not exist. - /// https://redis.io/commands/mget + /// Task StringGetAsync(RedisKey[] keys, CommandFlags flags = CommandFlags.None); - /// - /// Get the value of key. If the key does not exist the special value nil is returned. An error is returned if the value stored at key is not a string, because GET only handles string values. - /// - /// The key of the string. - /// The flags to use for this operation. - /// The value of key, or nil when key does not exist. - /// https://redis.io/commands/get - Task> StringGetLeaseAsync(RedisKey key, CommandFlags flags = CommandFlags.None); + /// + Task?> StringGetLeaseAsync(RedisKey key, CommandFlags flags = CommandFlags.None); - /// - /// Returns the bit value at offset in the string value stored at key. - /// When offset is beyond the string length, the string is assumed to be a contiguous space with 0 bits. - /// - /// The key of the string. - /// The offset in the string to get a bit at. - /// The flags to use for this operation. - /// The bit value stored at offset. - /// https://redis.io/commands/getbit + /// Task StringGetBitAsync(RedisKey key, long offset, CommandFlags flags = CommandFlags.None); - /// - /// Returns the substring of the string value stored at key, determined by the offsets start and end (both are inclusive). Negative offsets can be used in order to provide an offset starting from the end of the string. 
So -1 means the last character, -2 the penultimate and so forth. - /// - /// The key of the string. - /// The start index of the substring to get. - /// The end index of the substring to get. - /// The flags to use for this operation. - /// The substring of the string value stored at key. - /// https://redis.io/commands/getrange + /// Task StringGetRangeAsync(RedisKey key, long start, long end, CommandFlags flags = CommandFlags.None); - /// - /// Atomically sets key to value and returns the old value stored at key. - /// - /// The key of the string. - /// The value to replace the existing value with. - /// The flags to use for this operation. - /// The old value stored at key, or nil when key did not exist. - /// https://redis.io/commands/getset + /// Task StringGetSetAsync(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None); - /// - /// Get the value of key. If the key does not exist the special value nil is returned. An error is returned if the value stored at key is not a string, because GET only handles string values. - /// - /// The key of the string. - /// The flags to use for this operation. - /// The value of key and its expiry, or nil when key does not exist. - /// https://redis.io/commands/get + /// + Task StringGetSetExpiryAsync(RedisKey key, TimeSpan? expiry, CommandFlags flags = CommandFlags.None); + + /// + Task StringGetSetExpiryAsync(RedisKey key, DateTime expiry, CommandFlags flags = CommandFlags.None); + + /// + Task StringGetDeleteAsync(RedisKey key, CommandFlags flags = CommandFlags.None); + + /// Task StringGetWithExpiryAsync(RedisKey key, CommandFlags flags = CommandFlags.None); - /// - /// Increments the number stored at key by increment. If the key does not exist, it is set to 0 before performing the operation. An error is returned if the key contains a value of the wrong type or contains a string that is not representable as integer. This operation is limited to 64 bit signed integers. - /// - /// The key of the string. 
- /// The amount to increment by (defaults to 1). - /// The flags to use for this operation. - /// The value of key after the increment. - /// https://redis.io/commands/incrby - /// https://redis.io/commands/incr + /// Task StringIncrementAsync(RedisKey key, long value = 1, CommandFlags flags = CommandFlags.None); - /// - /// Increments the string representing a floating point number stored at key by the specified increment. If the key does not exist, it is set to 0 before performing the operation. The precision of the output is fixed at 17 digits after the decimal point regardless of the actual internal precision of the computation. - /// - /// The key of the string. - /// The amount to increment by (defaults to 1). - /// The flags to use for this operation. - /// The value of key after the increment. - /// https://redis.io/commands/incrbyfloat + /// Task StringIncrementAsync(RedisKey key, double value, CommandFlags flags = CommandFlags.None); - /// - /// Returns the length of the string value stored at key. - /// - /// The key of the string. - /// The flags to use for this operation. - /// the length of the string at key, or 0 when key does not exist. - /// https://redis.io/commands/strlen + /// Task StringLengthAsync(RedisKey key, CommandFlags flags = CommandFlags.None); - /// - /// Set key to hold the string value. If key already holds a value, it is overwritten, regardless of its type. - /// - /// The key of the string. - /// The value to set. - /// The expiry to set. - /// Which condition to set the value under (defaults to always). - /// The flags to use for this operation. - /// True if the string was set, false otherwise. - /// https://redis.io/commands/set - Task StringSetAsync(RedisKey key, RedisValue value, TimeSpan? 
expiry = null, When when = When.Always, CommandFlags flags = CommandFlags.None); + /// + Task StringLongestCommonSubsequenceAsync(RedisKey first, RedisKey second, CommandFlags flags = CommandFlags.None); - /// - /// Sets the given keys to their respective values. If "not exists" is specified, this will not perform any operation at all even if just a single key already exists. - /// - /// The keys and values to set. - /// Which condition to set the value under (defaults to always). - /// The flags to use for this operation. - /// True if the keys were set, else False - /// https://redis.io/commands/mset - /// https://redis.io/commands/msetnx - Task StringSetAsync(KeyValuePair[] values, When when = When.Always, CommandFlags flags = CommandFlags.None); + /// t + Task StringLongestCommonSubsequenceLengthAsync(RedisKey first, RedisKey second, CommandFlags flags = CommandFlags.None); - /// - /// Sets or clears the bit at offset in the string value stored at key. - /// The bit is either set or cleared depending on value, which can be either 0 or 1. When key does not exist, a new string value is created.The string is grown to make sure it can hold a bit at offset. - /// - /// The key of the string. - /// The offset in the string to set . - /// The bit value to set, true for 1, false for 0. - /// The flags to use for this operation. - /// The original bit value stored at offset. - /// https://redis.io/commands/setbit - Task StringSetBitAsync(RedisKey key, long offset, bool bit, CommandFlags flags = CommandFlags.None); + /// + Task StringLongestCommonSubsequenceWithMatchesAsync(RedisKey first, RedisKey second, long minLength = 0, CommandFlags flags = CommandFlags.None); - /// - /// Overwrites part of the string stored at key, starting at the specified offset, for the entire length of value. If the offset is larger than the current length of the string at key, the string is padded with zero-bytes to make offset fit. 
Non-existing keys are considered as empty strings, so this command will make sure it holds a string large enough to be able to set value at offset. - /// - /// The key of the string. - /// The offset in the string to overwrite. - /// The value to overwrite with. - /// The flags to use for this operation. - /// The length of the string after it was modified by the command. - /// https://redis.io/commands/setrange - Task StringSetRangeAsync(RedisKey key, long offset, RedisValue value, CommandFlags flags = CommandFlags.None); + /// + [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] + Task StringSetAsync(RedisKey key, RedisValue value, TimeSpan? expiry, When when); - /// - /// Touch the specified key. - /// - /// The key to touch. - /// The flags to use for this operation. - /// True if the key was touched. - /// https://redis.io/commands/touch - Task KeyTouchAsync(RedisKey key, CommandFlags flags = CommandFlags.None); + /// + [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] + Task StringSetAsync(RedisKey key, RedisValue value, TimeSpan? expiry, When when, CommandFlags flags); - /// - /// Youch the specified keys. A key is ignored if it does not exist. - /// - /// The keys to touch. - /// The flags to use for this operation. - /// The number of keys that were touched. - /// https://redis.io/commands/touch - Task KeyTouchAsync(RedisKey[] keys, CommandFlags flags = CommandFlags.None); + /// + Task StringSetAsync(RedisKey key, RedisValue value, TimeSpan? 
expiry, bool keepTtl, When when = When.Always, CommandFlags flags = CommandFlags.None); + /// +#pragma warning disable RS0027 // Public API with optional parameter(s) should have the most parameters amongst its public overloads + Task StringSetAsync(RedisKey key, RedisValue value, Expiration expiry = default, ValueCondition when = default, CommandFlags flags = CommandFlags.None); +#pragma warning restore RS0027 + + /// + Task StringSetAsync(KeyValuePair[] values, When when, CommandFlags flags); + + /// + Task StringSetAsync(KeyValuePair[] values, When when = When.Always, Expiration expiry = default, CommandFlags flags = CommandFlags.None); + + /// + Task StringSetAndGetAsync(RedisKey key, RedisValue value, TimeSpan? expiry, When when, CommandFlags flags); + + /// + Task StringSetAndGetAsync(RedisKey key, RedisValue value, TimeSpan? expiry = null, bool keepTtl = false, When when = When.Always, CommandFlags flags = CommandFlags.None); + + /// + Task StringSetBitAsync(RedisKey key, long offset, bool bit, CommandFlags flags = CommandFlags.None); + + /// + Task StringSetRangeAsync(RedisKey key, long offset, RedisValue value, CommandFlags flags = CommandFlags.None); } } diff --git a/src/StackExchange.Redis/Interfaces/IReconnectRetryPolicy.cs b/src/StackExchange.Redis/Interfaces/IReconnectRetryPolicy.cs index d2930dc2f..7bb29843a 100644 --- a/src/StackExchange.Redis/Interfaces/IReconnectRetryPolicy.cs +++ b/src/StackExchange.Redis/Interfaces/IReconnectRetryPolicy.cs @@ -1,15 +1,15 @@ -namespace StackExchange.Redis +namespace StackExchange.Redis { /// - /// Describes retry policy functionality that can be provided to the multiplexer to be used for connection reconnects + /// Describes retry policy functionality that can be provided to the multiplexer to be used for connection reconnects. /// public interface IReconnectRetryPolicy { /// /// This method is called by the multiplexer to determine if a reconnect operation can be retried now. 
/// - /// The number of times reconnect retries have already been made by the multiplexer while it was in connecting state - /// Total time elapsed in milliseconds since the last reconnect retry was made + /// The number of times reconnect retries have already been made by the multiplexer while it was in connecting state. + /// Total time elapsed in milliseconds since the last reconnect retry was made. bool ShouldRetry(long currentRetryCount, int timeElapsedMillisecondsSinceLastRetry); } } diff --git a/src/StackExchange.Redis/Interfaces/IRedis.cs b/src/StackExchange.Redis/Interfaces/IRedis.cs index ac18b7898..3507aa433 100644 --- a/src/StackExchange.Redis/Interfaces/IRedis.cs +++ b/src/StackExchange.Redis/Interfaces/IRedis.cs @@ -3,7 +3,7 @@ namespace StackExchange.Redis { /// - /// Common operations available to all redis connections + /// Common operations available to all redis connections. /// public partial interface IRedis : IRedisAsync { @@ -12,7 +12,7 @@ public partial interface IRedis : IRedisAsync /// /// The command flags to use when pinging. /// The observed latency. - /// https://redis.io/commands/ping + /// TimeSpan Ping(CommandFlags flags = CommandFlags.None); } } diff --git a/src/StackExchange.Redis/Interfaces/IRedisAsync.cs b/src/StackExchange.Redis/Interfaces/IRedisAsync.cs index 91e2dd7dd..4c20d5e72 100644 --- a/src/StackExchange.Redis/Interfaces/IRedisAsync.cs +++ b/src/StackExchange.Redis/Interfaces/IRedisAsync.cs @@ -4,12 +4,12 @@ namespace StackExchange.Redis { /// - /// Common operations available to all redis connections + /// Common operations available to all redis connections. /// public partial interface IRedisAsync { /// - /// Gets the multiplexer that created this instance + /// Gets the multiplexer that created this instance. /// IConnectionMultiplexer Multiplexer { get; } @@ -18,30 +18,30 @@ public partial interface IRedisAsync /// /// The command flags to use. /// The observed latency. 
- /// https://redis.io/commands/ping + /// Task PingAsync(CommandFlags flags = CommandFlags.None); /// - /// Wait for a given asynchronous operation to complete (or timeout), reporting which + /// Wait for a given asynchronous operation to complete (or timeout), reporting which. /// /// The task to wait on. bool TryWait(Task task); /// - /// Wait for a given asynchronous operation to complete (or timeout) + /// Wait for a given asynchronous operation to complete (or timeout). /// /// The task to wait on. void Wait(Task task); /// - /// Wait for a given asynchronous operation to complete (or timeout) + /// Wait for a given asynchronous operation to complete (or timeout). /// /// The type of task to wait on. /// The task to wait on. T Wait(Task task); /// - /// Wait for the given asynchronous operations to complete (or timeout) + /// Wait for the given asynchronous operations to complete (or timeout). /// /// The tasks to wait on. void WaitAll(params Task[] tasks); diff --git a/src/StackExchange.Redis/Interfaces/IScanningCursor.cs b/src/StackExchange.Redis/Interfaces/IScanningCursor.cs index 4cbe6d92e..a9c8c45cf 100644 --- a/src/StackExchange.Redis/Interfaces/IScanningCursor.cs +++ b/src/StackExchange.Redis/Interfaces/IScanningCursor.cs @@ -1,23 +1,23 @@ namespace StackExchange.Redis { /// - /// Represents a resumable, cursor-based scanning operation + /// Represents a resumable, cursor-based scanning operation. /// public interface IScanningCursor { /// - /// Returns the cursor that represents the *active* page of results (not the pending/next page of results as returned by SCAN/HSCAN/ZSCAN/SSCAN) + /// Returns the cursor that represents the *active* page of results (not the pending/next page of results as returned by SCAN/HSCAN/ZSCAN/SSCAN). /// long Cursor { get; } /// - /// The page size of the current operation + /// The page size of the current operation. 
/// int PageSize { get; } /// - /// The offset into the current page + /// The offset into the current page. /// int PageOffset { get; } } -} \ No newline at end of file +} diff --git a/src/StackExchange.Redis/Interfaces/IServer.cs b/src/StackExchange.Redis/Interfaces/IServer.cs index 07c2615c9..fdd3d6872 100644 --- a/src/StackExchange.Redis/Interfaces/IServer.cs +++ b/src/StackExchange.Redis/Interfaces/IServer.cs @@ -9,150 +9,144 @@ namespace StackExchange.Redis { /// - /// Provides configuration controls of a redis server + /// Provides configuration controls of a redis server. /// public partial interface IServer : IRedis { /// - /// Gets the cluster configuration associated with this server, if known + /// Gets the cluster configuration associated with this server, if known. /// - ClusterConfiguration ClusterConfiguration { get; } + ClusterConfiguration? ClusterConfiguration { get; } /// - /// Gets the address of the connected server + /// Gets the address of the connected server. /// EndPoint EndPoint { get; } /// - /// Gets the features available to the connected server + /// Gets the features available to the connected server. /// RedisFeatures Features { get; } /// - /// Gets whether the connection to the server is active and usable + /// Gets whether the connection to the server is active and usable. /// bool IsConnected { get; } /// - /// Gets whether the connected server is a replica + /// The protocol being used to communicate with this server (if not connected/known, then the anticipated protocol from the configuration is returned, assuming success). /// - [Obsolete("Starting with Redis version 5, Redis has moved to 'replica' terminology. Please use " + nameof(IsReplica) + " instead.")] + RedisProtocol Protocol { get; } + + /// + /// Gets whether the connected server is a replica. + /// + [Obsolete("Starting with Redis version 5, Redis has moved to 'replica' terminology. 
Please use " + nameof(IsReplica) + " instead, this will be removed in 3.0.")] [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] bool IsSlave { get; } /// - /// Gets whether the connected server is a replica + /// Gets whether the connected server is a replica. /// bool IsReplica { get; } /// - /// Explicitly opt in for replica writes on writable replica + /// Explicitly opt in for replica writes on writable replica. /// - [Obsolete("Starting with Redis version 5, Redis has moved to 'replica' terminology. Please use " + nameof(AllowReplicaWrites) + " instead.")] + [Obsolete("Starting with Redis version 5, Redis has moved to 'replica' terminology. Please use " + nameof(AllowReplicaWrites) + " instead, this will be removed in 3.0.")] [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] bool AllowSlaveWrites { get; set; } /// - /// Explicitly opt in for replica writes on writable replica + /// Explicitly opt in for replica writes on writable replica. /// bool AllowReplicaWrites { get; set; } /// - /// Gets the operating mode of the connected server + /// Gets the operating mode of the connected server. /// ServerType ServerType { get; } /// - /// Gets the version of the connected server + /// Gets the version of the connected server. /// Version Version { get; } /// - /// The number of databases supported on this server + /// The number of databases supported on this server. /// int DatabaseCount { get; } /// - /// The CLIENT KILL command closes a given client connection identified by ip:port. - /// The ip:port should match a line returned by the CLIENT LIST command. - /// Due to the single-threaded nature of Redis, it is not possible to kill a client connection while it is executing a command.From the client point of view, the connection can never be closed in the middle of the execution of a command.However, the client will notice the connection has been closed only when the next command is sent (and results in network error). 
+ /// The CLIENT KILL command closes a given client connection identified by ip:port. + /// The ip:port should match a line returned by the CLIENT LIST command. + /// Due to the single-threaded nature of Redis, it is not possible to kill a client connection while it is executing a command. + /// From the client point of view, the connection can never be closed in the middle of the execution of a command. + /// However, the client will notice the connection has been closed only when the next command is sent (and results in network error). /// /// The endpoint of the client to kill. /// The command flags to use. - /// https://redis.io/commands/client-kill + /// void ClientKill(EndPoint endpoint, CommandFlags flags = CommandFlags.None); - /// - /// The CLIENT KILL command closes a given client connection identified by ip:port. - /// The ip:port should match a line returned by the CLIENT LIST command. - /// Due to the single-threaded nature of Redis, it is not possible to kill a client connection while it is executing a command.From the client point of view, the connection can never be closed in the middle of the execution of a command.However, the client will notice the connection has been closed only when the next command is sent (and results in network error). - /// - /// The endpoint of the client to kill. - /// The command flags to use. - /// https://redis.io/commands/client-kill + /// Task ClientKillAsync(EndPoint endpoint, CommandFlags flags = CommandFlags.None); /// - /// The CLIENT KILL command closes multiple connections that match the specified filters + /// The CLIENT KILL command closes multiple connections that match the specified filters. /// /// The ID of the client to kill. /// The type of client. /// The endpoint to kill. - /// Whether to kskip the current connection. + /// Whether to skip the current connection. /// The command flags to use. /// the number of clients killed. - /// https://redis.io/commands/client-kill - long ClientKill(long? 
id = null, ClientType? clientType = null, EndPoint endpoint = null, bool skipMe = true, CommandFlags flags = CommandFlags.None); + /// + long ClientKill(long? id = null, ClientType? clientType = null, EndPoint? endpoint = null, bool skipMe = true, CommandFlags flags = CommandFlags.None); + + /// + Task ClientKillAsync(long? id = null, ClientType? clientType = null, EndPoint? endpoint = null, bool skipMe = true, CommandFlags flags = CommandFlags.None); /// - /// The CLIENT KILL command closes multiple connections that match the specified filters + /// The CLIENT KILL command closes multiple connections that match the specified filters. /// - /// The ID of the client to kill. - /// The type of client. - /// The endpoint to kill. - /// Whether to kskip the current connection. + /// The filter to use in choosing which clients to kill. /// The command flags to use. /// the number of clients killed. - /// https://redis.io/commands/client-kill - Task ClientKillAsync(long? id = null, ClientType? clientType = null, EndPoint endpoint = null, bool skipMe = true, CommandFlags flags = CommandFlags.None); + long ClientKill(ClientKillFilter filter, CommandFlags flags = CommandFlags.None); + + /// + Task ClientKillAsync(ClientKillFilter filter, CommandFlags flags = CommandFlags.None); /// - /// The CLIENT LIST command returns information and statistics about the client connections server in a mostly human readable format. + /// The CLIENT LIST command returns information and statistics about the client connections server in a mostly human readable format. /// /// The command flags to use. - /// https://redis.io/commands/client-list + /// ClientInfo[] ClientList(CommandFlags flags = CommandFlags.None); - /// - /// The CLIENT LIST command returns information and statistics about the client connections server in a mostly human readable format. - /// - /// The command flags to use. 
- /// https://redis.io/commands/client-list + /// Task ClientListAsync(CommandFlags flags = CommandFlags.None); /// - /// Obtains the current CLUSTER NODES output from a cluster server + /// Obtains the current CLUSTER NODES output from a cluster server. /// /// The command flags to use. - ClusterConfiguration ClusterNodes(CommandFlags flags = CommandFlags.None); + /// + ClusterConfiguration? ClusterNodes(CommandFlags flags = CommandFlags.None); - /// - /// Obtains the current CLUSTER NODES output from a cluster server - /// - /// The command flags to use. - Task ClusterNodesAsync(CommandFlags flags = CommandFlags.None); + /// + Task ClusterNodesAsync(CommandFlags flags = CommandFlags.None); /// - /// Obtains the current raw CLUSTER NODES output from a cluster server + /// Obtains the current raw CLUSTER NODES output from a cluster server. /// /// The command flags to use. - string ClusterNodesRaw(CommandFlags flags = CommandFlags.None); + /// + string? ClusterNodesRaw(CommandFlags flags = CommandFlags.None); - /// - /// Obtains the current raw CLUSTER NODES output from a cluster server - /// - /// The command flags to use. - Task ClusterNodesRawAsync(CommandFlags flags = CommandFlags.None); + /// + Task ClusterNodesRawAsync(CommandFlags flags = CommandFlags.None); /// /// Get all configuration parameters matching the specified pattern. @@ -160,78 +154,91 @@ public partial interface IServer : IRedis /// The pattern of config values to get. /// The command flags to use. /// All matching configuration parameters. - /// https://redis.io/commands/config-get - KeyValuePair[] ConfigGet(RedisValue pattern = default(RedisValue), CommandFlags flags = CommandFlags.None); + /// + KeyValuePair[] ConfigGet(RedisValue pattern = default, CommandFlags flags = CommandFlags.None); - /// - /// Get all configuration parameters matching the specified pattern. - /// - /// The pattern of config values to get. - /// The command flags to use. - /// All matching configuration parameters. 
- /// https://redis.io/commands/config-get - Task[]> ConfigGetAsync(RedisValue pattern = default(RedisValue), CommandFlags flags = CommandFlags.None); + /// + Task[]> ConfigGetAsync(RedisValue pattern = default, CommandFlags flags = CommandFlags.None); /// /// Resets the statistics reported by Redis using the INFO command. /// /// The command flags to use. - /// https://redis.io/commands/config-resetstat + /// void ConfigResetStatistics(CommandFlags flags = CommandFlags.None); - /// - /// Resets the statistics reported by Redis using the INFO command. - /// - /// The command flags to use. - /// https://redis.io/commands/config-resetstat + /// Task ConfigResetStatisticsAsync(CommandFlags flags = CommandFlags.None); /// - /// The CONFIG REWRITE command rewrites the redis.conf file the server was started with, applying the minimal changes needed to make it reflecting the configuration currently used by the server, that may be different compared to the original one because of the use of the CONFIG SET command. + /// The CONFIG REWRITE command rewrites the redis.conf file the server was started with, + /// applying the minimal changes needed to make it reflecting the configuration currently + /// used by the server, that may be different compared to the original one because of the use of the CONFIG SET command. /// /// The command flags to use. - /// https://redis.io/commands/config-rewrite + /// void ConfigRewrite(CommandFlags flags = CommandFlags.None); - /// - /// The CONFIG REWRITE command rewrites the redis.conf file the server was started with, applying the minimal changes needed to make it reflecting the configuration currently used by the server, that may be different compared to the original one because of the use of the CONFIG SET command. - /// - /// The command flags to use. 
- /// https://redis.io/commands/config-rewrite + /// Task ConfigRewriteAsync(CommandFlags flags = CommandFlags.None); /// - /// The CONFIG SET command is used in order to reconfigure the server at runtime without the need to restart Redis. You can change both trivial parameters or switch from one to another persistence option using this command. + /// The CONFIG SET command is used in order to reconfigure the server at runtime without the need to restart Redis. + /// You can change both trivial parameters or switch from one to another persistence option using this command. /// /// The setting name. /// The new setting value. /// The command flags to use. - /// https://redis.io/commands/config-set + /// void ConfigSet(RedisValue setting, RedisValue value, CommandFlags flags = CommandFlags.None); + /// + Task ConfigSetAsync(RedisValue setting, RedisValue value, CommandFlags flags = CommandFlags.None); + /// - /// The CONFIG SET command is used in order to reconfigure the server at runtime without the need to restart Redis. You can change both trivial parameters or switch from one to another persistence option using this command. + /// Returns the number of total commands available in this Redis server. /// - /// The setting name. - /// The new setting value. /// The command flags to use. - /// https://redis.io/commands/config-set - Task ConfigSetAsync(RedisValue setting, RedisValue value, CommandFlags flags = CommandFlags.None); + /// + long CommandCount(CommandFlags flags = CommandFlags.None); + + /// + Task CommandCountAsync(CommandFlags flags = CommandFlags.None); /// - /// Return the number of keys in the database. + /// Returns list of keys from a full Redis command. /// - /// The database ID. + /// The command to get keys from. /// The command flags to use. 
- /// https://redis.io/commands/dbsize - long DatabaseSize(int database = -1, CommandFlags flags = CommandFlags.None); + /// + RedisKey[] CommandGetKeys(RedisValue[] command, CommandFlags flags = CommandFlags.None); + + /// + Task CommandGetKeysAsync(RedisValue[] command, CommandFlags flags = CommandFlags.None); + + /// + /// Returns a list of command names available on this Redis server. + /// Only one of the filter options is usable at a time. + /// + /// The module name to filter the command list by. + /// The category to filter the command list by. + /// The pattern to filter the command list by. + /// The command flags to use. + /// + string[] CommandList(RedisValue? moduleName = null, RedisValue? category = null, RedisValue? pattern = null, CommandFlags flags = CommandFlags.None); + + /// + Task CommandListAsync(RedisValue? moduleName = null, RedisValue? category = null, RedisValue? pattern = null, CommandFlags flags = CommandFlags.None); /// /// Return the number of keys in the database. /// /// The database ID. /// The command flags to use. - /// https://redis.io/commands/dbsize + /// + long DatabaseSize(int database = -1, CommandFlags flags = CommandFlags.None); + + /// Task DatabaseSizeAsync(int database = -1, CommandFlags flags = CommandFlags.None); /// @@ -239,15 +246,10 @@ public partial interface IServer : IRedis /// /// The message to echo. /// The command flags to use. - /// https://redis.io/commands/echo + /// RedisValue Echo(RedisValue message, CommandFlags flags = CommandFlags.None); - /// - /// Return the same message passed in. - /// - /// The message to echo. - /// The command flags to use. - /// https://redis.io/commands/echo + /// Task EchoAsync(RedisValue message, CommandFlags flags = CommandFlags.None); /// @@ -257,57 +259,56 @@ public partial interface IServer : IRedis /// /// The command to run. /// The arguments to pass for the command. 
- /// This API should be considered an advanced feature; inappropriate use can be harmful - /// A dynamic representation of the command's result + /// A dynamic representation of the command's result. + /// This API should be considered an advanced feature; inappropriate use can be harmful. RedisResult Execute(string command, params object[] args); + /// + Task ExecuteAsync(string command, params object[] args); + +#pragma warning disable RS0026, RS0027 // multiple overloads /// /// Execute an arbitrary command against the server; this is primarily intended for /// executing modules, but may also be used to provide access to new features that lack - /// a direct API. + /// a direct API. The command is assumed to be not database-specific. If this is not the case, + /// should be used to + /// specify the database (using null to use the configured default database). /// /// The command to run. /// The arguments to pass for the command. /// The flags to use for this operation. - /// This API should be considered an advanced feature; inappropriate use can be harmful - /// A dynamic representation of the command's result + /// A dynamic representation of the command's result. + /// This API should be considered an advanced feature; inappropriate use can be harmful. RedisResult Execute(string command, ICollection args, CommandFlags flags = CommandFlags.None); - /// - /// Execute an arbitrary command against the server; this is primarily intended for - /// executing modules, but may also be used to provide access to new features that lack - /// a direct API. - /// - /// The command to run. - /// The arguments to pass for the command. 
- /// This API should be considered an advanced feature; inappropriate use can be harmful - /// A dynamic representation of the command's result - Task ExecuteAsync(string command, params object[] args); + /// + Task ExecuteAsync(string command, ICollection args, CommandFlags flags = CommandFlags.None); +#pragma warning restore RS0026, RS0027 /// - /// Execute an arbitrary command against the server; this is primarily intended for + /// Execute an arbitrary database-specific command against the server; this is primarily intended for /// executing modules, but may also be used to provide access to new features that lack /// a direct API. /// + /// The database ID; if , the configured default database is used. /// The command to run. /// The arguments to pass for the command. /// The flags to use for this operation. - /// This API should be considered an advanced feature; inappropriate use can be harmful - /// A dynamic representation of the command's result - Task ExecuteAsync(string command, ICollection args, CommandFlags flags = CommandFlags.None); + /// A dynamic representation of the command's result. + /// This API should be considered an advanced feature; inappropriate use can be harmful. + RedisResult Execute(int? database, string command, ICollection args, CommandFlags flags = CommandFlags.None); + + /// + Task ExecuteAsync(int? database, string command, ICollection args, CommandFlags flags = CommandFlags.None); /// /// Delete all the keys of all databases on the server. /// /// The command flags to use. - /// https://redis.io/commands/flushall + /// void FlushAllDatabases(CommandFlags flags = CommandFlags.None); - /// - /// Delete all the keys of all databases on the server. - /// - /// The command flags to use. - /// https://redis.io/commands/flushall + /// Task FlushAllDatabasesAsync(CommandFlags flags = CommandFlags.None); /// @@ -315,19 +316,14 @@ public partial interface IServer : IRedis /// /// The database ID. /// The command flags to use. 
- /// https://redis.io/commands/flushdb + /// void FlushDatabase(int database = -1, CommandFlags flags = CommandFlags.None); - /// - /// Delete all the keys of the database. - /// - /// The database ID. - /// The command flags to use. - /// https://redis.io/commands/flushdb + /// Task FlushDatabaseAsync(int database = -1, CommandFlags flags = CommandFlags.None); /// - /// Get summary statistics associates with this server + /// Get summary statistics associates with this server. /// ServerCounters GetCounters(); @@ -336,47 +332,32 @@ public partial interface IServer : IRedis /// /// The info section to get, if getting a specific one. /// The command flags to use. - /// https://redis.io/commands/info - IGrouping>[] Info(RedisValue section = default(RedisValue), CommandFlags flags = CommandFlags.None); + /// A grouping of key/value pairs, grouped by their section header. + /// + IGrouping>[] Info(RedisValue section = default, CommandFlags flags = CommandFlags.None); - /// - /// The INFO command returns information and statistics about the server in a format that is simple to parse by computers and easy to read by humans. - /// - /// The info section to get, if getting a specific one. - /// The command flags to use. - /// https://redis.io/commands/info - Task>[]> InfoAsync(RedisValue section = default(RedisValue), CommandFlags flags = CommandFlags.None); + /// + Task>[]> InfoAsync(RedisValue section = default, CommandFlags flags = CommandFlags.None); /// /// The INFO command returns information and statistics about the server in a format that is simple to parse by computers and easy to read by humans. /// /// The info section to get, if getting a specific one. /// The command flags to use. - /// https://redis.io/commands/info - string InfoRaw(RedisValue section = default(RedisValue), CommandFlags flags = CommandFlags.None); + /// The entire raw INFO string. + /// + string? 
InfoRaw(RedisValue section = default, CommandFlags flags = CommandFlags.None); - /// - /// The INFO command returns information and statistics about the server in a format that is simple to parse by computers and easy to read by humans. - /// - /// The info section to get, if getting a specific one. - /// The command flags to use. - /// https://redis.io/commands/info - Task InfoRawAsync(RedisValue section = default(RedisValue), CommandFlags flags = CommandFlags.None); + /// + Task InfoRawAsync(RedisValue section = default, CommandFlags flags = CommandFlags.None); - /// - /// Returns all keys matching pattern; the KEYS or SCAN commands will be used based on the server capabilities. - /// - /// The database ID. - /// The pattern to use. - /// The page size to iterate by. - /// The command flags to use. - /// Warning: consider KEYS as a command that should only be used in production environments with extreme care. - /// https://redis.io/commands/keys - /// https://redis.io/commands/scan + /// IEnumerable Keys(int database, RedisValue pattern, int pageSize, CommandFlags flags); /// - /// Returns all keys matching pattern; the KEYS or SCAN commands will be used based on the server capabilities; note: to resume an iteration via cursor, cast the original enumerable or enumerator to IScanningCursor. + /// Returns all keys matching . + /// The KEYS or SCAN commands will be used based on the server capabilities. + /// Note: to resume an iteration via cursor, cast the original enumerable or enumerator to . /// /// The database ID. /// The pattern to use. @@ -384,254 +365,208 @@ public partial interface IServer : IRedis /// The cursor position to resume at. /// The page offset to start at. /// The command flags to use. - /// Warning: consider KEYS as a command that should only be used in production environments with extreme care. 
- /// https://redis.io/commands/keys - /// https://redis.io/commands/scan - IEnumerable Keys(int database = -1, RedisValue pattern = default(RedisValue), int pageSize = RedisBase.CursorUtils.DefaultLibraryPageSize, long cursor = RedisBase.CursorUtils.Origin, int pageOffset = 0, CommandFlags flags = CommandFlags.None); + /// An enumeration of matching redis keys. + /// + /// Warning: consider KEYS as a command that should only be used in production environments with extreme care. + /// + /// See + /// , + /// . + /// + /// + IEnumerable Keys(int database = -1, RedisValue pattern = default, int pageSize = RedisBase.CursorUtils.DefaultLibraryPageSize, long cursor = RedisBase.CursorUtils.Origin, int pageOffset = 0, CommandFlags flags = CommandFlags.None); - /// - /// Returns all keys matching pattern; the KEYS or SCAN commands will be used based on the server capabilities; note: to resume an iteration via cursor, cast the original enumerable or enumerator to IScanningCursor. - /// - /// The database ID. - /// The pattern to use. - /// The page size to iterate by. - /// The cursor position to resume at. - /// The page offset to start at. - /// The command flags to use. - /// Warning: consider KEYS as a command that should only be used in production environments with extreme care. - /// https://redis.io/commands/keys - /// https://redis.io/commands/scan - IAsyncEnumerable KeysAsync(int database = -1, RedisValue pattern = default(RedisValue), int pageSize = RedisBase.CursorUtils.DefaultLibraryPageSize, long cursor = RedisBase.CursorUtils.Origin, int pageOffset = 0, CommandFlags flags = CommandFlags.None); + /// + IAsyncEnumerable KeysAsync(int database = -1, RedisValue pattern = default, int pageSize = RedisBase.CursorUtils.DefaultLibraryPageSize, long cursor = RedisBase.CursorUtils.Origin, int pageOffset = 0, CommandFlags flags = CommandFlags.None); /// - /// Return the time of the last DB save executed with success. 
A client may check if a BGSAVE command succeeded reading the LASTSAVE value, then issuing a BGSAVE command and checking at regular intervals every N seconds if LASTSAVE changed. + /// Return the time of the last DB save executed with success. + /// A client may check if a BGSAVE command succeeded reading the LASTSAVE value, then issuing a BGSAVE command + /// and checking at regular intervals every N seconds if LASTSAVE changed. /// /// The command flags to use. - /// https://redis.io/commands/lastsave + /// The last time a save was performed. + /// DateTime LastSave(CommandFlags flags = CommandFlags.None); - /// - /// Return the time of the last DB save executed with success. A client may check if a BGSAVE command succeeded reading the LASTSAVE value, then issuing a BGSAVE command and checking at regular intervals every N seconds if LASTSAVE changed. - /// - /// The command flags to use. - /// https://redis.io/commands/lastsave + /// Task LastSaveAsync(CommandFlags flags = CommandFlags.None); + /// + [Obsolete("Please use " + nameof(MakePrimaryAsync) + ", this will be removed in 3.0.")] + [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] + void MakeMaster(ReplicationChangeOptions options, TextWriter? log = null); + /// - /// Promote the selected node to be master + /// Promote the selected node to be primary. /// /// The options to use for this topology change. /// The log to write output to. - void MakeMaster(ReplicationChangeOptions options, TextWriter log = null); + /// + Task MakePrimaryAsync(ReplicationChangeOptions options, TextWriter? log = null); /// /// Returns the role info for the current server. /// - /// https://redis.io/commands/role + /// Role Role(CommandFlags flags = CommandFlags.None); - /// - /// Returns the role info for the current server. 
- /// - /// https://redis.io/commands/role + /// Task RoleAsync(CommandFlags flags = CommandFlags.None); /// - /// Explicitly request the database to persist the current state to disk + /// Explicitly request the database to persist the current state to disk. /// /// The method of the save (e.g. background or foreground). /// The command flags to use. - /// https://redis.io/commands/bgrewriteaof - /// https://redis.io/commands/bgsave - /// https://redis.io/commands/save - /// https://redis.io/topics/persistence + /// + /// See + /// , + /// , + /// , + /// . + /// void Save(SaveType type, CommandFlags flags = CommandFlags.None); - /// - /// Explicitly request the database to persist the current state to disk - /// - /// The method of the save (e.g. background or foreground). - /// The command flags to use. - /// https://redis.io/commands/bgrewriteaof - /// https://redis.io/commands/bgsave - /// https://redis.io/commands/save - /// https://redis.io/topics/persistence + /// Task SaveAsync(SaveType type, CommandFlags flags = CommandFlags.None); /// - /// Indicates whether the specified script is defined on the server + /// Indicates whether the specified script is defined on the server. /// /// The text of the script to check for on the server. /// The command flags to use. + /// bool ScriptExists(string script, CommandFlags flags = CommandFlags.None); - /// - /// Indicates whether the specified script hash is defined on the server - /// - /// The SHA1 of the script to check for on the server. - /// The command flags to use. - bool ScriptExists(byte[] sha1, CommandFlags flags = CommandFlags.None); - - /// - /// Indicates whether the specified script is defined on the server - /// - /// The text of the script to check for on the server. - /// The command flags to use. 
+ /// Task ScriptExistsAsync(string script, CommandFlags flags = CommandFlags.None); /// - /// Indicates whether the specified script hash is defined on the server + /// Indicates whether the specified script hash is defined on the server. /// /// The SHA1 of the script to check for on the server. /// The command flags to use. + /// + bool ScriptExists(byte[] sha1, CommandFlags flags = CommandFlags.None); + + /// Task ScriptExistsAsync(byte[] sha1, CommandFlags flags = CommandFlags.None); /// - /// Removes all cached scripts on this server + /// Removes all cached scripts on this server. /// /// The command flags to use. + /// void ScriptFlush(CommandFlags flags = CommandFlags.None); - /// - /// Removes all cached scripts on this server - /// - /// The command flags to use. + /// Task ScriptFlushAsync(CommandFlags flags = CommandFlags.None); /// - /// Explicitly defines a script on the server + /// Explicitly defines a script on the server. /// /// The script to load. /// The command flags to use. + /// The SHA1 of the loaded script. + /// byte[] ScriptLoad(string script, CommandFlags flags = CommandFlags.None); - /// - /// Explicitly defines a script on the server - /// - /// The script to load. - /// The command flags to use. - LoadedLuaScript ScriptLoad(LuaScript script, CommandFlags flags = CommandFlags.None); - - /// - /// Explicitly defines a script on the server - /// - /// The script to load. - /// The command flags to use. + /// Task ScriptLoadAsync(string script, CommandFlags flags = CommandFlags.None); /// - /// Explicitly defines a script on the server + /// Explicitly defines a script on the server. /// /// The script to load. /// The command flags to use. + /// The loaded script, ready for rapid reuse based on the SHA1. 
+ /// + LoadedLuaScript ScriptLoad(LuaScript script, CommandFlags flags = CommandFlags.None); + + /// Task ScriptLoadAsync(LuaScript script, CommandFlags flags = CommandFlags.None); /// - /// Asks the redis server to shutdown, killing all connections. Please FULLY read the notes on the SHUTDOWN command. + /// Asks the redis server to shutdown, killing all connections. Please FULLY read the notes on the SHUTDOWN command. /// /// The mode of the shutdown. /// The command flags to use. - /// https://redis.io/commands/shutdown + /// void Shutdown(ShutdownMode shutdownMode = ShutdownMode.Default, CommandFlags flags = CommandFlags.None); - /// - /// The REPLICAOF command can change the replication settings of a replica on the fly. If a Redis server is already acting as replica, specifying a null master will turn off the replication, turning the Redis server into a MASTER. Specifying a non-null master will make the server a replica of another server listening at the specified hostname and port. - /// - /// Endpoint of the new master to replicate from. - /// The command flags to use. - /// https://redis.io/commands/replicaof - [Obsolete("Starting with Redis version 5, Redis has moved to 'replica' terminology. Please use " + nameof(ReplicaOf) + " instead.")] + /// + [Obsolete("Starting with Redis version 5, Redis has moved to 'replica' terminology. Please use " + nameof(ReplicaOfAsync) + " instead, this will be removed in 3.0.")] [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] void SlaveOf(EndPoint master, CommandFlags flags = CommandFlags.None); - /// - /// The REPLICAOF command can change the replication settings of a replica on the fly. If a Redis server is already acting as replica, specifying a null master will turn off the replication, turning the Redis server into a MASTER. Specifying a non-null master will make the server a replica of another server listening at the specified hostname and port. 
- /// - /// Endpoint of the new master to replicate from. - /// The command flags to use. - /// https://redis.io/commands/replicaof - void ReplicaOf(EndPoint master, CommandFlags flags = CommandFlags.None); - - /// - /// The REPLICAOF command can change the replication settings of a replica on the fly. If a Redis server is already acting as replica, specifying a null master will turn off the replication, turning the Redis server into a MASTER. Specifying a non-null master will make the server a replica of another server listening at the specified hostname and port. - /// - /// Endpoint of the new master to replicate from. - /// The command flags to use. - /// https://redis.io/commands/replicaof - [Obsolete("Starting with Redis version 5, Redis has moved to 'replica' terminology. Please use " + nameof(ReplicaOfAsync) + " instead.")] + /// + [Obsolete("Starting with Redis version 5, Redis has moved to 'replica' terminology. Please use " + nameof(ReplicaOfAsync) + " instead, this will be removed in 3.0.")] [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] Task SlaveOfAsync(EndPoint master, CommandFlags flags = CommandFlags.None); + /// + [Obsolete("Please use " + nameof(ReplicaOfAsync) + ", this will be removed in 3.0.")] + [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] + void ReplicaOf(EndPoint master, CommandFlags flags = CommandFlags.None); + /// - /// The REPLICAOF command can change the replication settings of a replica on the fly. If a Redis server is already acting as replica, specifying a null master will turn off the replication, turning the Redis server into a MASTER. Specifying a non-null master will make the server a replica of another server listening at the specified hostname and port. + /// The REPLICAOF command can change the replication settings of a replica on the fly. 
+ /// If a Redis server is already acting as replica, specifying a null primary will turn off the replication, + /// turning the Redis server into a PRIMARY. Specifying a non-null primary will make the server a replica of + /// another server listening at the specified hostname and port. /// - /// Endpoint of the new master to replicate from. + /// Endpoint of the new primary to replicate from. /// The command flags to use. - /// https://redis.io/commands/replicaof + /// Task ReplicaOfAsync(EndPoint master, CommandFlags flags = CommandFlags.None); /// - /// To read the slow log the SLOWLOG GET command is used, that returns every entry in the slow log. It is possible to return only the N most recent entries passing an additional argument to the command (for instance SLOWLOG GET 10). + /// To read the slow log the SLOWLOG GET command is used, that returns every entry in the slow log. + /// It is possible to return only the N most recent entries passing an additional argument to the command (for instance SLOWLOG GET 10). /// /// The count of items to get. /// The command flags to use. - /// https://redis.io/commands/slowlog + /// The slow command traces as recorded by the Redis server. + /// CommandTrace[] SlowlogGet(int count = 0, CommandFlags flags = CommandFlags.None); - /// - /// To read the slow log the SLOWLOG GET command is used, that returns every entry in the slow log. It is possible to return only the N most recent entries passing an additional argument to the command (for instance SLOWLOG GET 10). - /// - /// The count of items to get. - /// The command flags to use. - /// https://redis.io/commands/slowlog + /// Task SlowlogGetAsync(int count = 0, CommandFlags flags = CommandFlags.None); /// /// You can reset the slow log using the SLOWLOG RESET command. Once deleted the information is lost forever. /// /// The command flags to use. 
- /// https://redis.io/commands/slowlog + /// void SlowlogReset(CommandFlags flags = CommandFlags.None); - /// - /// You can reset the slow log using the SLOWLOG RESET command. Once deleted the information is lost forever. - /// - /// The command flags to use. - /// https://redis.io/commands/slowlog + /// Task SlowlogResetAsync(CommandFlags flags = CommandFlags.None); /// - /// Lists the currently active channels. An active channel is a Pub/Sub channel with one ore more subscribers (not including clients subscribed to patterns). + /// Lists the currently active channels. + /// An active channel is a Pub/Sub channel with one ore more subscribers (not including clients subscribed to patterns). /// /// The channel name pattern to get channels for. /// The command flags to use. /// a list of active channels, optionally matching the specified pattern. - /// https://redis.io/commands/pubsub - RedisChannel[] SubscriptionChannels(RedisChannel pattern = default(RedisChannel), CommandFlags flags = CommandFlags.None); + /// + RedisChannel[] SubscriptionChannels(RedisChannel pattern = default, CommandFlags flags = CommandFlags.None); - /// - /// Lists the currently active channels. An active channel is a Pub/Sub channel with one ore more subscribers (not including clients subscribed to patterns). - /// - /// The channel name pattern to get channels for. - /// The command flags to use. - /// a list of active channels, optionally matching the specified pattern. - /// https://redis.io/commands/pubsub - Task SubscriptionChannelsAsync(RedisChannel pattern = default(RedisChannel), CommandFlags flags = CommandFlags.None); + /// + Task SubscriptionChannelsAsync(RedisChannel pattern = default, CommandFlags flags = CommandFlags.None); /// - /// Returns the number of subscriptions to patterns (that are performed using the PSUBSCRIBE command). Note that this is not just the count of clients subscribed to patterns but the total number of patterns all the clients are subscribed to. 
+ /// Returns the number of subscriptions to patterns (that are performed using the PSUBSCRIBE command). + /// Note that this is not just the count of clients subscribed to patterns but the total number of patterns all the clients are subscribed to. /// /// The command flags to use. /// the number of patterns all the clients are subscribed to. - /// https://redis.io/commands/pubsub + /// long SubscriptionPatternCount(CommandFlags flags = CommandFlags.None); - /// - /// Returns the number of subscriptions to patterns (that are performed using the PSUBSCRIBE command). Note that this is not just the count of clients subscribed to patterns but the total number of patterns all the clients are subscribed to. - /// - /// The command flags to use. - /// the number of patterns all the clients are subscribed to. - /// https://redis.io/commands/pubsub + /// Task SubscriptionPatternCountAsync(CommandFlags flags = CommandFlags.None); /// @@ -639,421 +574,248 @@ public partial interface IServer : IRedis /// /// The channel to get a subscriber count for. /// The command flags to use. - /// https://redis.io/commands/pubsub + /// The number of subscribers on this server. + /// long SubscriptionSubscriberCount(RedisChannel channel, CommandFlags flags = CommandFlags.None); - /// - /// Returns the number of subscribers (not counting clients subscribed to patterns) for the specified channel. - /// - /// The channel to get a subscriber count for. - /// The command flags to use. - /// https://redis.io/commands/pubsub + /// Task SubscriptionSubscriberCountAsync(RedisChannel channel, CommandFlags flags = CommandFlags.None); /// - /// Swaps two Redis databases, so that immediately all the clients connected to a given database will see the data of the other database, and the other way around + /// Swaps two Redis databases, so that immediately all the clients connected to a given database will see the data of the other database, and the other way around. /// /// The ID of the first database. 
/// The ID of the second database. /// The command flags to use. - /// https://redis.io/commands/swapdb + /// void SwapDatabases(int first, int second, CommandFlags flags = CommandFlags.None); - /// - /// Swaps two Redis databases, so that immediately all the clients connected to a given database will see the data of the other database, and the other way around - /// - /// The ID of the first database. - /// The ID of the second database. - /// The command flags to use. - /// https://redis.io/commands/swapdb + /// Task SwapDatabasesAsync(int first, int second, CommandFlags flags = CommandFlags.None); /// - /// The TIME command returns the current server time in UTC format. - /// Use the DateTime.ToLocalTime() method to get local time. + /// The TIME command returns the current server time in UTC format. + /// Use the method to get local time. /// /// The command flags to use. /// The server's current time. - /// https://redis.io/commands/time + /// DateTime Time(CommandFlags flags = CommandFlags.None); - /// - /// The TIME command returns the current server time in UTC format. - /// Use the DateTime.ToLocalTime() method to get local time. - /// - /// The command flags to use. - /// The server's current time. - /// https://redis.io/commands/time + /// Task TimeAsync(CommandFlags flags = CommandFlags.None); /// - /// Gets a text-based latency diagnostic - /// - /// https://redis.io/topics/latency-monitor - Task LatencyDoctorAsync(CommandFlags flags = CommandFlags.None); - /// - /// Gets a text-based latency diagnostic + /// Gets a text-based latency diagnostic. /// - /// https://redis.io/topics/latency-monitor + /// The full text result of latency doctor. + /// + /// See + /// , + /// . 
+ /// string LatencyDoctor(CommandFlags flags = CommandFlags.None); + /// + Task LatencyDoctorAsync(CommandFlags flags = CommandFlags.None); + /// /// Resets the given events (or all if none are specified), discarding the currently logged latency spike events, and resetting the maximum event time register. /// - /// https://redis.io/topics/latency-monitor - Task LatencyResetAsync(string[] eventNames = null, CommandFlags flags = CommandFlags.None); - /// - /// Resets the given events (or all if none are specified), discarding the currently logged latency spike events, and resetting the maximum event time register. - /// - /// https://redis.io/topics/latency-monitor - long LatencyReset(string[] eventNames = null, CommandFlags flags = CommandFlags.None); + /// The number of events that were reset. + /// + /// See + /// , + /// . + /// + long LatencyReset(string[]? eventNames = null, CommandFlags flags = CommandFlags.None); + + /// + Task LatencyResetAsync(string[]? eventNames = null, CommandFlags flags = CommandFlags.None); /// - /// Fetch raw latency data from the event time series, as timestamp-latency pairs - /// - /// https://redis.io/topics/latency-monitor - Task LatencyHistoryAsync(string eventName, CommandFlags flags = CommandFlags.None); - /// - /// Fetch raw latency data from the event time series, as timestamp-latency pairs + /// Fetch raw latency data from the event time series, as timestamp-latency pairs. /// - /// https://redis.io/topics/latency-monitor + /// An array of latency history entries. + /// + /// See + /// , + /// . 
+ /// LatencyHistoryEntry[] LatencyHistory(string eventName, CommandFlags flags = CommandFlags.None); + /// + Task LatencyHistoryAsync(string eventName, CommandFlags flags = CommandFlags.None); + /// - /// Fetch raw latency data from the event time series, as timestamp-latency pairs - /// - /// https://redis.io/topics/latency-monitor - Task LatencyLatestAsync(CommandFlags flags = CommandFlags.None); - /// - /// Fetch raw latency data from the event time series, as timestamp-latency pairs + /// Fetch raw latency data from the event time series, as timestamp-latency pairs. /// - /// https://redis.io/topics/latency-monitor + /// An array of the latest latency history entries. + /// + /// See + /// , + /// . + /// LatencyLatestEntry[] LatencyLatest(CommandFlags flags = CommandFlags.None); - /// - /// Reports about different memory-related issues that the Redis server experiences, and advises about possible remedies. - /// - /// https://redis.io/commands/memory-doctor - Task MemoryDoctorAsync(CommandFlags flags = CommandFlags.None); + /// + Task LatencyLatestAsync(CommandFlags flags = CommandFlags.None); /// /// Reports about different memory-related issues that the Redis server experiences, and advises about possible remedies. /// - /// https://redis.io/commands/memory-doctor + /// The full text result of memory doctor. + /// string MemoryDoctor(CommandFlags flags = CommandFlags.None); - /// - /// Attempts to purge dirty pages so these can be reclaimed by the allocator. - /// - /// https://redis.io/commands/memory-purge - Task MemoryPurgeAsync(CommandFlags flags = CommandFlags.None); + /// + Task MemoryDoctorAsync(CommandFlags flags = CommandFlags.None); /// /// Attempts to purge dirty pages so these can be reclaimed by the allocator. /// - /// https://redis.io/commands/memory-purge + /// void MemoryPurge(CommandFlags flags = CommandFlags.None); - /// - /// Returns an array reply about the memory usage of the server. 
- /// - /// https://redis.io/commands/memory-stats - Task MemoryStatsAsync(CommandFlags flags = CommandFlags.None); + /// + Task MemoryPurgeAsync(CommandFlags flags = CommandFlags.None); /// /// Returns an array reply about the memory usage of the server. /// - /// https://redis.io/commands/memory-stats + /// An array reply of memory stat metrics and values. + /// RedisResult MemoryStats(CommandFlags flags = CommandFlags.None); - /// - /// Provides an internal statistics report from the memory allocator. - /// - /// https://redis.io/commands/memory-malloc-stats - Task MemoryAllocatorStatsAsync(CommandFlags flags = CommandFlags.None); + /// + Task MemoryStatsAsync(CommandFlags flags = CommandFlags.None); /// /// Provides an internal statistics report from the memory allocator. /// - /// https://redis.io/commands/memory-malloc-stats - string MemoryAllocatorStats(CommandFlags flags = CommandFlags.None); + /// The full text result of memory allocation stats. + /// + string? MemoryAllocatorStats(CommandFlags flags = CommandFlags.None); - #region Sentinel + /// + Task MemoryAllocatorStatsAsync(CommandFlags flags = CommandFlags.None); /// - /// Returns the ip and port number of the master with that name. - /// If a failover is in progress or terminated successfully for this master it returns the address and port of the promoted replica. + /// Returns the IP and port number of the primary with that name. + /// If a failover is in progress or terminated successfully for this primary it returns the address and port of the promoted replica. /// /// The sentinel service name. /// The command flags to use. - /// the master ip and port - /// https://redis.io/topics/sentinel - EndPoint SentinelGetMasterAddressByName(string serviceName, CommandFlags flags = CommandFlags.None); + /// The primary IP and port. + /// + EndPoint? 
SentinelGetMasterAddressByName(string serviceName, CommandFlags flags = CommandFlags.None); - /// - /// Returns the ip and port number of the master with that name. - /// If a failover is in progress or terminated successfully for this master it returns the address and port of the promoted replica. - /// - /// The sentinel service name. - /// The command flags to use. - /// the master ip and port - /// https://redis.io/topics/sentinel - Task SentinelGetMasterAddressByNameAsync(string serviceName, CommandFlags flags = CommandFlags.None); + /// + Task SentinelGetMasterAddressByNameAsync(string serviceName, CommandFlags flags = CommandFlags.None); /// - /// Returns the ip and port numbers of all known Sentinels - /// for the given service name. + /// Returns the IP and port numbers of all known Sentinels for the given service name. /// - /// the sentinel service name + /// The sentinel service name. /// The command flags to use. - /// a list of the sentinel ips and ports + /// A list of the sentinel IPs and ports. + /// EndPoint[] SentinelGetSentinelAddresses(string serviceName, CommandFlags flags = CommandFlags.None); - /// - /// Returns the ip and port numbers of all known Sentinels - /// for the given service name. - /// - /// the sentinel service name - /// The command flags to use. - /// a list of the sentinel ips and ports + /// Task SentinelGetSentinelAddressesAsync(string serviceName, CommandFlags flags = CommandFlags.None); /// - /// Returns the ip and port numbers of all known Sentinel replicas - /// for the given service name. + /// Returns the IP and port numbers of all known Sentinel replicas for the given service name. /// - /// the sentinel service name + /// The sentinel service name. /// The command flags to use. - /// a list of the replica ips and ports + /// A list of the replica IPs and ports. 
+ /// EndPoint[] SentinelGetReplicaAddresses(string serviceName, CommandFlags flags = CommandFlags.None); - /// - /// Returns the ip and port numbers of all known Sentinel replicas - /// for the given service name. - /// - /// the sentinel service name - /// The command flags to use. - /// a list of the replica ips and ports + /// Task SentinelGetReplicaAddressesAsync(string serviceName, CommandFlags flags = CommandFlags.None); /// - /// Show the state and info of the specified master. + /// Show the state and info of the specified primary. /// /// The sentinel service name. /// The command flags to use. - /// the master state as KeyValuePairs - /// https://redis.io/topics/sentinel + /// The primaries state as KeyValuePairs. + /// KeyValuePair[] SentinelMaster(string serviceName, CommandFlags flags = CommandFlags.None); - /// - /// Show the state and info of the specified master. - /// - /// The sentinel service name. - /// The command flags to use. - /// the master state as KeyValuePairs - /// https://redis.io/topics/sentinel + /// Task[]> SentinelMasterAsync(string serviceName, CommandFlags flags = CommandFlags.None); /// - /// Show a list of monitored masters and their state. + /// Show a list of monitored primaries and their state. /// /// The command flags to use. - /// an array of master state KeyValuePair arrays - /// https://redis.io/topics/sentinel + /// An array of primaries state KeyValuePair arrays. + /// KeyValuePair[][] SentinelMasters(CommandFlags flags = CommandFlags.None); - /// - /// Show a list of monitored masters and their state. - /// - /// The command flags to use. - /// an array of master state KeyValuePair arrays - /// https://redis.io/topics/sentinel + /// Task[][]> SentinelMastersAsync(CommandFlags flags = CommandFlags.None); - /// - /// Show a list of replicas for this master, and their state. - /// - /// The sentinel service name. - /// The command flags to use. 
- /// an array of replica state KeyValuePair arrays - /// https://redis.io/topics/sentinel - [Obsolete("Starting with Redis version 5, Redis has moved to 'replica' terminology. Please use " + nameof(SentinelReplicas) + " instead.")] + /// + [Obsolete("Starting with Redis version 5, Redis has moved to 'replica' terminology. Please use " + nameof(SentinelReplicas) + " instead, this will be removed in 3.0.")] [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] KeyValuePair[][] SentinelSlaves(string serviceName, CommandFlags flags = CommandFlags.None); - /// - /// Show a list of replicas for this master, and their state. - /// - /// The sentinel service name. - /// The command flags to use. - /// an array of replica state KeyValuePair arrays - /// https://redis.io/topics/sentinel - KeyValuePair[][] SentinelReplicas(string serviceName, CommandFlags flags = CommandFlags.None); - - /// - /// Show a list of replicas for this master, and their state. - /// - /// The sentinel service name. - /// The command flags to use. - /// an array of replica state KeyValuePair arrays - /// https://redis.io/topics/sentinel - [Obsolete("Starting with Redis version 5, Redis has moved to 'replica' terminology. Please use " + nameof(SentinelReplicasAsync) + " instead.")] + /// + [Obsolete("Starting with Redis version 5, Redis has moved to 'replica' terminology. Please use " + nameof(SentinelReplicasAsync) + " instead, this will be removed in 3.0.")] [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] Task[][]> SentinelSlavesAsync(string serviceName, CommandFlags flags = CommandFlags.None); /// - /// Show a list of replicas for this master, and their state. + /// Show a list of replicas for this primary, and their state. /// /// The sentinel service name. /// The command flags to use. - /// an array of replica state KeyValuePair arrays - /// https://redis.io/topics/sentinel + /// An array of replica state KeyValuePair arrays. 
+ /// + KeyValuePair[][] SentinelReplicas(string serviceName, CommandFlags flags = CommandFlags.None); + + /// Task[][]> SentinelReplicasAsync(string serviceName, CommandFlags flags = CommandFlags.None); /// - /// Force a failover as if the master was not reachable, and without asking for agreement to other Sentinels + /// Force a failover as if the primary was not reachable, and without asking for agreement to other Sentinels /// (however a new version of the configuration will be published so that the other Sentinels will update their configurations). /// /// The sentinel service name. /// The command flags to use. - /// https://redis.io/topics/sentinel + /// void SentinelFailover(string serviceName, CommandFlags flags = CommandFlags.None); - /// - /// Force a failover as if the master was not reachable, and without asking for agreement to other Sentinels - /// (however a new version of the configuration will be published so that the other Sentinels will update their configurations). - /// - /// The sentinel service name. - /// The command flags to use. - /// https://redis.io/topics/sentinel + /// Task SentinelFailoverAsync(string serviceName, CommandFlags flags = CommandFlags.None); /// - /// Show a list of sentinels for a master, and their state. + /// Show a list of sentinels for a primary, and their state. /// /// The sentinel service name. /// The command flags to use. - /// https://redis.io/topics/sentinel + /// KeyValuePair[][] SentinelSentinels(string serviceName, CommandFlags flags = CommandFlags.None); - /// - /// Show a list of sentinels for a master, and their state. - /// - /// The sentinel service name. - /// The command flags to use. 
- /// https://redis.io/topics/sentinel + /// Task[][]> SentinelSentinelsAsync(string serviceName, CommandFlags flags = CommandFlags.None); - - #endregion - } - - /// - /// A latency entry as reported by the built-in LATENCY HISTORY command - /// - public readonly struct LatencyHistoryEntry - { - internal static readonly ResultProcessor ToArray = new Processor(); - - private sealed class Processor : ArrayResultProcessor - { - protected override bool TryParse(in RawResult raw, out LatencyHistoryEntry parsed) - { - if (raw.Type == ResultType.MultiBulk) - { - var items = raw.GetItems(); - if (items.Length >= 2 - && items[0].TryGetInt64(out var timestamp) - && items[1].TryGetInt64(out var duration)) - { - parsed = new LatencyHistoryEntry(timestamp, duration); - return true; - } - } - parsed = default; - return false; - } - } - - /// - /// The time at which this entry was recorded - /// - public DateTime Timestamp { get; } - - /// - /// The latency recorded for this event - /// - public int DurationMilliseconds { get; } - - internal LatencyHistoryEntry(long timestamp, long duration) - { - Timestamp = RedisBase.UnixEpoch.AddSeconds(timestamp); - DurationMilliseconds = checked((int)duration); - } - } - - /// - /// A latency entry as reported by the built-in LATENCY LATEST command - /// - public readonly struct LatencyLatestEntry - { - internal static readonly ResultProcessor ToArray = new Processor(); - - private sealed class Processor : ArrayResultProcessor - { - protected override bool TryParse(in RawResult raw, out LatencyLatestEntry parsed) - { - if (raw.Type == ResultType.MultiBulk) - { - var items = raw.GetItems(); - if (items.Length >= 4 - && items[1].TryGetInt64(out var timestamp) - && items[2].TryGetInt64(out var duration) - && items[3].TryGetInt64(out var maxDuration)) - { - parsed = new LatencyLatestEntry(items[0].GetString(), timestamp, duration, maxDuration); - return true; - } - } - parsed = default; - return false; - } - } - - /// - /// The name of this 
event - /// - public string EventName { get; } - - /// - /// The time at which this entry was recorded - /// - public DateTime Timestamp { get; } - - /// - /// The latency recorded for this event - /// - public int DurationMilliseconds { get; } - - /// - /// The max latency recorded for all events - /// - public int MaxDurationMilliseconds { get; } - - internal LatencyLatestEntry(string eventName, long timestamp, long duration, long maxDuration) - { - EventName = eventName; - Timestamp = RedisBase.UnixEpoch.AddSeconds(timestamp); - DurationMilliseconds = checked((int)duration); - MaxDurationMilliseconds = checked((int)maxDuration); - } } internal static class IServerExtensions { /// - /// For testing only: Break the connection without mercy or thought + /// For testing only: Break the connection without mercy or thought. /// /// The server to simulate failure on. - public static void SimulateConnectionFailure(this IServer server) => (server as RedisServer)?.SimulateConnectionFailure(); + /// The type of failure(s) to simulate. + internal static void SimulateConnectionFailure(this IServer server, SimulatedFailureType failureType) => (server as RedisServer)?.SimulateConnectionFailure(failureType); + + // ReSharper disable once ConditionIsAlwaysTrueOrFalseAccordingToNullableAPIContract + internal static bool CanSimulateConnectionFailure(this IServer server) => server is not null; // this changes in v3 } } diff --git a/src/StackExchange.Redis/Interfaces/ISubscriber.cs b/src/StackExchange.Redis/Interfaces/ISubscriber.cs index b479a8d8d..a9c0bf298 100644 --- a/src/StackExchange.Redis/Interfaces/ISubscriber.cs +++ b/src/StackExchange.Redis/Interfaces/ISubscriber.cs @@ -5,32 +5,29 @@ namespace StackExchange.Redis { /// - /// A redis connection used as the subscriber in a pub/sub scenario + /// A redis connection used as the subscriber in a pub/sub scenario. 
/// public interface ISubscriber : IRedis { /// - /// Indicate exactly which redis server we are talking to + /// Indicate exactly which redis server we are talking to. /// /// The channel to identify the server endpoint by. /// The command flags to use. - EndPoint IdentifyEndpoint(RedisChannel channel, CommandFlags flags = CommandFlags.None); + EndPoint? IdentifyEndpoint(RedisChannel channel, CommandFlags flags = CommandFlags.None); - /// - /// Indicate exactly which redis server we are talking to - /// - /// The channel to identify the server endpoint by. - /// The command flags to use. - Task IdentifyEndpointAsync(RedisChannel channel, CommandFlags flags = CommandFlags.None); + /// + Task IdentifyEndpointAsync(RedisChannel channel, CommandFlags flags = CommandFlags.None); /// - /// Indicates whether the instance can communicate with the server; - /// if a channel is specified, the existing subscription map is queried to + /// Indicates whether the instance can communicate with the server. + /// If a channel is specified, the existing subscription map is queried to /// resolve the server responsible for that subscription - otherwise the - /// server is chosen arbitrarily from the masters. + /// server is chosen arbitrarily from the primaries. /// /// The channel to identify the server endpoint by. - bool IsConnected(RedisChannel channel = default(RedisChannel)); + /// if connected, otherwise. + bool IsConnected(RedisChannel channel = default); /// /// Posts a message to the given channel. @@ -38,18 +35,14 @@ public interface ISubscriber : IRedis /// The channel to publish to. /// The message to publish. /// The command flags to use. - /// the number of clients that received the message. - /// https://redis.io/commands/publish + /// + /// The number of clients that received the message *on the destination server*, + /// note that this doesn't mean much in a cluster as clients can get the message through other nodes. 
+ /// + /// long Publish(RedisChannel channel, RedisValue message, CommandFlags flags = CommandFlags.None); - /// - /// Posts a message to the given channel. - /// - /// The channel to publish to. - /// The message to publish. - /// The command flags to use. - /// the number of clients that received the message. - /// https://redis.io/commands/publish + /// Task PublishAsync(RedisChannel channel, RedisValue message, CommandFlags flags = CommandFlags.None); /// @@ -58,85 +51,70 @@ public interface ISubscriber : IRedis /// The channel to subscribe to. /// The handler to invoke when a message is received on . /// The command flags to use. - /// https://redis.io/commands/subscribe - /// https://redis.io/commands/psubscribe + /// + /// See + /// , + /// . + /// void Subscribe(RedisChannel channel, Action handler, CommandFlags flags = CommandFlags.None); + /// + Task SubscribeAsync(RedisChannel channel, Action handler, CommandFlags flags = CommandFlags.None); + /// /// Subscribe to perform some operation when a message to the preferred/active node is broadcast, as a queue that guarantees ordered handling. /// /// The redis channel to subscribe to. /// The command flags to use. - /// A channel that represents this source - /// https://redis.io/commands/subscribe - /// https://redis.io/commands/psubscribe + /// A channel that represents this source. + /// + /// See + /// , + /// . + /// ChannelMessageQueue Subscribe(RedisChannel channel, CommandFlags flags = CommandFlags.None); - /// - /// Subscribe to perform some operation when a change to the preferred/active node is broadcast. - /// - /// The channel to subscribe to. - /// The handler to invoke when a message is received on . - /// The command flags to use. 
- /// https://redis.io/commands/subscribe - /// https://redis.io/commands/psubscribe - Task SubscribeAsync(RedisChannel channel, Action handler, CommandFlags flags = CommandFlags.None); - - /// - /// Subscribe to perform some operation when a change to the preferred/active node is broadcast, as a channel. - /// - /// The redis channel to subscribe to. - /// The command flags to use. - /// A channel that represents this source - /// https://redis.io/commands/subscribe - /// https://redis.io/commands/psubscribe + /// Task SubscribeAsync(RedisChannel channel, CommandFlags flags = CommandFlags.None); /// - /// Indicate to which redis server we are actively subscribed for a given channel; returns null if - /// the channel is not actively subscribed + /// Indicate to which redis server we are actively subscribed for a given channel. /// /// The channel to check which server endpoint was subscribed on. - EndPoint SubscribedEndpoint(RedisChannel channel); + /// The subscribed endpoint for the given , if the channel is not actively subscribed. + EndPoint? SubscribedEndpoint(RedisChannel channel); /// - /// Unsubscribe from a specified message channel; note; if no handler is specified, the subscription is cancelled regardless - /// of the subscribers; if a handler is specified, the subscription is only cancelled if this handler is the - /// last handler remaining against the channel + /// Unsubscribe from a specified message channel. + /// Note: if no handler is specified, the subscription is canceled regardless of the subscribers. + /// If a handler is specified, the subscription is only canceled if this handler is the last handler remaining against the channel. /// /// The channel that was subscribed to. /// The handler to no longer invoke when a message is received on . /// The command flags to use. 
- /// https://redis.io/commands/unsubscribe - /// https://redis.io/commands/punsubscribe - void Unsubscribe(RedisChannel channel, Action handler = null, CommandFlags flags = CommandFlags.None); + /// + /// See + /// , + /// . + /// + void Unsubscribe(RedisChannel channel, Action? handler = null, CommandFlags flags = CommandFlags.None); + + /// + Task UnsubscribeAsync(RedisChannel channel, Action? handler = null, CommandFlags flags = CommandFlags.None); /// - /// Unsubscribe all subscriptions on this instance + /// Unsubscribe all subscriptions on this instance. /// /// The command flags to use. - /// https://redis.io/commands/unsubscribe - /// https://redis.io/commands/punsubscribe + /// + /// See + /// , + /// . + /// . + /// void UnsubscribeAll(CommandFlags flags = CommandFlags.None); - /// - /// Unsubscribe all subscriptions on this instance - /// - /// The command flags to use. - /// https://redis.io/commands/unsubscribe - /// https://redis.io/commands/punsubscribe + /// Task UnsubscribeAllAsync(CommandFlags flags = CommandFlags.None); - - /// - /// Unsubscribe from a specified message channel; note; if no handler is specified, the subscription is cancelled regardless - /// of the subscribers; if a handler is specified, the subscription is only cancelled if this handler is the - /// last handler remaining against the channel - /// - /// The channel that was subscribed to. - /// The handler to no longer invoke when a message is received on . - /// The command flags to use. 
- /// https://redis.io/commands/unsubscribe - /// https://redis.io/commands/punsubscribe - Task UnsubscribeAsync(RedisChannel channel, Action handler = null, CommandFlags flags = CommandFlags.None); } } diff --git a/src/StackExchange.Redis/Interfaces/ITransaction.cs b/src/StackExchange.Redis/Interfaces/ITransaction.cs index 230405282..21c66968a 100644 --- a/src/StackExchange.Redis/Interfaces/ITransaction.cs +++ b/src/StackExchange.Redis/Interfaces/ITransaction.cs @@ -5,18 +5,19 @@ namespace StackExchange.Redis /// /// Represents a group of operations that will be sent to the server as a single unit, /// and processed on the server as a single unit. Transactions can also include constraints - /// (implemented via WATCH), but note that constraint checking involves will (very briefly) - /// block the connection, since the transaction cannot be correctly committed (EXEC), - /// aborted (DISCARD) or not applied in the first place (UNWATCH) until the responses from + /// (implemented via WATCH), but note that constraint checking involves will (very briefly) + /// block the connection, since the transaction cannot be correctly committed (EXEC), + /// aborted (DISCARD) or not applied in the first place (UNWATCH) until the responses from /// the constraint checks have arrived. /// - /// https://redis.io/topics/transactions - /// Note that on a cluster, it may be required that all keys involved in the transaction - /// (including constraints) are in the same hash-slot + /// + /// Note that on a cluster, it may be required that all keys involved in the transaction (including constraints) are in the same hash-slot. + /// + /// public interface ITransaction : IBatch { /// - /// Adds a precondition for this transaction + /// Adds a precondition for this transaction. /// /// The condition to add to the transaction. 
ConditionResult AddCondition(Condition condition); diff --git a/src/StackExchange.Redis/InternalErrorEventArgs.cs b/src/StackExchange.Redis/InternalErrorEventArgs.cs index 9c613ed79..f664f4d62 100644 --- a/src/StackExchange.Redis/InternalErrorEventArgs.cs +++ b/src/StackExchange.Redis/InternalErrorEventArgs.cs @@ -5,13 +5,13 @@ namespace StackExchange.Redis { /// - /// Describes internal errors (mainly intended for debugging) + /// Describes internal errors (mainly intended for debugging). /// public class InternalErrorEventArgs : EventArgs, ICompletable { - private readonly EventHandler handler; + private readonly EventHandler? handler; private readonly object sender; - internal InternalErrorEventArgs(EventHandler handler, object sender, EndPoint endpoint, ConnectionType connectionType, Exception exception, string origin) + internal InternalErrorEventArgs(EventHandler? handler, object sender, EndPoint? endpoint, ConnectionType connectionType, Exception exception, string? origin) { this.handler = handler; this.sender = sender; @@ -25,39 +25,42 @@ internal InternalErrorEventArgs(EventHandler handler, ob /// This constructor is only for testing purposes. /// /// The source of the event. - /// + /// The endpoint (if any) involved in the event. /// Redis connection type. - /// The exception occured. + /// The exception that occurred. /// Origin. public InternalErrorEventArgs(object sender, EndPoint endpoint, ConnectionType connectionType, Exception exception, string origin) - : this (null, sender, endpoint, connectionType, exception, origin) + : this(null, sender, endpoint, connectionType, exception, origin) { } /// - /// Gets the connection-type of the failing connection + /// Gets the connection-type of the failing connection. /// public ConnectionType ConnectionType { get; } /// - /// Gets the failing server-endpoint (this can be null) + /// Gets the failing server-endpoint (this can be null). /// - public EndPoint EndPoint { get; } + public EndPoint? 
EndPoint { get; } /// - /// Gets the exception if available (this can be null) + /// Gets the exception if available (this can be null). /// public Exception Exception { get; } /// - /// The underlying origin of the error + /// The underlying origin of the error. /// - public string Origin { get; } + public string? Origin { get; } void ICompletable.AppendStormLog(StringBuilder sb) { sb.Append("event, internal-error: ").Append(Origin); - if (EndPoint != null) sb.Append(", ").Append(Format.ToString(EndPoint)); + if (EndPoint != null) + { + sb.Append(", ").Append(Format.ToString(EndPoint)); + } } bool ICompletable.TryComplete(bool isAsync) => ConnectionMultiplexer.TryCompleteHandler(handler, sender, this, isAsync); diff --git a/src/StackExchange.Redis/KeyNotification.cs b/src/StackExchange.Redis/KeyNotification.cs new file mode 100644 index 000000000..08c157bc6 --- /dev/null +++ b/src/StackExchange.Redis/KeyNotification.cs @@ -0,0 +1,495 @@ +using System; +using System.Buffers; +using System.Buffers.Text; +using System.Diagnostics; +using System.Runtime.CompilerServices; +using System.Text; +using RESPite; +using static StackExchange.Redis.KeyNotificationChannels; +namespace StackExchange.Redis; + +/// +/// Represents keyspace and keyevent notifications, with utility methods for accessing the component data. Additionally, +/// since notifications can be high volume, a range of utility APIs is provided for avoiding allocations, in particular +/// to assist in filtering and inspecting the key without performing string allocations and substring operations. +/// In particular, note that this allows use with the alt-lookup (span-based) APIs on dictionaries. 
+/// +public readonly ref struct KeyNotification +{ + // effectively we just wrap a channel, but: we've pre-validated that things make sense + private readonly RedisChannel _channel; + private readonly RedisValue _value; + private readonly int _keyOffset; // used to efficiently strip key prefixes + + // this type has been designed with the intent of being able to move the entire thing alloc-free in some future + // high-throughput callback, potentially with a ReadOnlySpan field for the key fragment; this is + // not implemented currently, but is why this is a ref struct + + /// + /// If the channel is either a keyspace or keyevent notification, resolve the key and event type. + /// + public static bool TryParse(scoped in RedisChannel channel, scoped in RedisValue value, out KeyNotification notification) + { + // validate that it looks reasonable + var span = channel.Span; + + // KeySpaceStart and KeyEventStart are the same size, see KeyEventPrefix_KeySpacePrefix_Length_Matches + if (span.Length >= KeySpacePrefix.Length + MinSuffixBytes) + { + // check that the prefix is valid, i.e. "__keyspace@" or "__keyevent@" + var prefix = span.Slice(0, KeySpacePrefix.Length); + var hashCS = AsciiHash.HashCS(prefix); + switch (hashCS) + { + case KeyEventPrefix.HashCS when KeyEventPrefix.IsCS(prefix, hashCS): + case KeySpacePrefix.HashCS when KeySpacePrefix.IsCS(prefix, hashCS): + // check that there is *something* non-empty after the prefix, with __: as the suffix (we don't verify *what*) + if (span.Slice(KeySpacePrefix.Length).IndexOf("__:"u8) > 0) + { + notification = new KeyNotification(in channel, in value); + return true; + } + + break; + } + } + + notification = default; + return false; + } + + /// + /// If the channel is either a keyspace or keyevent notification *with the requested prefix*, resolve the key and event type, + /// and remove the prefix when reading the key. 
+ /// + public static bool TryParse(scoped in ReadOnlySpan keyPrefix, scoped in RedisChannel channel, scoped in RedisValue value, out KeyNotification notification) + { + if (TryParse(in channel, in value, out notification) && notification.KeyStartsWith(keyPrefix)) + { + notification = notification.WithKeySlice(keyPrefix.Length); + return true; + } + + notification = default; + return false; + } + + internal KeyNotification WithKeySlice(int keyPrefixLength) + { + KeyNotification result = this; + Unsafe.AsRef(in result._keyOffset) = keyPrefixLength; + return result; + } + + private const int MinSuffixBytes = 5; // need "0__:x" or similar after prefix + + /// + /// The channel associated with this notification. + /// + public RedisChannel GetChannel() => _channel; + + /// + /// The payload associated with this notification. + /// + public RedisValue GetValue() => _value; + + internal KeyNotification(scoped in RedisChannel channel, scoped in RedisValue value) + { + _channel = channel; + _value = value; + _keyOffset = 0; + } + + internal int KeyOffset => _keyOffset; + + /// + /// The database the key is in. If the database cannot be parsed, -1 is returned. + /// + public int Database + { + get + { + // prevalidated format, so we can just skip past the prefix (except for the default value) + if (_channel.IsNull) return -1; + var span = _channel.Span.Slice(KeySpacePrefix.Length); // also works for KeyEventPrefix + var end = span.IndexOf((byte)'_'); // expecting "__:foo" - we'll just stop at the underscore + if (end <= 0) return -1; + + span = span.Slice(0, end); + return Utf8Parser.TryParse(span, out int database, out var bytes) + && bytes == end ? database : -1; + } + } + + /// + /// The key associated with this event. + /// + /// Note that this will allocate a copy of the key bytes; to avoid allocations, + /// the , , and APIs can be used. 
+ public RedisKey GetKey() + { + if (IsKeySpace) + { + // then the channel contains the key, and the payload contains the event-type + return ChannelSuffix.Slice(_keyOffset).ToArray(); // create an isolated copy + } + + if (IsKeyEvent) + { + // then the channel contains the event-type, and the payload contains the key + byte[]? blob = _value; + if (_keyOffset != 0 & blob is not null) + { + return blob.AsSpan(_keyOffset).ToArray(); + } + return blob; + } + + return RedisKey.Null; + } + + /// + /// Get the number of bytes in the key. + /// + /// If a scratch-buffer is required, it may be preferable to use , which is less expensive. + public int GetKeyByteCount() + { + if (IsKeySpace) + { + return ChannelSuffix.Length - _keyOffset; + } + + if (IsKeyEvent) + { + return _value.GetByteCount() - _keyOffset; + } + + return 0; + } + + /// + /// Get the maximum number of bytes in the key. + /// + public int GetKeyMaxByteCount() + { + if (IsKeySpace) + { + return ChannelSuffix.Length - _keyOffset; + } + + if (IsKeyEvent) + { + return _value.GetMaxByteCount() - _keyOffset; + } + + return 0; + } + + /// + /// Get the maximum number of characters in the key, interpreting as UTF8. + /// + public int GetKeyMaxCharCount() + { + if (IsKeySpace) + { + return Encoding.UTF8.GetMaxCharCount(ChannelSuffix.Length - _keyOffset); + } + + if (IsKeyEvent) + { + return _value.GetMaxCharCount() - _keyOffset; + } + + return 0; + } + + /// + /// Get the number of characters in the key, interpreting as UTF8. + /// + /// If a scratch-buffer is required, it may be preferable to use , which is less expensive. + public int GetKeyCharCount() + { + if (IsKeySpace) + { + return Encoding.UTF8.GetCharCount(ChannelSuffix.Slice(_keyOffset)); + } + + if (IsKeyEvent) + { + return _keyOffset == 0 ? 
_value.GetCharCount() : SlowMeasure(in this); + } + + return 0; + + static int SlowMeasure(in KeyNotification value) + { + var span = value.GetKeySpan(out var lease, stackalloc byte[128]); + var result = Encoding.UTF8.GetCharCount(span); + Return(lease); + return result; + } + } + + private ReadOnlySpan GetKeySpan(out byte[]? lease, Span buffer) // buffer typically stackalloc + { + lease = null; + if (_value.TryGetSpan(out var direct)) + { + return direct.Slice(_keyOffset); + } + var count = _value.GetMaxByteCount(); + if (count > buffer.Length) + { + buffer = lease = ArrayPool.Shared.Rent(count); + } + count = _value.CopyTo(buffer); + return buffer.Slice(_keyOffset, count - _keyOffset); + } + + private static void Return(byte[]? lease) + { + if (lease is not null) ArrayPool.Shared.Return(lease); + } + + /// + /// Attempt to copy the bytes from the key to a buffer, returning the number of bytes written. + /// + public bool TryCopyKey(Span destination, out int bytesWritten) + { + if (IsKeySpace) + { + var suffix = ChannelSuffix.Slice(_keyOffset); + bytesWritten = suffix.Length; // assume success + if (bytesWritten <= destination.Length) + { + suffix.CopyTo(destination); + return true; + } + } + + if (IsKeyEvent) + { + if (_value.TryGetSpan(out var direct)) + { + bytesWritten = direct.Length - _keyOffset; // assume success + if (bytesWritten <= destination.Length) + { + direct.Slice(_keyOffset).CopyTo(destination); + return true; + } + bytesWritten = 0; + return false; + } + + if (_keyOffset == 0) + { + // get the value to do the hard work + bytesWritten = _value.GetByteCount(); + if (bytesWritten <= destination.Length) + { + _value.CopyTo(destination); + return true; + } + bytesWritten = 0; + return false; + } + + return SlowCopy(in this, destination, out bytesWritten); + + static bool SlowCopy(in KeyNotification value, Span destination, out int bytesWritten) + { + var span = value.GetKeySpan(out var lease, stackalloc byte[128]); + bool result = 
span.TryCopyTo(destination); + bytesWritten = result ? span.Length : 0; + Return(lease); + return result; + } + } + + bytesWritten = 0; + return false; + } + + /// + /// Attempt to copy the bytes from the key to a buffer, returning the number of bytes written. + /// + public bool TryCopyKey(Span destination, out int charsWritten) + { + if (IsKeySpace) + { + var suffix = ChannelSuffix.Slice(_keyOffset); + if (Encoding.UTF8.GetMaxCharCount(suffix.Length) <= destination.Length || + Encoding.UTF8.GetCharCount(suffix) <= destination.Length) + { + charsWritten = Encoding.UTF8.GetChars(suffix, destination); + return true; + } + } + + if (IsKeyEvent) + { + if (_keyOffset == 0) // can use short-cut + { + if (_value.GetMaxCharCount() <= destination.Length || _value.GetCharCount() <= destination.Length) + { + charsWritten = _value.CopyTo(destination); + return true; + } + } + var span = GetKeySpan(out var lease, stackalloc byte[128]); + charsWritten = 0; + bool result = false; + if (Encoding.UTF8.GetMaxCharCount(span.Length) <= destination.Length || + Encoding.UTF8.GetCharCount(span) <= destination.Length) + { + charsWritten = Encoding.UTF8.GetChars(span, destination); + result = true; + } + Return(lease); + return result; + } + + charsWritten = 0; + return false; + } + + /// + /// Get the portion of the channel after the "__{keyspace|keyevent}@{db}__:". + /// + private ReadOnlySpan ChannelSuffix + { + get + { + var span = _channel.Span; + var index = span.IndexOf("__:"u8); + return index > 0 ? span.Slice(index + 3) : default; + } + } + + /// + /// Indicates whether this notification is of the given type, specified as raw bytes. + /// + /// This is especially useful for working with unknown event types, but repeated calls to this method will be more expensive than + /// a single successful call to . 
+ public bool IsType(ReadOnlySpan type) + { + if (IsKeySpace) + { + if (_value.TryGetSpan(out var direct)) + { + return direct.SequenceEqual(type); + } + + const int MAX_STACK = 64; + byte[]? lease = null; + var maxCount = _value.GetMaxByteCount(); + Span localCopy = maxCount <= MAX_STACK + ? stackalloc byte[MAX_STACK] + : (lease = ArrayPool.Shared.Rent(maxCount)); + var count = _value.CopyTo(localCopy); + bool result = localCopy.Slice(0, count).SequenceEqual(type); + if (lease is not null) ArrayPool.Shared.Return(lease); + return result; + } + + if (IsKeyEvent) + { + return ChannelSuffix.SequenceEqual(type); + } + + return false; + } + + /// + /// The type of notification associated with this event, if it is well-known - otherwise . + /// + /// Unexpected values can be processed manually from the and . + public KeyNotificationType Type + { + get + { + if (IsKeySpace) + { + // then the channel contains the key, and the payload contains the event-type + if (_value.TryGetSpan(out var direct)) + { + return KeyNotificationTypeMetadata.Parse(direct); + } + + if (_value.GetByteCount() <= KeyNotificationTypeMetadata.BufferBytes) + { + Span localCopy = stackalloc byte[KeyNotificationTypeMetadata.BufferBytes]; + var len = _value.CopyTo(localCopy); + return KeyNotificationTypeMetadata.Parse(localCopy.Slice(0, len)); + } + } + else if (IsKeyEvent) + { + // then the channel contains the event-type, and the payload contains the key + return KeyNotificationTypeMetadata.Parse(ChannelSuffix); + } + return KeyNotificationType.Unknown; + } + } + + /// + /// Indicates whether this notification originated from a keyspace notification, for example __keyspace@4__:mykey with payload set. 
+ /// + public bool IsKeySpace + { + get + { + var span = _channel.Span; + return span.Length >= KeySpacePrefix.Length + MinSuffixBytes && KeySpacePrefix.IsCS(span.Slice(0, KeySpacePrefix.Length), AsciiHash.HashCS(span)); + } + } + + /// + /// Indicates whether this notification originated from a keyevent notification, for example __keyevent@4__:set with payload mykey. + /// + public bool IsKeyEvent + { + get + { + var span = _channel.Span; + return span.Length >= KeyEventPrefix.Length + MinSuffixBytes && KeyEventPrefix.IsCS(span.Slice(0, KeyEventPrefix.Length), AsciiHash.HashCS(span)); + } + } + + /// + /// Indicates whether the key associated with this notification starts with the specified prefix. + /// + /// This API is intended as a high-throughput filter API. + public bool KeyStartsWith(ReadOnlySpan prefix) // intentionally leading people to the BLOB API + { + if (IsKeySpace) + { + return ChannelSuffix.Slice(_keyOffset).StartsWith(prefix); + } + + if (IsKeyEvent) + { + if (_keyOffset == 0) return _value.StartsWith(prefix); + + var span = GetKeySpan(out var lease, stackalloc byte[128]); + bool result = span.StartsWith(prefix); + Return(lease); + return result; + } + + return false; + } +} + +internal static partial class KeyNotificationChannels +{ + [AsciiHash("__keyspace@")] + internal static partial class KeySpacePrefix + { + } + + [AsciiHash("__keyevent@")] + internal static partial class KeyEventPrefix + { + } +} diff --git a/src/StackExchange.Redis/KeyNotificationType.cs b/src/StackExchange.Redis/KeyNotificationType.cs new file mode 100644 index 000000000..d45d11e47 --- /dev/null +++ b/src/StackExchange.Redis/KeyNotificationType.cs @@ -0,0 +1,127 @@ +using RESPite; + +namespace StackExchange.Redis; + +/// +/// The type of keyspace or keyevent notification. 
+/// +[AsciiHash(nameof(KeyNotificationTypeMetadata))] +public enum KeyNotificationType +{ + // note: initially presented alphabetically, but: new values *must* be appended, not inserted + // (to preserve values of existing elements) +#pragma warning disable CS1591 // docs, redundant + [AsciiHash("")] + Unknown = 0, + [AsciiHash("append")] + Append = 1, + [AsciiHash("copy")] + Copy = 2, + [AsciiHash("del")] + Del = 3, + [AsciiHash("expire")] + Expire = 4, + [AsciiHash("hdel")] + HDel = 5, + [AsciiHash("hexpired")] + HExpired = 6, + [AsciiHash("hincrbyfloat")] + HIncrByFloat = 7, + [AsciiHash("hincrby")] + HIncrBy = 8, + [AsciiHash("hpersist")] + HPersist = 9, + [AsciiHash("hset")] + HSet = 10, + [AsciiHash("incrbyfloat")] + IncrByFloat = 11, + [AsciiHash("incrby")] + IncrBy = 12, + [AsciiHash("linsert")] + LInsert = 13, + [AsciiHash("lpop")] + LPop = 14, + [AsciiHash("lpush")] + LPush = 15, + [AsciiHash("lrem")] + LRem = 16, + [AsciiHash("lset")] + LSet = 17, + [AsciiHash("ltrim")] + LTrim = 18, + [AsciiHash("move_from")] + MoveFrom = 19, + [AsciiHash("move_to")] + MoveTo = 20, + [AsciiHash("persist")] + Persist = 21, + [AsciiHash("rename_from")] + RenameFrom = 22, + [AsciiHash("rename_to")] + RenameTo = 23, + [AsciiHash("restore")] + Restore = 24, + [AsciiHash("rpop")] + RPop = 25, + [AsciiHash("rpush")] + RPush = 26, + [AsciiHash("sadd")] + SAdd = 27, + [AsciiHash("set")] + Set = 28, + [AsciiHash("setrange")] + SetRange = 29, + [AsciiHash("sortstore")] + SortStore = 30, + [AsciiHash("srem")] + SRem = 31, + [AsciiHash("spop")] + SPop = 32, + [AsciiHash("xadd")] + XAdd = 33, + [AsciiHash("xdel")] + XDel = 34, + [AsciiHash("xgroup-createconsumer")] + XGroupCreateConsumer = 35, + [AsciiHash("xgroup-create")] + XGroupCreate = 36, + [AsciiHash("xgroup-delconsumer")] + XGroupDelConsumer = 37, + [AsciiHash("xgroup-destroy")] + XGroupDestroy = 38, + [AsciiHash("xgroup-setid")] + XGroupSetId = 39, + [AsciiHash("xsetid")] + XSetId = 40, + [AsciiHash("xtrim")] + XTrim = 41, 
+ [AsciiHash("zadd")] + ZAdd = 42, + [AsciiHash("zdiffstore")] + ZDiffStore = 43, + [AsciiHash("zinterstore")] + ZInterStore = 44, + [AsciiHash("zunionstore")] + ZUnionStore = 45, + [AsciiHash("zincr")] + ZIncr = 46, + [AsciiHash("zrembyrank")] + ZRemByRank = 47, + [AsciiHash("zrembyscore")] + ZRemByScore = 48, + [AsciiHash("zrem")] + ZRem = 49, + + // side-effect notifications + [AsciiHash("expired")] + Expired = 1000, + [AsciiHash("evicted")] + Evicted = 1001, + [AsciiHash("new")] + New = 1002, + [AsciiHash("overwritten")] + Overwritten = 1003, + [AsciiHash("type_changed")] + TypeChanged = 1004, +#pragma warning restore CS1591 // docs, redundant +} diff --git a/src/StackExchange.Redis/KeyNotificationTypeMetadata.cs b/src/StackExchange.Redis/KeyNotificationTypeMetadata.cs new file mode 100644 index 000000000..594fd29c2 --- /dev/null +++ b/src/StackExchange.Redis/KeyNotificationTypeMetadata.cs @@ -0,0 +1,77 @@ +using System; +using RESPite; + +namespace StackExchange.Redis; + +/// +/// Metadata and parsing methods for KeyNotificationType. +/// +internal static partial class KeyNotificationTypeMetadata +{ + [AsciiHash] + internal static partial bool TryParse(ReadOnlySpan value, out KeyNotificationType keyNotificationType); + + public static KeyNotificationType Parse(ReadOnlySpan value) + { + return TryParse(value, out var result) ? 
result : KeyNotificationType.Unknown; + } + + internal static ReadOnlySpan GetRawBytes(KeyNotificationType type) => type switch + { + KeyNotificationType.Append => "append"u8, + KeyNotificationType.Copy => "copy"u8, + KeyNotificationType.Del => "del"u8, + KeyNotificationType.Expire => "expire"u8, + KeyNotificationType.HDel => "hdel"u8, + KeyNotificationType.HExpired => "hexpired"u8, + KeyNotificationType.HIncrByFloat => "hincrbyfloat"u8, + KeyNotificationType.HIncrBy => "hincrby"u8, + KeyNotificationType.HPersist => "hpersist"u8, + KeyNotificationType.HSet => "hset"u8, + KeyNotificationType.IncrByFloat => "incrbyfloat"u8, + KeyNotificationType.IncrBy => "incrby"u8, + KeyNotificationType.LInsert => "linsert"u8, + KeyNotificationType.LPop => "lpop"u8, + KeyNotificationType.LPush => "lpush"u8, + KeyNotificationType.LRem => "lrem"u8, + KeyNotificationType.LSet => "lset"u8, + KeyNotificationType.LTrim => "ltrim"u8, + KeyNotificationType.MoveFrom => "move_from"u8, + KeyNotificationType.MoveTo => "move_to"u8, + KeyNotificationType.Persist => "persist"u8, + KeyNotificationType.RenameFrom => "rename_from"u8, + KeyNotificationType.RenameTo => "rename_to"u8, + KeyNotificationType.Restore => "restore"u8, + KeyNotificationType.RPop => "rpop"u8, + KeyNotificationType.RPush => "rpush"u8, + KeyNotificationType.SAdd => "sadd"u8, + KeyNotificationType.Set => "set"u8, + KeyNotificationType.SetRange => "setrange"u8, + KeyNotificationType.SortStore => "sortstore"u8, + KeyNotificationType.SRem => "srem"u8, + KeyNotificationType.SPop => "spop"u8, + KeyNotificationType.XAdd => "xadd"u8, + KeyNotificationType.XDel => "xdel"u8, + KeyNotificationType.XGroupCreateConsumer => "xgroup-createconsumer"u8, + KeyNotificationType.XGroupCreate => "xgroup-create"u8, + KeyNotificationType.XGroupDelConsumer => "xgroup-delconsumer"u8, + KeyNotificationType.XGroupDestroy => "xgroup-destroy"u8, + KeyNotificationType.XGroupSetId => "xgroup-setid"u8, + KeyNotificationType.XSetId => "xsetid"u8, + 
KeyNotificationType.XTrim => "xtrim"u8, + KeyNotificationType.ZAdd => "zadd"u8, + KeyNotificationType.ZDiffStore => "zdiffstore"u8, + KeyNotificationType.ZInterStore => "zinterstore"u8, + KeyNotificationType.ZUnionStore => "zunionstore"u8, + KeyNotificationType.ZIncr => "zincr"u8, + KeyNotificationType.ZRemByRank => "zrembyrank"u8, + KeyNotificationType.ZRemByScore => "zrembyscore"u8, + KeyNotificationType.ZRem => "zrem"u8, + KeyNotificationType.Expired => "expired"u8, + KeyNotificationType.Evicted => "evicted"u8, + KeyNotificationType.New => "new"u8, + KeyNotificationType.Overwritten => "overwritten"u8, + KeyNotificationType.TypeChanged => "type_changed"u8, + _ => throw new ArgumentOutOfRangeException(nameof(type)), + }; +} diff --git a/src/StackExchange.Redis/KeyspaceIsolation/BatchWrapper.cs b/src/StackExchange.Redis/KeyspaceIsolation/BatchWrapper.cs deleted file mode 100644 index b551f1807..000000000 --- a/src/StackExchange.Redis/KeyspaceIsolation/BatchWrapper.cs +++ /dev/null @@ -1,14 +0,0 @@ -namespace StackExchange.Redis.KeyspaceIsolation -{ - internal sealed class BatchWrapper : WrapperBase, IBatch - { - public BatchWrapper(IBatch inner, byte[] prefix) : base(inner, prefix) - { - } - - public void Execute() - { - Inner.Execute(); - } - } -} diff --git a/src/StackExchange.Redis/KeyspaceIsolation/DatabaseExtension.cs b/src/StackExchange.Redis/KeyspaceIsolation/DatabaseExtension.cs index 3eb4f0678..742bc06eb 100644 --- a/src/StackExchange.Redis/KeyspaceIsolation/DatabaseExtension.cs +++ b/src/StackExchange.Redis/KeyspaceIsolation/DatabaseExtension.cs @@ -9,7 +9,7 @@ public static class DatabaseExtensions { /// /// Creates a new instance that provides an isolated key space - /// of the specified underyling database instance. + /// of the specified underlying database instance. /// /// /// The underlying database instance that the returned instance shall use. 
@@ -20,13 +20,13 @@ public static class DatabaseExtensions /// /// A new instance that invokes the specified underlying /// but prepends the specified - /// to all key paramters and thus forms a logical key space isolation. + /// to all key parameters and thus forms a logical key space isolation. /// /// /// /// The following methods are not supported in a key space isolated database and /// will throw an when invoked: - /// + /// /// /// /// @@ -54,14 +54,14 @@ public static IDatabase WithKeyPrefix(this IDatabase database, RedisKey keyPrefi return database; // fine - you can keep using the original, then } - if (database is DatabaseWrapper wrapper) + if (database is KeyPrefixedDatabase prefixed) { // combine the key in advance to minimize indirection - keyPrefix = wrapper.ToInner(keyPrefix); - database = wrapper.Inner; + keyPrefix = prefixed.ToInner(keyPrefix); + database = prefixed.Inner; } - return new DatabaseWrapper(database, keyPrefix.AsPrefix()); + return new KeyPrefixedDatabase(database, keyPrefix.AsPrefix()!); } } } diff --git a/src/StackExchange.Redis/KeyspaceIsolation/DatabaseWrapper.cs b/src/StackExchange.Redis/KeyspaceIsolation/DatabaseWrapper.cs deleted file mode 100644 index e6f6f506a..000000000 --- a/src/StackExchange.Redis/KeyspaceIsolation/DatabaseWrapper.cs +++ /dev/null @@ -1,907 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Net; - -namespace StackExchange.Redis.KeyspaceIsolation -{ - internal sealed class DatabaseWrapper : WrapperBase, IDatabase - { - public DatabaseWrapper(IDatabase inner, byte[] prefix) : base(inner, prefix) - { - } - - public IBatch CreateBatch(object asyncState = null) - { - return new BatchWrapper(Inner.CreateBatch(asyncState), Prefix); - } - - public ITransaction CreateTransaction(object asyncState = null) - { - return new TransactionWrapper(Inner.CreateTransaction(asyncState), Prefix); - } - - public int Database => Inner.Database; - - public RedisValue DebugObject(RedisKey key, CommandFlags 
flags = CommandFlags.None) - { - return Inner.DebugObject(ToInner(key), flags); - } - - public bool GeoAdd(RedisKey key, double longitude, double latitude, RedisValue member, CommandFlags flags = CommandFlags.None) - { - return Inner.GeoAdd(ToInner(key), longitude, latitude, member, flags); - } - - public long GeoAdd(RedisKey key, GeoEntry[] values, CommandFlags flags = CommandFlags.None) - { - return Inner.GeoAdd(ToInner(key), values, flags); - } - - public bool GeoAdd(RedisKey key, GeoEntry value, CommandFlags flags = CommandFlags.None) - { - return Inner.GeoAdd(ToInner(key), value, flags); - } - - public bool GeoRemove(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) - { - return Inner.GeoRemove(ToInner(key), member, flags); - } - - public double? GeoDistance(RedisKey key, RedisValue member1, RedisValue member2, GeoUnit unit = GeoUnit.Meters,CommandFlags flags = CommandFlags.None) - { - return Inner.GeoDistance(ToInner(key), member1, member2, unit, flags); - } - - public string[] GeoHash(RedisKey key, RedisValue[] members, CommandFlags flags = CommandFlags.None) - { - return Inner.GeoHash(ToInner(key), members, flags); - } - - public string GeoHash(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) - { - return Inner.GeoHash(ToInner(key), member, flags); - } - - public GeoPosition?[] GeoPosition(RedisKey key, RedisValue[] members, CommandFlags flags = CommandFlags.None) - { - return Inner.GeoPosition(ToInner(key), members, flags); - } - - public GeoPosition? GeoPosition(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) - { - return Inner.GeoPosition(ToInner(key), member, flags); - } - - public GeoRadiusResult[] GeoRadius(RedisKey key, RedisValue member, double radius, GeoUnit unit = GeoUnit.Meters, int count = -1, Order? 
order = null,GeoRadiusOptions options = GeoRadiusOptions.Default, CommandFlags flags = CommandFlags.None) - { - return Inner.GeoRadius(ToInner(key), member, radius, unit, count, order, options, flags); - } - - public GeoRadiusResult[] GeoRadius(RedisKey key, double longitude, double latitude, double radius, GeoUnit unit = GeoUnit.Meters, int count = -1, Order? order = null, GeoRadiusOptions options = GeoRadiusOptions.Default, CommandFlags flags = CommandFlags.None) - { - return Inner.GeoRadius(ToInner(key), longitude, latitude, radius, unit, count, order, options, flags); - } - - public double HashDecrement(RedisKey key, RedisValue hashField, double value, CommandFlags flags = CommandFlags.None) - { - return Inner.HashDecrement(ToInner(key), hashField, value, flags); - } - - public long HashDecrement(RedisKey key, RedisValue hashField, long value = 1, CommandFlags flags = CommandFlags.None) - { - return Inner.HashDecrement(ToInner(key), hashField, value, flags); - } - - public long HashDelete(RedisKey key, RedisValue[] hashFields, CommandFlags flags = CommandFlags.None) - { - return Inner.HashDelete(ToInner(key), hashFields, flags); - } - - public bool HashDelete(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None) - { - return Inner.HashDelete(ToInner(key), hashField, flags); - } - - public bool HashExists(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None) - { - return Inner.HashExists(ToInner(key), hashField, flags); - } - - public HashEntry[] HashGetAll(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.HashGetAll(ToInner(key), flags); - } - - public RedisValue[] HashGet(RedisKey key, RedisValue[] hashFields, CommandFlags flags = CommandFlags.None) - { - return Inner.HashGet(ToInner(key), hashFields, flags); - } - - public RedisValue HashGet(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None) - { - return Inner.HashGet(ToInner(key), hashField, flags); - } - - public 
Lease HashGetLease(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None) - { - return Inner.HashGetLease(ToInner(key), hashField, flags); - } - - public double HashIncrement(RedisKey key, RedisValue hashField, double value, CommandFlags flags = CommandFlags.None) - { - return Inner.HashIncrement(ToInner(key), hashField, value, flags); - } - - public long HashIncrement(RedisKey key, RedisValue hashField, long value = 1, CommandFlags flags = CommandFlags.None) - { - return Inner.HashIncrement(ToInner(key), hashField, value, flags); - } - - public RedisValue[] HashKeys(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.HashKeys(ToInner(key), flags); - } - - public long HashLength(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.HashLength(ToInner(key), flags); - } - - public bool HashSet(RedisKey key, RedisValue hashField, RedisValue value, When when = When.Always, CommandFlags flags = CommandFlags.None) - { - return Inner.HashSet(ToInner(key), hashField, value, when, flags); - } - - public long HashStringLength(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None) - { - return Inner.HashStringLength(ToInner(key), hashField, flags); - } - - public void HashSet(RedisKey key, HashEntry[] hashFields, CommandFlags flags = CommandFlags.None) - { - Inner.HashSet(ToInner(key), hashFields, flags); - } - - public RedisValue[] HashValues(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.HashValues(ToInner(key), flags); - } - - public bool HyperLogLogAdd(RedisKey key, RedisValue[] values, CommandFlags flags = CommandFlags.None) - { - return Inner.HyperLogLogAdd(ToInner(key), values, flags); - } - - public bool HyperLogLogAdd(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None) - { - return Inner.HyperLogLogAdd(ToInner(key), value, flags); - } - - public long HyperLogLogLength(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return 
Inner.HyperLogLogLength(ToInner(key), flags); - } - - public long HyperLogLogLength(RedisKey[] keys, CommandFlags flags = CommandFlags.None) - { - return Inner.HyperLogLogLength(ToInner(keys), flags); - } - - public void HyperLogLogMerge(RedisKey destination, RedisKey[] sourceKeys, CommandFlags flags = CommandFlags.None) - { - Inner.HyperLogLogMerge(ToInner(destination), ToInner(sourceKeys), flags); - } - - public void HyperLogLogMerge(RedisKey destination, RedisKey first, RedisKey second, CommandFlags flags = CommandFlags.None) - { - Inner.HyperLogLogMerge(ToInner(destination), ToInner(first), ToInner(second), flags); - } - - public EndPoint IdentifyEndpoint(RedisKey key = default(RedisKey), CommandFlags flags = CommandFlags.None) - { - return Inner.IdentifyEndpoint(ToInner(key), flags); - } - - public long KeyDelete(RedisKey[] keys, CommandFlags flags = CommandFlags.None) - { - return Inner.KeyDelete(ToInner(keys), flags); - } - - public bool KeyDelete(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.KeyDelete(ToInner(key), flags); - } - - public byte[] KeyDump(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.KeyDump(ToInner(key), flags); - } - - public bool KeyExists(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.KeyExists(ToInner(key), flags); - } - public long KeyExists(RedisKey[] keys, CommandFlags flags = CommandFlags.None) - { - return Inner.KeyExists(ToInner(keys), flags); - } - - public bool KeyExpire(RedisKey key, DateTime? expiry, CommandFlags flags = CommandFlags.None) - { - return Inner.KeyExpire(ToInner(key), expiry, flags); - } - - public bool KeyExpire(RedisKey key, TimeSpan? expiry, CommandFlags flags = CommandFlags.None) - { - return Inner.KeyExpire(ToInner(key), expiry, flags); - } - - public TimeSpan? 
KeyIdleTime(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.KeyIdleTime(ToInner(key), flags); - } - - public void KeyMigrate(RedisKey key, EndPoint toServer, int toDatabase = 0, int timeoutMilliseconds = 0, MigrateOptions migrateOptions = MigrateOptions.None, CommandFlags flags = CommandFlags.None) - { - Inner.KeyMigrate(ToInner(key), toServer, toDatabase, timeoutMilliseconds, migrateOptions, flags); - } - - public bool KeyMove(RedisKey key, int database, CommandFlags flags = CommandFlags.None) - { - return Inner.KeyMove(ToInner(key), database, flags); - } - - public bool KeyPersist(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.KeyPersist(ToInner(key), flags); - } - - public RedisKey KeyRandom(CommandFlags flags = CommandFlags.None) - { - throw new NotSupportedException("RANDOMKEY is not supported when a key-prefix is specified"); - } - - public bool KeyRename(RedisKey key, RedisKey newKey, When when = When.Always, CommandFlags flags = CommandFlags.None) - { - return Inner.KeyRename(ToInner(key), ToInner(newKey), when, flags); - } - - public void KeyRestore(RedisKey key, byte[] value, TimeSpan? expiry = null, CommandFlags flags = CommandFlags.None) - { - Inner.KeyRestore(ToInner(key), value, expiry, flags); - } - - public TimeSpan? 
KeyTimeToLive(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.KeyTimeToLive(ToInner(key), flags); - } - - public RedisType KeyType(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.KeyType(ToInner(key), flags); - } - - public RedisValue ListGetByIndex(RedisKey key, long index, CommandFlags flags = CommandFlags.None) - { - return Inner.ListGetByIndex(ToInner(key), index, flags); - } - - public long ListInsertAfter(RedisKey key, RedisValue pivot, RedisValue value, CommandFlags flags = CommandFlags.None) - { - return Inner.ListInsertAfter(ToInner(key), pivot, value, flags); - } - - public long ListInsertBefore(RedisKey key, RedisValue pivot, RedisValue value, CommandFlags flags = CommandFlags.None) - { - return Inner.ListInsertBefore(ToInner(key), pivot, value, flags); - } - - public RedisValue ListLeftPop(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.ListLeftPop(ToInner(key), flags); - } - - public long ListLeftPush(RedisKey key, RedisValue[] values, CommandFlags flags = CommandFlags.None) - { - return Inner.ListLeftPush(ToInner(key), values, flags); - } - - public long ListLeftPush(RedisKey key, RedisValue[] values, When when = When.Always, CommandFlags flags = CommandFlags.None) - { - return Inner.ListLeftPush(ToInner(key), values, when, flags); - } - - public long ListLeftPush(RedisKey key, RedisValue value, When when = When.Always, CommandFlags flags = CommandFlags.None) - { - return Inner.ListLeftPush(ToInner(key), value, when, flags); - } - - public long ListLength(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.ListLength(ToInner(key), flags); - } - - public RedisValue[] ListRange(RedisKey key, long start = 0, long stop = -1, CommandFlags flags = CommandFlags.None) - { - return Inner.ListRange(ToInner(key), start, stop, flags); - } - - public long ListRemove(RedisKey key, RedisValue value, long count = 0, CommandFlags flags = CommandFlags.None) - { - return 
Inner.ListRemove(ToInner(key), value, count, flags); - } - - public RedisValue ListRightPop(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.ListRightPop(ToInner(key), flags); - } - - public RedisValue ListRightPopLeftPush(RedisKey source, RedisKey destination, CommandFlags flags = CommandFlags.None) - { - return Inner.ListRightPopLeftPush(ToInner(source), ToInner(destination), flags); - } - - public long ListRightPush(RedisKey key, RedisValue[] values, CommandFlags flags = CommandFlags.None) - { - return Inner.ListRightPush(ToInner(key), values, flags); - } - - public long ListRightPush(RedisKey key, RedisValue[] values, When when = When.Always, CommandFlags flags = CommandFlags.None) - { - return Inner.ListRightPush(ToInner(key), values, when, flags); - } - - public long ListRightPush(RedisKey key, RedisValue value, When when = When.Always, CommandFlags flags = CommandFlags.None) - { - return Inner.ListRightPush(ToInner(key), value, when, flags); - } - - public void ListSetByIndex(RedisKey key, long index, RedisValue value, CommandFlags flags = CommandFlags.None) - { - Inner.ListSetByIndex(ToInner(key), index, value, flags); - } - - public void ListTrim(RedisKey key, long start, long stop, CommandFlags flags = CommandFlags.None) - { - Inner.ListTrim(ToInner(key), start, stop, flags); - } - - public bool LockExtend(RedisKey key, RedisValue value, TimeSpan expiry, CommandFlags flags = CommandFlags.None) - { - return Inner.LockExtend(ToInner(key), value, expiry, flags); - } - - public RedisValue LockQuery(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.LockQuery(ToInner(key), flags); - } - - public bool LockRelease(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None) - { - return Inner.LockRelease(ToInner(key), value, flags); - } - - public bool LockTake(RedisKey key, RedisValue value, TimeSpan expiry, CommandFlags flags = CommandFlags.None) - { - return Inner.LockTake(ToInner(key), value, expiry, 
flags); - } - - public long Publish(RedisChannel channel, RedisValue message, CommandFlags flags = CommandFlags.None) - { - return Inner.Publish(ToInner(channel), message, flags); - } - - public RedisResult Execute(string command, params object[] args) - => Inner.Execute(command, ToInner(args), CommandFlags.None); - - public RedisResult Execute(string command, ICollection args, CommandFlags flags = CommandFlags.None) - => Inner.Execute(command, ToInner(args), flags); - - public RedisResult ScriptEvaluate(byte[] hash, RedisKey[] keys = null, RedisValue[] values = null, CommandFlags flags = CommandFlags.None) - { - // TODO: The return value could contain prefixed keys. It might make sense to 'unprefix' those? - return Inner.ScriptEvaluate(hash, ToInner(keys), values, flags); - } - - public RedisResult ScriptEvaluate(string script, RedisKey[] keys = null, RedisValue[] values = null, CommandFlags flags = CommandFlags.None) - { - // TODO: The return value could contain prefixed keys. It might make sense to 'unprefix' those? - return Inner.ScriptEvaluate(script, ToInner(keys), values, flags); - } - - public RedisResult ScriptEvaluate(LuaScript script, object parameters = null, CommandFlags flags = CommandFlags.None) - { - // TODO: The return value could contain prefixed keys. It might make sense to 'unprefix' those? - return script.Evaluate(Inner, parameters, Prefix, flags); - } - - public RedisResult ScriptEvaluate(LoadedLuaScript script, object parameters = null, CommandFlags flags = CommandFlags.None) - { - // TODO: The return value could contain prefixed keys. It might make sense to 'unprefix' those? 
- return script.Evaluate(Inner, parameters, Prefix, flags); - } - - public long SetAdd(RedisKey key, RedisValue[] values, CommandFlags flags = CommandFlags.None) - { - return Inner.SetAdd(ToInner(key), values, flags); - } - - public bool SetAdd(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None) - { - return Inner.SetAdd(ToInner(key), value, flags); - } - - public long SetCombineAndStore(SetOperation operation, RedisKey destination, RedisKey[] keys, CommandFlags flags = CommandFlags.None) - { - return Inner.SetCombineAndStore(operation, ToInner(destination), ToInner(keys), flags); - } - - public long SetCombineAndStore(SetOperation operation, RedisKey destination, RedisKey first, RedisKey second, CommandFlags flags = CommandFlags.None) - { - return Inner.SetCombineAndStore(operation, ToInner(destination), ToInner(first), ToInner(second), flags); - } - - public RedisValue[] SetCombine(SetOperation operation, RedisKey[] keys, CommandFlags flags = CommandFlags.None) - { - return Inner.SetCombine(operation, ToInner(keys), flags); - } - - public RedisValue[] SetCombine(SetOperation operation, RedisKey first, RedisKey second, CommandFlags flags = CommandFlags.None) - { - return Inner.SetCombine(operation, ToInner(first), ToInner(second), flags); - } - - public bool SetContains(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None) - { - return Inner.SetContains(ToInner(key), value, flags); - } - - public long SetLength(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.SetLength(ToInner(key), flags); - } - - public RedisValue[] SetMembers(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.SetMembers(ToInner(key), flags); - } - - public bool SetMove(RedisKey source, RedisKey destination, RedisValue value, CommandFlags flags = CommandFlags.None) - { - return Inner.SetMove(ToInner(source), ToInner(destination), value, flags); - } - - public RedisValue SetPop(RedisKey key, CommandFlags flags = 
CommandFlags.None) - { - return Inner.SetPop(ToInner(key), flags); - } - - public RedisValue[] SetPop(RedisKey key, long count, CommandFlags flags = CommandFlags.None) - { - return Inner.SetPop(ToInner(key), count, flags); - } - - public RedisValue SetRandomMember(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.SetRandomMember(ToInner(key), flags); - } - - public RedisValue[] SetRandomMembers(RedisKey key, long count, CommandFlags flags = CommandFlags.None) - { - return Inner.SetRandomMembers(ToInner(key), count, flags); - } - - public long SetRemove(RedisKey key, RedisValue[] values, CommandFlags flags = CommandFlags.None) - { - return Inner.SetRemove(ToInner(key), values, flags); - } - - public bool SetRemove(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None) - { - return Inner.SetRemove(ToInner(key), value, flags); - } - - public long SortAndStore(RedisKey destination, RedisKey key, long skip = 0, long take = -1, Order order = Order.Ascending, SortType sortType = SortType.Numeric, RedisValue by = default(RedisValue), RedisValue[] get = null, CommandFlags flags = CommandFlags.None) - { - return Inner.SortAndStore(ToInner(destination), ToInner(key), skip, take, order, sortType, SortByToInner(by), SortGetToInner(get), flags); - } - - public RedisValue[] Sort(RedisKey key, long skip = 0, long take = -1, Order order = Order.Ascending, SortType sortType = SortType.Numeric, RedisValue by = default(RedisValue), RedisValue[] get = null, CommandFlags flags = CommandFlags.None) - { - return Inner.Sort(ToInner(key), skip, take, order, sortType, SortByToInner(by), SortGetToInner(get), flags); - } - - public long SortedSetAdd(RedisKey key, SortedSetEntry[] values, CommandFlags flags) - { - return Inner.SortedSetAdd(ToInner(key), values, flags); - } - - public long SortedSetAdd(RedisKey key, SortedSetEntry[] values, When when = When.Always, CommandFlags flags = CommandFlags.None) - { - return Inner.SortedSetAdd(ToInner(key), 
values, when, flags); - } - - public bool SortedSetAdd(RedisKey key, RedisValue member, double score, CommandFlags flags) - { - return Inner.SortedSetAdd(ToInner(key), member, score, flags); - } - - public bool SortedSetAdd(RedisKey key, RedisValue member, double score, When when = When.Always, CommandFlags flags = CommandFlags.None) - { - return Inner.SortedSetAdd(ToInner(key), member, score, when, flags); - } - - public long SortedSetCombineAndStore(SetOperation operation, RedisKey destination, RedisKey[] keys, double[] weights = null, Aggregate aggregate = Aggregate.Sum, CommandFlags flags = CommandFlags.None) - { - return Inner.SortedSetCombineAndStore(operation, ToInner(destination), ToInner(keys), weights, aggregate, flags); - } - - public long SortedSetCombineAndStore(SetOperation operation, RedisKey destination, RedisKey first, RedisKey second, Aggregate aggregate = Aggregate.Sum, CommandFlags flags = CommandFlags.None) - { - return Inner.SortedSetCombineAndStore(operation, ToInner(destination), ToInner(first), ToInner(second), aggregate, flags); - } - - public double SortedSetDecrement(RedisKey key, RedisValue member, double value, CommandFlags flags = CommandFlags.None) - { - return Inner.SortedSetDecrement(ToInner(key), member, value, flags); - } - - public double SortedSetIncrement(RedisKey key, RedisValue member, double value, CommandFlags flags = CommandFlags.None) - { - return Inner.SortedSetIncrement(ToInner(key), member, value, flags); - } - - public long SortedSetLength(RedisKey key, double min = -1.0 / 0.0, double max = 1.0 / 0.0, Exclude exclude = Exclude.None, CommandFlags flags = CommandFlags.None) - { - return Inner.SortedSetLength(ToInner(key), min, max, exclude, flags); - } - - public long SortedSetLengthByValue(RedisKey key, RedisValue min, RedisValue max, Exclude exclude = Exclude.None, CommandFlags flags = CommandFlags.None) - { - return Inner.SortedSetLengthByValue(ToInner(key), min, max, exclude, flags); - } - - public RedisValue[] 
SortedSetRangeByRank(RedisKey key, long start = 0, long stop = -1, Order order = Order.Ascending, CommandFlags flags = CommandFlags.None) - { - return Inner.SortedSetRangeByRank(ToInner(key), start, stop, order, flags); - } - - public SortedSetEntry[] SortedSetRangeByRankWithScores(RedisKey key, long start = 0, long stop = -1, Order order = Order.Ascending, CommandFlags flags = CommandFlags.None) - { - return Inner.SortedSetRangeByRankWithScores(ToInner(key), start, stop, order, flags); - } - - public RedisValue[] SortedSetRangeByScore(RedisKey key, double start = -1.0 / 0.0, double stop = 1.0 / 0.0, Exclude exclude = Exclude.None, Order order = Order.Ascending, long skip = 0, long take = -1, CommandFlags flags = CommandFlags.None) - { - return Inner.SortedSetRangeByScore(ToInner(key), start, stop, exclude, order, skip, take, flags); - } - - public SortedSetEntry[] SortedSetRangeByScoreWithScores(RedisKey key, double start = -1.0 / 0.0, double stop = 1.0 / 0.0, Exclude exclude = Exclude.None, Order order = Order.Ascending, long skip = 0, long take = -1, CommandFlags flags = CommandFlags.None) - { - return Inner.SortedSetRangeByScoreWithScores(ToInner(key), start, stop, exclude, order, skip, take, flags); - } - - public RedisValue[] SortedSetRangeByValue(RedisKey key, RedisValue min, RedisValue max, Exclude exclude, long skip, long take, CommandFlags flags) - { - return Inner.SortedSetRangeByValue(ToInner(key), min, max, exclude, Order.Ascending, skip, take, flags); - } - - public RedisValue[] SortedSetRangeByValue(RedisKey key, RedisValue min = default(RedisValue), RedisValue max = default(RedisValue), Exclude exclude = Exclude.None, Order order = Order.Ascending, long skip = 0, long take = -1, CommandFlags flags = CommandFlags.None) - { - return Inner.SortedSetRangeByValue(ToInner(key), min, max, exclude, order, skip, take, flags); - } - - public long? 
SortedSetRank(RedisKey key, RedisValue member, Order order = Order.Ascending, CommandFlags flags = CommandFlags.None) - { - return Inner.SortedSetRank(ToInner(key), member, order, flags); - } - - public long SortedSetRemove(RedisKey key, RedisValue[] members, CommandFlags flags = CommandFlags.None) - { - return Inner.SortedSetRemove(ToInner(key), members, flags); - } - - public bool SortedSetRemove(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) - { - return Inner.SortedSetRemove(ToInner(key), member, flags); - } - - public long SortedSetRemoveRangeByRank(RedisKey key, long start, long stop, CommandFlags flags = CommandFlags.None) - { - return Inner.SortedSetRemoveRangeByRank(ToInner(key), start, stop, flags); - } - - public long SortedSetRemoveRangeByScore(RedisKey key, double start, double stop, Exclude exclude = Exclude.None, CommandFlags flags = CommandFlags.None) - { - return Inner.SortedSetRemoveRangeByScore(ToInner(key), start, stop, exclude, flags); - } - - public long SortedSetRemoveRangeByValue(RedisKey key, RedisValue min, RedisValue max, Exclude exclude = Exclude.None, CommandFlags flags = CommandFlags.None) - { - return Inner.SortedSetRemoveRangeByValue(ToInner(key), min, max, exclude, flags); - } - - public double? SortedSetScore(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) - { - return Inner.SortedSetScore(ToInner(key), member, flags); - } - - public SortedSetEntry? 
SortedSetPop(RedisKey key, Order order = Order.Ascending, CommandFlags flags = CommandFlags.None) - { - return Inner.SortedSetPop(ToInner(key), order, flags); - } - - public SortedSetEntry[] SortedSetPop(RedisKey key, long count, Order order = Order.Ascending, CommandFlags flags = CommandFlags.None) - { - return Inner.SortedSetPop(ToInner(key), count, order, flags); - } - - public long StreamAcknowledge(RedisKey key, RedisValue groupName, RedisValue messageId, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamAcknowledge(ToInner(key), groupName, messageId, flags); - } - - public long StreamAcknowledge(RedisKey key, RedisValue groupName, RedisValue[] messageIds, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamAcknowledge(ToInner(key), groupName, messageIds, flags); - } - - public RedisValue StreamAdd(RedisKey key, RedisValue streamField, RedisValue streamValue, RedisValue? messageId = null, int? maxLength = null, bool useApproximateMaxLength = false, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamAdd(ToInner(key), streamField, streamValue, messageId, maxLength, useApproximateMaxLength, flags); - } - - public RedisValue StreamAdd(RedisKey key, NameValueEntry[] streamPairs, RedisValue? messageId = null, int? 
maxLength = null, bool useApproximateMaxLength = false, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamAdd(ToInner(key), streamPairs, messageId, maxLength, useApproximateMaxLength, flags); - } - - public StreamEntry[] StreamClaim(RedisKey key, RedisValue consumerGroup, RedisValue claimingConsumer, long minIdleTimeInMs, RedisValue[] messageIds, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamClaim(ToInner(key), consumerGroup, claimingConsumer, minIdleTimeInMs, messageIds, flags); - } - - public RedisValue[] StreamClaimIdsOnly(RedisKey key, RedisValue consumerGroup, RedisValue claimingConsumer, long minIdleTimeInMs, RedisValue[] messageIds, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamClaimIdsOnly(ToInner(key), consumerGroup, claimingConsumer, minIdleTimeInMs, messageIds, flags); - } - - public bool StreamConsumerGroupSetPosition(RedisKey key, RedisValue groupName, RedisValue position, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamConsumerGroupSetPosition(ToInner(key), groupName, position, flags); - } - - public bool StreamCreateConsumerGroup(RedisKey key, RedisValue groupName, RedisValue? position, CommandFlags flags) - { - return Inner.StreamCreateConsumerGroup(ToInner(key), groupName, position, flags); - } - - public bool StreamCreateConsumerGroup(RedisKey key, RedisValue groupName, RedisValue? 
position = null, bool createStream = true, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamCreateConsumerGroup(ToInner(key), groupName, position, createStream, flags); - } - - public StreamInfo StreamInfo(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamInfo(ToInner(key), flags); - } - - public StreamGroupInfo[] StreamGroupInfo(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamGroupInfo(ToInner(key), flags); - } - - public StreamConsumerInfo[] StreamConsumerInfo(RedisKey key, RedisValue groupName, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamConsumerInfo(ToInner(key), groupName, flags); - } - - public long StreamLength(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamLength(ToInner(key), flags); - } - - public long StreamDelete(RedisKey key, RedisValue[] messageIds, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamDelete(ToInner(key), messageIds, flags); - } - - public long StreamDeleteConsumer(RedisKey key, RedisValue groupName, RedisValue consumerName, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamDeleteConsumer(ToInner(key), groupName, consumerName, flags); - } - - public bool StreamDeleteConsumerGroup(RedisKey key, RedisValue groupName, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamDeleteConsumerGroup(ToInner(key), groupName, flags); - } - - public StreamPendingInfo StreamPending(RedisKey key, RedisValue groupName, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamPending(ToInner(key), groupName, flags); - } - - public StreamPendingMessageInfo[] StreamPendingMessages(RedisKey key, RedisValue groupName, int count, RedisValue consumerName, RedisValue? minId = null, RedisValue? 
maxId = null, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamPendingMessages(ToInner(key), groupName, count, consumerName, minId, maxId, flags); - } - - public StreamEntry[] StreamRange(RedisKey key, RedisValue? minId = null, RedisValue? maxId = null, int? count = null, Order messageOrder = Order.Ascending, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamRange(ToInner(key), minId, maxId, count, messageOrder, flags); - } - - public StreamEntry[] StreamRead(RedisKey key, RedisValue position, int? count = null, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamRead(ToInner(key), position, count, flags); - } - - public RedisStream[] StreamRead(StreamPosition[] streamPositions, int? countPerStream = null, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamRead(streamPositions, countPerStream, flags); - } - - public StreamEntry[] StreamReadGroup(RedisKey key, RedisValue groupName, RedisValue consumerName, RedisValue? position, int? count, CommandFlags flags) - { - return Inner.StreamReadGroup(ToInner(key), groupName, consumerName, position, count, flags); - } - - public StreamEntry[] StreamReadGroup(RedisKey key, RedisValue groupName, RedisValue consumerName, RedisValue? position = null, int? count = null, bool noAck = false, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamReadGroup(ToInner(key), groupName, consumerName, position, count, noAck, flags); - } - - public RedisStream[] StreamReadGroup(StreamPosition[] streamPositions, RedisValue groupName, RedisValue consumerName, int? countPerStream, CommandFlags flags) - { - return Inner.StreamReadGroup(streamPositions, groupName, consumerName, countPerStream, flags); - } - - public RedisStream[] StreamReadGroup(StreamPosition[] streamPositions, RedisValue groupName, RedisValue consumerName, int? 
countPerStream = null, bool noAck = false, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamReadGroup(streamPositions, groupName, consumerName, countPerStream, noAck, flags); - } - - public long StreamTrim(RedisKey key, int maxLength, bool useApproximateMaxLength = false, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamTrim(ToInner(key), maxLength, useApproximateMaxLength, flags); - } - - public long StringAppend(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None) - { - return Inner.StringAppend(ToInner(key), value, flags); - } - - public long StringBitCount(RedisKey key, long start = 0, long end = -1, CommandFlags flags = CommandFlags.None) - { - return Inner.StringBitCount(ToInner(key), start, end, flags); - } - - public long StringBitOperation(Bitwise operation, RedisKey destination, RedisKey[] keys, CommandFlags flags = CommandFlags.None) - { - return Inner.StringBitOperation(operation, ToInner(destination), ToInner(keys), flags); - } - - public long StringBitOperation(Bitwise operation, RedisKey destination, RedisKey first, RedisKey second = default(RedisKey), CommandFlags flags = CommandFlags.None) - { - return Inner.StringBitOperation(operation, ToInner(destination), ToInner(first), ToInnerOrDefault(second), flags); - } - - public long StringBitPosition(RedisKey key, bool bit, long start = 0, long end = -1, CommandFlags flags = CommandFlags.None) - { - return Inner.StringBitPosition(ToInner(key), bit, start, end, flags); - } - - public double StringDecrement(RedisKey key, double value, CommandFlags flags = CommandFlags.None) - { - return Inner.StringDecrement(ToInner(key), value, flags); - } - - public long StringDecrement(RedisKey key, long value = 1, CommandFlags flags = CommandFlags.None) - { - return Inner.StringDecrement(ToInner(key), value, flags); - } - - public RedisValue[] StringGet(RedisKey[] keys, CommandFlags flags = CommandFlags.None) - { - return Inner.StringGet(ToInner(keys), flags); - } - - 
public RedisValue StringGet(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.StringGet(ToInner(key), flags); - } - - public LeaseStringGetLease(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.StringGetLease(ToInner(key), flags); - } - - public bool StringGetBit(RedisKey key, long offset, CommandFlags flags = CommandFlags.None) - { - return Inner.StringGetBit(ToInner(key), offset, flags); - } - - public RedisValue StringGetRange(RedisKey key, long start, long end, CommandFlags flags = CommandFlags.None) - { - return Inner.StringGetRange(ToInner(key), start, end, flags); - } - - public RedisValue StringGetSet(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None) - { - return Inner.StringGetSet(ToInner(key), value, flags); - } - - public RedisValueWithExpiry StringGetWithExpiry(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.StringGetWithExpiry(ToInner(key), flags); - } - - public double StringIncrement(RedisKey key, double value, CommandFlags flags = CommandFlags.None) - { - return Inner.StringIncrement(ToInner(key), value, flags); - } - - public long StringIncrement(RedisKey key, long value = 1, CommandFlags flags = CommandFlags.None) - { - return Inner.StringIncrement(ToInner(key), value, flags); - } - - public long StringLength(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.StringLength(ToInner(key), flags); - } - - public bool StringSet(KeyValuePair[] values, When when = When.Always, CommandFlags flags = CommandFlags.None) - { - return Inner.StringSet(ToInner(values), when, flags); - } - - public bool StringSet(RedisKey key, RedisValue value, TimeSpan? 
expiry = null, When when = When.Always, CommandFlags flags = CommandFlags.None) - { - return Inner.StringSet(ToInner(key), value, expiry, when, flags); - } - - public bool StringSetBit(RedisKey key, long offset, bool bit, CommandFlags flags = CommandFlags.None) - { - return Inner.StringSetBit(ToInner(key), offset, bit, flags); - } - - public RedisValue StringSetRange(RedisKey key, long offset, RedisValue value, CommandFlags flags = CommandFlags.None) - { - return Inner.StringSetRange(ToInner(key), offset, value, flags); - } - - public TimeSpan Ping(CommandFlags flags = CommandFlags.None) - { - return Inner.Ping(flags); - } - - IEnumerable IDatabase.HashScan(RedisKey key, RedisValue pattern, int pageSize, CommandFlags flags) - => Inner.HashScan(ToInner(key), pattern, pageSize, flags); - - IEnumerable IDatabase.HashScan(RedisKey key, RedisValue pattern, int pageSize, long cursor, int pageOffset, CommandFlags flags) - => Inner.HashScan(ToInner(key), pattern, pageSize, cursor, pageOffset, flags); - - IEnumerable IDatabase.SetScan(RedisKey key, RedisValue pattern, int pageSize, CommandFlags flags) - => Inner.SetScan(ToInner(key), pattern, pageSize, flags); - - IEnumerable IDatabase.SetScan(RedisKey key, RedisValue pattern, int pageSize, long cursor, int pageOffset, CommandFlags flags) - => Inner.SetScan(ToInner(key), pattern, pageSize, cursor, pageOffset, flags); - - IEnumerable IDatabase.SortedSetScan(RedisKey key, RedisValue pattern, int pageSize, CommandFlags flags) - => Inner.SortedSetScan(ToInner(key), pattern, pageSize, flags); - - IEnumerable IDatabase.SortedSetScan(RedisKey key, RedisValue pattern, int pageSize, long cursor, int pageOffset, CommandFlags flags) - => Inner.SortedSetScan(ToInner(key), pattern, pageSize, cursor, pageOffset, flags); - - public bool KeyTouch(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.KeyTouch(ToInner(key), flags); - } - - public long KeyTouch(RedisKey[] keys, CommandFlags flags = CommandFlags.None) - { - 
return Inner.KeyTouch(ToInner(keys), flags); - } - } -} diff --git a/src/StackExchange.Redis/KeyspaceIsolation/KeyPrefixed.VectorSets.cs b/src/StackExchange.Redis/KeyspaceIsolation/KeyPrefixed.VectorSets.cs new file mode 100644 index 000000000..ad4efe916 --- /dev/null +++ b/src/StackExchange.Redis/KeyspaceIsolation/KeyPrefixed.VectorSets.cs @@ -0,0 +1,78 @@ +using System; +using System.Diagnostics.CodeAnalysis; +using System.Threading.Tasks; +using RESPite; + +// ReSharper disable once CheckNamespace +namespace StackExchange.Redis.KeyspaceIsolation; + +internal partial class KeyPrefixed +{ + // Vector Set operations - async methods + [Experimental(Experiments.VectorSets, UrlFormat = Experiments.UrlFormat)] + public Task VectorSetAddAsync( + RedisKey key, + VectorSetAddRequest request, + CommandFlags flags = CommandFlags.None) => + Inner.VectorSetAddAsync(ToInner(key), request, flags); + + public Task VectorSetLengthAsync(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.VectorSetLengthAsync(ToInner(key), flags); + + public Task VectorSetDimensionAsync(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.VectorSetDimensionAsync(ToInner(key), flags); + + public Task?> VectorSetGetApproximateVectorAsync(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) => + Inner.VectorSetGetApproximateVectorAsync(ToInner(key), member, flags); + + public Task VectorSetGetAttributesJsonAsync(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) => + Inner.VectorSetGetAttributesJsonAsync(ToInner(key), member, flags); + + public Task VectorSetInfoAsync(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.VectorSetInfoAsync(ToInner(key), flags); + + public Task VectorSetContainsAsync(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) => + Inner.VectorSetContainsAsync(ToInner(key), member, flags); + + public Task?> VectorSetGetLinksAsync(RedisKey key, RedisValue member, CommandFlags flags 
= CommandFlags.None) => + Inner.VectorSetGetLinksAsync(ToInner(key), member, flags); + + public Task?> VectorSetGetLinksWithScoresAsync(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) => + Inner.VectorSetGetLinksWithScoresAsync(ToInner(key), member, flags); + + public Task VectorSetRandomMemberAsync(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.VectorSetRandomMemberAsync(ToInner(key), flags); + + public Task VectorSetRandomMembersAsync(RedisKey key, long count, CommandFlags flags = CommandFlags.None) => + Inner.VectorSetRandomMembersAsync(ToInner(key), count, flags); + + public Task VectorSetRemoveAsync(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) => + Inner.VectorSetRemoveAsync(ToInner(key), member, flags); + + public Task VectorSetSetAttributesJsonAsync(RedisKey key, RedisValue member, string attributesJson, CommandFlags flags = CommandFlags.None) => + Inner.VectorSetSetAttributesJsonAsync(ToInner(key), member, attributesJson, flags); + + public Task?> VectorSetSimilaritySearchAsync( + RedisKey key, + VectorSetSimilaritySearchRequest query, + CommandFlags flags = CommandFlags.None) => + Inner.VectorSetSimilaritySearchAsync(ToInner(key), query, flags); + + public Task?> VectorSetRangeAsync( + RedisKey key, + RedisValue start = default, + RedisValue end = default, + long count = -1, + Exclude exclude = Exclude.None, + CommandFlags flags = CommandFlags.None) => + Inner.VectorSetRangeAsync(ToInner(key), start, end, count, exclude, flags); + + public System.Collections.Generic.IAsyncEnumerable VectorSetRangeEnumerateAsync( + RedisKey key, + RedisValue start = default, + RedisValue end = default, + long count = 100, + Exclude exclude = Exclude.None, + CommandFlags flags = CommandFlags.None) => + Inner.VectorSetRangeEnumerateAsync(ToInner(key), start, end, count, exclude, flags); +} diff --git a/src/StackExchange.Redis/KeyspaceIsolation/KeyPrefixed.cs 
b/src/StackExchange.Redis/KeyspaceIsolation/KeyPrefixed.cs new file mode 100644 index 000000000..c7831fdb8 --- /dev/null +++ b/src/StackExchange.Redis/KeyspaceIsolation/KeyPrefixed.cs @@ -0,0 +1,962 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; +using System.Linq; +using System.Net; +using System.Threading.Tasks; + +namespace StackExchange.Redis.KeyspaceIsolation +{ + internal partial class KeyPrefixed : IDatabaseAsync where TInner : IDatabaseAsync + { + internal KeyPrefixed(TInner inner, byte[] keyPrefix) + { + Inner = inner; + Prefix = keyPrefix; + } + + public IConnectionMultiplexer Multiplexer => Inner.Multiplexer; + + internal TInner Inner { get; } + + internal byte[] Prefix { get; } + + public Task DebugObjectAsync(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.DebugObjectAsync(ToInner(key), flags); + + public Task GeoAddAsync(RedisKey key, double longitude, double latitude, RedisValue member, CommandFlags flags = CommandFlags.None) => + Inner.GeoAddAsync(ToInner(key), longitude, latitude, member, flags); + + public Task GeoAddAsync(RedisKey key, GeoEntry value, CommandFlags flags = CommandFlags.None) => + Inner.GeoAddAsync(ToInner(key), value, flags); + + public Task GeoAddAsync(RedisKey key, GeoEntry[] values, CommandFlags flags = CommandFlags.None) => + Inner.GeoAddAsync(ToInner(key), values, flags); + + public Task GeoRemoveAsync(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) => + Inner.GeoRemoveAsync(ToInner(key), member, flags); + + public Task GeoDistanceAsync(RedisKey key, RedisValue member1, RedisValue member2, GeoUnit unit = GeoUnit.Meters, CommandFlags flags = CommandFlags.None) => + Inner.GeoDistanceAsync(ToInner(key), member1, member2, unit, flags); + + public Task GeoHashAsync(RedisKey key, RedisValue[] members, CommandFlags flags = CommandFlags.None) => + Inner.GeoHashAsync(ToInner(key), members, flags); + + public Task GeoHashAsync(RedisKey key, 
RedisValue member, CommandFlags flags = CommandFlags.None) => + Inner.GeoHashAsync(ToInner(key), member, flags); + + public Task GeoPositionAsync(RedisKey key, RedisValue[] members, CommandFlags flags = CommandFlags.None) => + Inner.GeoPositionAsync(ToInner(key), members, flags); + + public Task GeoPositionAsync(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) => + Inner.GeoPositionAsync(ToInner(key), member, flags); + + public Task GeoRadiusAsync(RedisKey key, RedisValue member, double radius, GeoUnit unit = GeoUnit.Meters, int count = -1, Order? order = null, GeoRadiusOptions options = GeoRadiusOptions.Default, CommandFlags flags = CommandFlags.None) => + Inner.GeoRadiusAsync(ToInner(key), member, radius, unit, count, order, options, flags); + + public Task GeoRadiusAsync(RedisKey key, double longitude, double latitude, double radius, GeoUnit unit = GeoUnit.Meters, int count = -1, Order? order = null, GeoRadiusOptions options = GeoRadiusOptions.Default, CommandFlags flags = CommandFlags.None) => + Inner.GeoRadiusAsync(ToInner(key), longitude, latitude, radius, unit, count, order, options, flags); + + public Task GeoSearchAsync(RedisKey key, RedisValue member, GeoSearchShape shape, int count = -1, bool demandClosest = true, Order? order = null, GeoRadiusOptions options = GeoRadiusOptions.Default, CommandFlags flags = CommandFlags.None) => + Inner.GeoSearchAsync(ToInner(key), member, shape, count, demandClosest, order, options, flags); + + public Task GeoSearchAsync(RedisKey key, double longitude, double latitude, GeoSearchShape shape, int count = -1, bool demandClosest = true, Order? 
order = null, GeoRadiusOptions options = GeoRadiusOptions.Default, CommandFlags flags = CommandFlags.None) => + Inner.GeoSearchAsync(ToInner(key), longitude, latitude, shape, count, demandClosest, order, options, flags); + + public Task GeoSearchAndStoreAsync(RedisKey sourceKey, RedisKey destinationKey, RedisValue member, GeoSearchShape shape, int count = -1, bool demandClosest = true, Order? order = null, bool storeDistances = false, CommandFlags flags = CommandFlags.None) => + Inner.GeoSearchAndStoreAsync(ToInner(sourceKey), ToInner(destinationKey), member, shape, count, demandClosest, order, storeDistances, flags); + + public Task GeoSearchAndStoreAsync(RedisKey sourceKey, RedisKey destinationKey, double longitude, double latitude, GeoSearchShape shape, int count = -1, bool demandClosest = true, Order? order = null, bool storeDistances = false, CommandFlags flags = CommandFlags.None) => + Inner.GeoSearchAndStoreAsync(ToInner(sourceKey), ToInner(destinationKey), longitude, latitude, shape, count, demandClosest, order, storeDistances, flags); + + public Task HashDecrementAsync(RedisKey key, RedisValue hashField, double value, CommandFlags flags = CommandFlags.None) => + Inner.HashDecrementAsync(ToInner(key), hashField, value, flags); + + public Task HashDecrementAsync(RedisKey key, RedisValue hashField, long value = 1, CommandFlags flags = CommandFlags.None) => + Inner.HashDecrementAsync(ToInner(key), hashField, value, flags); + + public Task HashDeleteAsync(RedisKey key, RedisValue[] hashFields, CommandFlags flags = CommandFlags.None) => + Inner.HashDeleteAsync(ToInner(key), hashFields, flags); + + public Task HashDeleteAsync(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None) => + Inner.HashDeleteAsync(ToInner(key), hashField, flags); + + public Task HashExistsAsync(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None) => + Inner.HashExistsAsync(ToInner(key), hashField, flags); + + public Task 
HashFieldGetAndDeleteAsync(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None) => + Inner.HashFieldGetAndDeleteAsync(ToInner(key), hashField, flags); + + public Task?> HashFieldGetLeaseAndDeleteAsync(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None) => + Inner.HashFieldGetLeaseAndDeleteAsync(ToInner(key), hashField, flags); + + public Task HashFieldGetAndDeleteAsync(RedisKey key, RedisValue[] hashFields, CommandFlags flags = CommandFlags.None) => + Inner.HashFieldGetAndDeleteAsync(ToInner(key), hashFields, flags); + + public Task HashFieldGetAndSetExpiryAsync(RedisKey key, RedisValue hashField, TimeSpan? expiry = null, bool persist = false, CommandFlags flags = CommandFlags.None) => + Inner.HashFieldGetAndSetExpiryAsync(ToInner(key), hashField, expiry, persist, flags); + + public Task HashFieldGetAndSetExpiryAsync(RedisKey key, RedisValue hashField, DateTime expiry, CommandFlags flags = CommandFlags.None) => + Inner.HashFieldGetAndSetExpiryAsync(ToInner(key), hashField, expiry, flags); + + public Task?> HashFieldGetLeaseAndSetExpiryAsync(RedisKey key, RedisValue hashField, TimeSpan? expiry = null, bool persist = false, CommandFlags flags = CommandFlags.None) => + Inner.HashFieldGetLeaseAndSetExpiryAsync(ToInner(key), hashField, expiry, persist, flags); + + public Task?> HashFieldGetLeaseAndSetExpiryAsync(RedisKey key, RedisValue hashField, DateTime expiry, CommandFlags flags = CommandFlags.None) => + Inner.HashFieldGetLeaseAndSetExpiryAsync(ToInner(key), hashField, expiry, flags); + + public Task HashFieldGetAndSetExpiryAsync(RedisKey key, RedisValue[] hashFields, TimeSpan? 
expiry = null, bool persist = false, CommandFlags flags = CommandFlags.None) => + Inner.HashFieldGetAndSetExpiryAsync(ToInner(key), hashFields, expiry, persist, flags); + + public Task HashFieldGetAndSetExpiryAsync(RedisKey key, RedisValue[] hashFields, DateTime expiry, CommandFlags flags = CommandFlags.None) => + Inner.HashFieldGetAndSetExpiryAsync(ToInner(key), hashFields, expiry, flags); + + public Task HashFieldSetAndSetExpiryAsync(RedisKey key, RedisValue field, RedisValue value, TimeSpan? expiry = null, bool keepTtl = false, When when = When.Always, CommandFlags flags = CommandFlags.None) => + Inner.HashFieldSetAndSetExpiryAsync(ToInner(key), field, value, expiry, keepTtl, when, flags); + + public Task HashFieldSetAndSetExpiryAsync(RedisKey key, RedisValue field, RedisValue value, DateTime expiry, When when = When.Always, CommandFlags flags = CommandFlags.None) => + Inner.HashFieldSetAndSetExpiryAsync(ToInner(key), field, value, expiry, when, flags); + + public Task HashFieldSetAndSetExpiryAsync(RedisKey key, HashEntry[] hashFields, TimeSpan? 
expiry = null, bool keepTtl = false, When when = When.Always, CommandFlags flags = CommandFlags.None) => + Inner.HashFieldSetAndSetExpiryAsync(ToInner(key), hashFields, expiry, keepTtl, when, flags); + + public Task HashFieldSetAndSetExpiryAsync(RedisKey key, HashEntry[] hashFields, DateTime expiry, When when = When.Always, CommandFlags flags = CommandFlags.None) => + Inner.HashFieldSetAndSetExpiryAsync(ToInner(key), hashFields, expiry, when, flags); + + public Task HashFieldExpireAsync(RedisKey key, RedisValue[] hashFields, TimeSpan expiry, ExpireWhen when = ExpireWhen.Always, CommandFlags flags = CommandFlags.None) => + Inner.HashFieldExpireAsync(ToInner(key), hashFields, expiry, when, flags); + + public Task HashFieldExpireAsync(RedisKey key, RedisValue[] hashFields, DateTime expiry, ExpireWhen when = ExpireWhen.Always, CommandFlags flags = CommandFlags.None) => + Inner.HashFieldExpireAsync(ToInner(key), hashFields, expiry, when, flags); + + public Task HashFieldGetExpireDateTimeAsync(RedisKey key, RedisValue[] hashFields, CommandFlags flags) => + Inner.HashFieldGetExpireDateTimeAsync(ToInner(key), hashFields, flags); + + public Task HashFieldPersistAsync(RedisKey key, RedisValue[] hashFields, CommandFlags flags) => + Inner.HashFieldPersistAsync(ToInner(key), hashFields, flags); + + public Task HashFieldGetTimeToLiveAsync(RedisKey key, RedisValue[] hashFields, CommandFlags flags) => + Inner.HashFieldGetTimeToLiveAsync(ToInner(key), hashFields, flags); + + public Task HashGetAllAsync(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.HashGetAllAsync(ToInner(key), flags); + + public Task HashGetAsync(RedisKey key, RedisValue[] hashFields, CommandFlags flags = CommandFlags.None) => + Inner.HashGetAsync(ToInner(key), hashFields, flags); + + public Task HashGetAsync(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None) => + Inner.HashGetAsync(ToInner(key), hashField, flags); + + public Task?> HashGetLeaseAsync(RedisKey key, 
RedisValue hashField, CommandFlags flags = CommandFlags.None) => + Inner.HashGetLeaseAsync(ToInner(key), hashField, flags); + + public Task HashIncrementAsync(RedisKey key, RedisValue hashField, double value, CommandFlags flags = CommandFlags.None) => + Inner.HashIncrementAsync(ToInner(key), hashField, value, flags); + + public Task HashIncrementAsync(RedisKey key, RedisValue hashField, long value = 1, CommandFlags flags = CommandFlags.None) => + Inner.HashIncrementAsync(ToInner(key), hashField, value, flags); + + public Task HashKeysAsync(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.HashKeysAsync(ToInner(key), flags); + + public Task HashLengthAsync(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.HashLengthAsync(ToInner(key), flags); + + public Task HashRandomFieldAsync(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.HashRandomFieldAsync(ToInner(key), flags); + + public Task HashRandomFieldsAsync(RedisKey key, long count, CommandFlags flags = CommandFlags.None) => + Inner.HashRandomFieldsAsync(ToInner(key), count, flags); + + public Task HashRandomFieldsWithValuesAsync(RedisKey key, long count, CommandFlags flags = CommandFlags.None) => + Inner.HashRandomFieldsWithValuesAsync(ToInner(key), count, flags); + + public IAsyncEnumerable HashScanAsync(RedisKey key, RedisValue pattern, int pageSize, long cursor, int pageOffset, CommandFlags flags) => + Inner.HashScanAsync(ToInner(key), pattern, pageSize, cursor, pageOffset, flags); + + public IAsyncEnumerable HashScanNoValuesAsync(RedisKey key, RedisValue pattern, int pageSize, long cursor, int pageOffset, CommandFlags flags) => + Inner.HashScanNoValuesAsync(ToInner(key), pattern, pageSize, cursor, pageOffset, flags); + + public Task HashSetAsync(RedisKey key, RedisValue hashField, RedisValue value, When when = When.Always, CommandFlags flags = CommandFlags.None) => + Inner.HashSetAsync(ToInner(key), hashField, value, when, flags); + + public Task 
HashStringLengthAsync(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None) => + Inner.HashStringLengthAsync(ToInner(key), hashField, flags); + + public Task HashSetAsync(RedisKey key, HashEntry[] hashFields, CommandFlags flags = CommandFlags.None) => + Inner.HashSetAsync(ToInner(key), hashFields, flags); + + public Task HashValuesAsync(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.HashValuesAsync(ToInner(key), flags); + + public Task HyperLogLogAddAsync(RedisKey key, RedisValue[] values, CommandFlags flags = CommandFlags.None) => + Inner.HyperLogLogAddAsync(ToInner(key), values, flags); + + public Task HyperLogLogAddAsync(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None) => + Inner.HyperLogLogAddAsync(ToInner(key), value, flags); + + public Task HyperLogLogLengthAsync(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.HyperLogLogLengthAsync(ToInner(key), flags); + + public Task HyperLogLogLengthAsync(RedisKey[] keys, CommandFlags flags = CommandFlags.None) => + Inner.HyperLogLogLengthAsync(ToInner(keys), flags); + + public Task HyperLogLogMergeAsync(RedisKey destination, RedisKey[] sourceKeys, CommandFlags flags = CommandFlags.None) => + Inner.HyperLogLogMergeAsync(ToInner(destination), ToInner(sourceKeys), flags); + + public Task HyperLogLogMergeAsync(RedisKey destination, RedisKey first, RedisKey second, CommandFlags flags = CommandFlags.None) => + Inner.HyperLogLogMergeAsync(ToInner(destination), ToInner(first), ToInner(second), flags); + + public Task IdentifyEndpointAsync(RedisKey key = default(RedisKey), CommandFlags flags = CommandFlags.None) => + Inner.IdentifyEndpointAsync(ToInner(key), flags); + + public bool IsConnected(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.IsConnected(ToInner(key), flags); + + public Task KeyCopyAsync(RedisKey sourceKey, RedisKey destinationKey, int destinationDatabase = -1, bool replace = false, CommandFlags flags = 
CommandFlags.None) => + Inner.KeyCopyAsync(ToInner(sourceKey), ToInner(destinationKey), destinationDatabase, replace, flags); + + public Task KeyDeleteAsync(RedisKey[] keys, CommandFlags flags = CommandFlags.None) => + Inner.KeyDeleteAsync(ToInner(keys), flags); + + public Task KeyDeleteAsync(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.KeyDeleteAsync(ToInner(key), flags); + + public Task KeyDumpAsync(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.KeyDumpAsync(ToInner(key), flags); + + public Task KeyEncodingAsync(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.KeyEncodingAsync(ToInner(key), flags); + + public Task KeyExistsAsync(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.KeyExistsAsync(ToInner(key), flags); + + public Task KeyExistsAsync(RedisKey[] keys, CommandFlags flags = CommandFlags.None) => + Inner.KeyExistsAsync(ToInner(keys), flags); + + public Task KeyExpireAsync(RedisKey key, DateTime? expiry, CommandFlags flags) => + Inner.KeyExpireAsync(ToInner(key), expiry, flags); + + public Task KeyExpireAsync(RedisKey key, DateTime? expiry, ExpireWhen when = ExpireWhen.Always, CommandFlags flags = CommandFlags.None) => + Inner.KeyExpireAsync(ToInner(key), expiry, when, flags); + + public Task KeyExpireAsync(RedisKey key, TimeSpan? expiry, CommandFlags flags) => + Inner.KeyExpireAsync(ToInner(key), expiry, flags); + + public Task KeyExpireAsync(RedisKey key, TimeSpan? 
expiry, ExpireWhen when = ExpireWhen.Always, CommandFlags flags = CommandFlags.None) => + Inner.KeyExpireAsync(ToInner(key), expiry, when, flags); + + public Task KeyExpireTimeAsync(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.KeyExpireTimeAsync(ToInner(key), flags); + + public Task KeyFrequencyAsync(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.KeyFrequencyAsync(ToInner(key), flags); + + public Task KeyIdleTimeAsync(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.KeyIdleTimeAsync(ToInner(key), flags); + + public Task KeyMigrateAsync(RedisKey key, EndPoint toServer, int toDatabase = 0, int timeoutMilliseconds = 0, MigrateOptions migrateOptions = MigrateOptions.None, CommandFlags flags = CommandFlags.None) => + Inner.KeyMigrateAsync(ToInner(key), toServer, toDatabase, timeoutMilliseconds, migrateOptions, flags); + + public Task KeyMoveAsync(RedisKey key, int database, CommandFlags flags = CommandFlags.None) => + Inner.KeyMoveAsync(ToInner(key), database, flags); + + public Task KeyPersistAsync(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.KeyPersistAsync(ToInner(key), flags); + + public Task KeyRandomAsync(CommandFlags flags = CommandFlags.None) => + throw new NotSupportedException("RANDOMKEY is not supported when a key-prefix is specified"); + + public Task KeyRefCountAsync(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.KeyRefCountAsync(ToInner(key), flags); + + public Task KeyRenameAsync(RedisKey key, RedisKey newKey, When when = When.Always, CommandFlags flags = CommandFlags.None) => + Inner.KeyRenameAsync(ToInner(key), ToInner(newKey), when, flags); + + public Task KeyRestoreAsync(RedisKey key, byte[] value, TimeSpan? 
expiry = null, CommandFlags flags = CommandFlags.None) => + Inner.KeyRestoreAsync(ToInner(key), value, expiry, flags); + + public Task KeyTimeToLiveAsync(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.KeyTimeToLiveAsync(ToInner(key), flags); + + public Task KeyTypeAsync(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.KeyTypeAsync(ToInner(key), flags); + + public Task ListGetByIndexAsync(RedisKey key, long index, CommandFlags flags = CommandFlags.None) => + Inner.ListGetByIndexAsync(ToInner(key), index, flags); + + public Task ListInsertAfterAsync(RedisKey key, RedisValue pivot, RedisValue value, CommandFlags flags = CommandFlags.None) => + Inner.ListInsertAfterAsync(ToInner(key), pivot, value, flags); + + public Task ListInsertBeforeAsync(RedisKey key, RedisValue pivot, RedisValue value, CommandFlags flags = CommandFlags.None) => + Inner.ListInsertBeforeAsync(ToInner(key), pivot, value, flags); + + public Task ListLeftPopAsync(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.ListLeftPopAsync(ToInner(key), flags); + + public Task ListLeftPopAsync(RedisKey key, long count, CommandFlags flags = CommandFlags.None) => + Inner.ListLeftPopAsync(ToInner(key), count, flags); + + public Task ListLeftPopAsync(RedisKey[] keys, long count, CommandFlags flags = CommandFlags.None) => + Inner.ListLeftPopAsync(ToInner(keys), count, flags); + + public Task ListPositionAsync(RedisKey key, RedisValue element, long rank = 1, long maxLength = 0, CommandFlags flags = CommandFlags.None) => + Inner.ListPositionAsync(ToInner(key), element, rank, maxLength, flags); + + public Task ListPositionsAsync(RedisKey key, RedisValue element, long count, long rank = 1, long maxLength = 0, CommandFlags flags = CommandFlags.None) => + Inner.ListPositionsAsync(ToInner(key), element, count, rank, maxLength, flags); + + public Task ListLeftPushAsync(RedisKey key, RedisValue[] values, CommandFlags flags = CommandFlags.None) => + 
Inner.ListLeftPushAsync(ToInner(key), values, flags); + + public Task ListLeftPushAsync(RedisKey key, RedisValue[] values, When when = When.Always, CommandFlags flags = CommandFlags.None) => + Inner.ListLeftPushAsync(ToInner(key), values, when, flags); + + public Task ListLeftPushAsync(RedisKey key, RedisValue value, When when = When.Always, CommandFlags flags = CommandFlags.None) => + Inner.ListLeftPushAsync(ToInner(key), value, when, flags); + + public Task ListLengthAsync(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.ListLengthAsync(ToInner(key), flags); + + public Task ListMoveAsync(RedisKey sourceKey, RedisKey destinationKey, ListSide sourceSide, ListSide destinationSide, CommandFlags flags = CommandFlags.None) => + Inner.ListMoveAsync(ToInner(sourceKey), ToInner(destinationKey), sourceSide, destinationSide); + + public Task ListRangeAsync(RedisKey key, long start = 0, long stop = -1, CommandFlags flags = CommandFlags.None) => + Inner.ListRangeAsync(ToInner(key), start, stop, flags); + + public Task ListRemoveAsync(RedisKey key, RedisValue value, long count = 0, CommandFlags flags = CommandFlags.None) => + Inner.ListRemoveAsync(ToInner(key), value, count, flags); + + public Task ListRightPopAsync(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.ListRightPopAsync(ToInner(key), flags); + + public Task ListRightPopAsync(RedisKey key, long count, CommandFlags flags = CommandFlags.None) => + Inner.ListRightPopAsync(ToInner(key), count, flags); + + public Task ListRightPopAsync(RedisKey[] keys, long count, CommandFlags flags = CommandFlags.None) => + Inner.ListRightPopAsync(ToInner(keys), count, flags); + + public Task ListRightPopLeftPushAsync(RedisKey source, RedisKey destination, CommandFlags flags = CommandFlags.None) => + Inner.ListRightPopLeftPushAsync(ToInner(source), ToInner(destination), flags); + + public Task ListRightPushAsync(RedisKey key, RedisValue[] values, CommandFlags flags = CommandFlags.None) => + 
Inner.ListRightPushAsync(ToInner(key), values, flags); + + public Task ListRightPushAsync(RedisKey key, RedisValue[] values, When when = When.Always, CommandFlags flags = CommandFlags.None) => + Inner.ListRightPushAsync(ToInner(key), values, when, flags); + + public Task ListRightPushAsync(RedisKey key, RedisValue value, When when = When.Always, CommandFlags flags = CommandFlags.None) => + Inner.ListRightPushAsync(ToInner(key), value, when, flags); + + public Task ListSetByIndexAsync(RedisKey key, long index, RedisValue value, CommandFlags flags = CommandFlags.None) => + Inner.ListSetByIndexAsync(ToInner(key), index, value, flags); + + public Task ListTrimAsync(RedisKey key, long start, long stop, CommandFlags flags = CommandFlags.None) => + Inner.ListTrimAsync(ToInner(key), start, stop, flags); + + public Task LockExtendAsync(RedisKey key, RedisValue value, TimeSpan expiry, CommandFlags flags = CommandFlags.None) => + Inner.LockExtendAsync(ToInner(key), value, expiry, flags); + + public Task LockQueryAsync(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.LockQueryAsync(ToInner(key), flags); + + public Task LockReleaseAsync(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None) => + Inner.LockReleaseAsync(ToInner(key), value, flags); + + public Task LockTakeAsync(RedisKey key, RedisValue value, TimeSpan expiry, CommandFlags flags = CommandFlags.None) => + Inner.LockTakeAsync(ToInner(key), value, expiry, flags); + + public Task StringLongestCommonSubsequenceAsync(RedisKey first, RedisKey second, CommandFlags flags = CommandFlags.None) => + Inner.StringLongestCommonSubsequenceAsync(ToInner(first), ToInner(second), flags); + + public Task StringLongestCommonSubsequenceLengthAsync(RedisKey first, RedisKey second, CommandFlags flags = CommandFlags.None) => + Inner.StringLongestCommonSubsequenceLengthAsync(ToInner(first), ToInner(second), flags); + + public Task StringLongestCommonSubsequenceWithMatchesAsync(RedisKey first, RedisKey 
second, long minLength = 0, CommandFlags flags = CommandFlags.None) => + Inner.StringLongestCommonSubsequenceWithMatchesAsync(ToInner(first), ToInner(second), minLength, flags); + + public Task PublishAsync(RedisChannel channel, RedisValue message, CommandFlags flags = CommandFlags.None) => + Inner.PublishAsync(ToInner(channel), message, flags); + + public Task ExecuteAsync(string command, params object[] args) => + Inner.ExecuteAsync(command, ToInner(args), CommandFlags.None); + + public Task ExecuteAsync(string command, ICollection? args, CommandFlags flags = CommandFlags.None) => + Inner.ExecuteAsync(command, ToInner(args), flags); + + public Task ScriptEvaluateAsync(byte[] hash, RedisKey[]? keys = null, RedisValue[]? values = null, CommandFlags flags = CommandFlags.None) => + // TODO: The return value could contain prefixed keys. It might make sense to 'unprefix' those? + Inner.ScriptEvaluateAsync(hash, ToInner(keys), values, flags); + + public Task ScriptEvaluateAsync(string script, RedisKey[]? keys = null, RedisValue[]? values = null, CommandFlags flags = CommandFlags.None) => + // TODO: The return value could contain prefixed keys. It might make sense to 'unprefix' those? + Inner.ScriptEvaluateAsync(script: script, keys: ToInner(keys), values: values, flags: flags); + + public Task ScriptEvaluateAsync(LuaScript script, object? parameters = null, CommandFlags flags = CommandFlags.None) => + // TODO: The return value could contain prefixed keys. It might make sense to 'unprefix' those? + script.EvaluateAsync(Inner, parameters, Prefix, flags); + + public Task ScriptEvaluateAsync(LoadedLuaScript script, object? parameters = null, CommandFlags flags = CommandFlags.None) => + // TODO: The return value could contain prefixed keys. It might make sense to 'unprefix' those? + script.EvaluateAsync(Inner, parameters, Prefix, flags); + + public Task ScriptEvaluateReadOnlyAsync(byte[] hash, RedisKey[]? keys = null, RedisValue[]? 
values = null, CommandFlags flags = CommandFlags.None) => + // TODO: The return value could contain prefixed keys. It might make sense to 'unprefix' those? + Inner.ScriptEvaluateAsync(hash, ToInner(keys), values, flags); + + public Task ScriptEvaluateReadOnlyAsync(string script, RedisKey[]? keys = null, RedisValue[]? values = null, CommandFlags flags = CommandFlags.None) => + // TODO: The return value could contain prefixed keys. It might make sense to 'unprefix' those? + Inner.ScriptEvaluateAsync(script: script, keys: ToInner(keys), values: values, flags: flags); + + public Task SetAddAsync(RedisKey key, RedisValue[] values, CommandFlags flags = CommandFlags.None) => + Inner.SetAddAsync(ToInner(key), values, flags); + + public Task SetAddAsync(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None) => + Inner.SetAddAsync(ToInner(key), value, flags); + + public Task SetCombineAndStoreAsync(SetOperation operation, RedisKey destination, RedisKey[] keys, CommandFlags flags = CommandFlags.None) => + Inner.SetCombineAndStoreAsync(operation, ToInner(destination), ToInner(keys), flags); + + public Task SetCombineAndStoreAsync(SetOperation operation, RedisKey destination, RedisKey first, RedisKey second, CommandFlags flags = CommandFlags.None) => + Inner.SetCombineAndStoreAsync(operation, ToInner(destination), ToInner(first), ToInner(second), flags); + + public Task SetCombineAsync(SetOperation operation, RedisKey[] keys, CommandFlags flags = CommandFlags.None) => + Inner.SetCombineAsync(operation, ToInner(keys), flags); + + public Task SetCombineAsync(SetOperation operation, RedisKey first, RedisKey second, CommandFlags flags = CommandFlags.None) => + Inner.SetCombineAsync(operation, ToInner(first), ToInner(second), flags); + + public Task SetContainsAsync(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None) => + Inner.SetContainsAsync(ToInner(key), value, flags); + + public Task SetContainsAsync(RedisKey key, RedisValue[] values, 
CommandFlags flags = CommandFlags.None) => + Inner.SetContainsAsync(ToInner(key), values, flags); + + public Task SetIntersectionLengthAsync(RedisKey[] keys, long limit = 0, CommandFlags flags = CommandFlags.None) => + Inner.SetIntersectionLengthAsync(ToInner(keys), limit, flags); + + public Task SetLengthAsync(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.SetLengthAsync(ToInner(key), flags); + + public Task SetMembersAsync(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.SetMembersAsync(ToInner(key), flags); + + public Task SetMoveAsync(RedisKey source, RedisKey destination, RedisValue value, CommandFlags flags = CommandFlags.None) => + Inner.SetMoveAsync(ToInner(source), ToInner(destination), value, flags); + + public Task SetPopAsync(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.SetPopAsync(ToInner(key), flags); + + public Task SetPopAsync(RedisKey key, long count, CommandFlags flags = CommandFlags.None) => + Inner.SetPopAsync(ToInner(key), count, flags); + + public Task SetRandomMemberAsync(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.SetRandomMemberAsync(ToInner(key), flags); + + public Task SetRandomMembersAsync(RedisKey key, long count, CommandFlags flags = CommandFlags.None) => + Inner.SetRandomMembersAsync(ToInner(key), count, flags); + + public Task SetRemoveAsync(RedisKey key, RedisValue[] values, CommandFlags flags = CommandFlags.None) => + Inner.SetRemoveAsync(ToInner(key), values, flags); + + public IAsyncEnumerable SetScanAsync(RedisKey key, RedisValue pattern, int pageSize, long cursor, int pageOffset, CommandFlags flags) => + Inner.SetScanAsync(ToInner(key), pattern, pageSize, cursor, pageOffset, flags); + + public Task SetRemoveAsync(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None) => + Inner.SetRemoveAsync(ToInner(key), value, flags); + + public Task SortAndStoreAsync(RedisKey destination, RedisKey key, long skip = 0, long take = -1, Order order = 
Order.Ascending, SortType sortType = SortType.Numeric, RedisValue by = default, RedisValue[]? get = null, CommandFlags flags = CommandFlags.None) => + Inner.SortAndStoreAsync(ToInner(destination), ToInner(key), skip, take, order, sortType, SortByToInner(by), SortGetToInner(get), flags); + + public Task SortAsync(RedisKey key, long skip = 0, long take = -1, Order order = Order.Ascending, SortType sortType = SortType.Numeric, RedisValue by = default, RedisValue[]? get = null, CommandFlags flags = CommandFlags.None) => + Inner.SortAsync(ToInner(key), skip, take, order, sortType, SortByToInner(by), SortGetToInner(get), flags); + + public Task SortedSetAddAsync(RedisKey key, SortedSetEntry[] values, CommandFlags flags) => + Inner.SortedSetAddAsync(ToInner(key), values, flags); + + public Task SortedSetAddAsync(RedisKey key, SortedSetEntry[] values, When when = When.Always, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetAddAsync(ToInner(key), values, when, flags); + + public Task SortedSetAddAsync(RedisKey key, SortedSetEntry[] values, SortedSetWhen updateWhen = SortedSetWhen.Always, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetAddAsync(ToInner(key), values, updateWhen, flags); + + public Task SortedSetAddAsync(RedisKey key, RedisValue member, double score, CommandFlags flags) => + Inner.SortedSetAddAsync(ToInner(key), member, score, flags); + + public Task SortedSetAddAsync(RedisKey key, RedisValue member, double score, When when = When.Always, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetAddAsync(ToInner(key), member, score, when, flags); + + public Task SortedSetAddAsync(RedisKey key, RedisValue member, double score, SortedSetWhen updateWhen = SortedSetWhen.Always, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetAddAsync(ToInner(key), member, score, updateWhen, flags); + public Task SortedSetCombineAsync(SetOperation operation, RedisKey[] keys, double[]? 
weights = null, Aggregate aggregate = Aggregate.Sum, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetCombineAsync(operation, ToInner(keys), weights, aggregate, flags); + + public Task SortedSetCombineWithScoresAsync(SetOperation operation, RedisKey[] keys, double[]? weights = null, Aggregate aggregate = Aggregate.Sum, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetCombineWithScoresAsync(operation, ToInner(keys), weights, aggregate, flags); + + public Task SortedSetCombineAndStoreAsync(SetOperation operation, RedisKey destination, RedisKey[] keys, double[]? weights = null, Aggregate aggregate = Aggregate.Sum, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetCombineAndStoreAsync(operation, ToInner(destination), ToInner(keys), weights, aggregate, flags); + + public Task SortedSetCombineAndStoreAsync(SetOperation operation, RedisKey destination, RedisKey first, RedisKey second, Aggregate aggregate = Aggregate.Sum, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetCombineAndStoreAsync(operation, ToInner(destination), ToInner(first), ToInner(second), aggregate, flags); + + public Task SortedSetDecrementAsync(RedisKey key, RedisValue member, double value, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetDecrementAsync(ToInner(key), member, value, flags); + + public Task SortedSetIncrementAsync(RedisKey key, RedisValue member, double value, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetIncrementAsync(ToInner(key), member, value, flags); + + public Task SortedSetIntersectionLengthAsync(RedisKey[] keys, long limit = 0, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetIntersectionLengthAsync(ToInner(keys), limit, flags); + + public Task SortedSetLengthAsync(RedisKey key, double min = -1.0 / 0.0, double max = 1.0 / 0.0, Exclude exclude = Exclude.None, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetLengthAsync(ToInner(key), min, max, exclude, flags); + + public Task 
SortedSetLengthByValueAsync(RedisKey key, RedisValue min, RedisValue max, Exclude exclude = Exclude.None, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetLengthByValueAsync(ToInner(key), min, max, exclude, flags); + + public Task SortedSetRandomMemberAsync(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetRandomMemberAsync(ToInner(key), flags); + + public Task SortedSetRandomMembersAsync(RedisKey key, long count, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetRandomMembersAsync(ToInner(key), count, flags); + + public Task SortedSetRandomMembersWithScoresAsync(RedisKey key, long count, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetRandomMembersWithScoresAsync(ToInner(key), count, flags); + + public Task SortedSetRangeAndStoreAsync( + RedisKey sourceKey, + RedisKey destinationKey, + RedisValue start, + RedisValue stop, + SortedSetOrder sortedSetOrder = SortedSetOrder.ByRank, + Exclude exclude = Exclude.None, + Order order = Order.Ascending, + long skip = 0, + long? 
take = null, + CommandFlags flags = CommandFlags.None) => + Inner.SortedSetRangeAndStoreAsync(ToInner(sourceKey), ToInner(destinationKey), start, stop, sortedSetOrder, exclude, order, skip, take, flags); + + public Task SortedSetRangeByRankAsync(RedisKey key, long start = 0, long stop = -1, Order order = Order.Ascending, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetRangeByRankAsync(ToInner(key), start, stop, order, flags); + + public Task SortedSetRangeByRankWithScoresAsync(RedisKey key, long start = 0, long stop = -1, Order order = Order.Ascending, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetRangeByRankWithScoresAsync(ToInner(key), start, stop, order, flags); + + public Task SortedSetRangeByScoreAsync(RedisKey key, double start = -1.0 / 0.0, double stop = 1.0 / 0.0, Exclude exclude = Exclude.None, Order order = Order.Ascending, long skip = 0, long take = -1, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetRangeByScoreAsync(ToInner(key), start, stop, exclude, order, skip, take, flags); + + public Task SortedSetRangeByScoreWithScoresAsync(RedisKey key, double start = -1.0 / 0.0, double stop = 1.0 / 0.0, Exclude exclude = Exclude.None, Order order = Order.Ascending, long skip = 0, long take = -1, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetRangeByScoreWithScoresAsync(ToInner(key), start, stop, exclude, order, skip, take, flags); + + public Task SortedSetRangeByValueAsync(RedisKey key, RedisValue min, RedisValue max, Exclude exclude, long skip, long take, CommandFlags flags) => + Inner.SortedSetRangeByValueAsync(ToInner(key), min, max, exclude, Order.Ascending, skip, take, flags); + + public Task SortedSetRangeByValueAsync(RedisKey key, RedisValue min = default, RedisValue max = default, Exclude exclude = Exclude.None, Order order = Order.Ascending, long skip = 0, long take = -1, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetRangeByValueAsync(ToInner(key), min, max, exclude, order, skip, take, 
flags); + + public Task SortedSetRankAsync(RedisKey key, RedisValue member, Order order = Order.Ascending, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetRankAsync(ToInner(key), member, order, flags); + + public Task SortedSetRemoveAsync(RedisKey key, RedisValue[] members, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetRemoveAsync(ToInner(key), members, flags); + + public Task SortedSetRemoveAsync(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetRemoveAsync(ToInner(key), member, flags); + + public Task SortedSetRemoveRangeByRankAsync(RedisKey key, long start, long stop, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetRemoveRangeByRankAsync(ToInner(key), start, stop, flags); + + public Task SortedSetRemoveRangeByScoreAsync(RedisKey key, double start, double stop, Exclude exclude = Exclude.None, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetRemoveRangeByScoreAsync(ToInner(key), start, stop, exclude, flags); + + public Task SortedSetRemoveRangeByValueAsync(RedisKey key, RedisValue min, RedisValue max, Exclude exclude = Exclude.None, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetRemoveRangeByValueAsync(ToInner(key), min, max, exclude, flags); + + public Task SortedSetScoreAsync(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetScoreAsync(ToInner(key), member, flags); + + public Task SortedSetScoresAsync(RedisKey key, RedisValue[] members, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetScoresAsync(ToInner(key), members, flags); + + public IAsyncEnumerable SortedSetScanAsync(RedisKey key, RedisValue pattern, int pageSize, long cursor, int pageOffset, CommandFlags flags) => + Inner.SortedSetScanAsync(ToInner(key), pattern, pageSize, cursor, pageOffset, flags); + + public Task SortedSetUpdateAsync(RedisKey key, SortedSetEntry[] values, SortedSetWhen updateWhen = SortedSetWhen.Always, CommandFlags flags = 
CommandFlags.None) => + Inner.SortedSetUpdateAsync(ToInner(key), values, updateWhen, flags); + + public Task SortedSetUpdateAsync(RedisKey key, RedisValue member, double score, SortedSetWhen updateWhen = SortedSetWhen.Always, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetUpdateAsync(ToInner(key), member, score, updateWhen, flags); + + public Task SortedSetPopAsync(RedisKey key, Order order = Order.Ascending, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetPopAsync(ToInner(key), order, flags); + + public Task SortedSetPopAsync(RedisKey key, long count, Order order = Order.Ascending, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetPopAsync(ToInner(key), count, order, flags); + + public Task SortedSetPopAsync(RedisKey[] keys, long count, Order order = Order.Ascending, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetPopAsync(ToInner(keys), count, order, flags); + + public Task StreamAcknowledgeAsync(RedisKey key, RedisValue groupName, RedisValue messageId, CommandFlags flags = CommandFlags.None) => + Inner.StreamAcknowledgeAsync(ToInner(key), groupName, messageId, flags); + + public Task StreamAcknowledgeAsync(RedisKey key, RedisValue groupName, RedisValue[] messageIds, CommandFlags flags = CommandFlags.None) => + Inner.StreamAcknowledgeAsync(ToInner(key), groupName, messageIds, flags); + + public Task StreamAcknowledgeAndDeleteAsync(RedisKey key, RedisValue groupName, StreamTrimMode mode, RedisValue messageId, CommandFlags flags = CommandFlags.None) => + Inner.StreamAcknowledgeAndDeleteAsync(ToInner(key), groupName, mode, messageId, flags); + + public Task StreamAcknowledgeAndDeleteAsync(RedisKey key, RedisValue groupName, StreamTrimMode mode, RedisValue[] messageIds, CommandFlags flags = CommandFlags.None) => + Inner.StreamAcknowledgeAndDeleteAsync(ToInner(key), groupName, mode, messageIds, flags); + + public Task StreamAddAsync(RedisKey key, RedisValue streamField, RedisValue streamValue, RedisValue? messageId, int? 
maxLength, bool useApproximateMaxLength, CommandFlags flags) => + Inner.StreamAddAsync(ToInner(key), streamField, streamValue, messageId, maxLength, useApproximateMaxLength, flags); + + public Task StreamAddAsync(RedisKey key, NameValueEntry[] streamPairs, RedisValue? messageId, int? maxLength, bool useApproximateMaxLength, CommandFlags flags) => + Inner.StreamAddAsync(ToInner(key), streamPairs, messageId, maxLength, useApproximateMaxLength, flags); + + public Task StreamAddAsync(RedisKey key, RedisValue streamField, RedisValue streamValue, RedisValue? messageId = null, long? maxLength = null, bool useApproximateMaxLength = false, long? limit = null, StreamTrimMode mode = StreamTrimMode.KeepReferences, CommandFlags flags = CommandFlags.None) => + Inner.StreamAddAsync(ToInner(key), streamField, streamValue, messageId, maxLength, useApproximateMaxLength, limit, mode, flags); + + public Task StreamAddAsync(RedisKey key, NameValueEntry[] streamPairs, RedisValue? messageId = null, long? maxLength = null, bool useApproximateMaxLength = false, long? limit = null, StreamTrimMode mode = StreamTrimMode.KeepReferences, CommandFlags flags = CommandFlags.None) => + Inner.StreamAddAsync(ToInner(key), streamPairs, messageId, maxLength, useApproximateMaxLength, limit, mode, flags); + + public Task StreamAddAsync(RedisKey key, RedisValue streamField, RedisValue streamValue, StreamIdempotentId idempotentId, long? maxLength = null, bool useApproximateMaxLength = false, long? limit = null, StreamTrimMode mode = StreamTrimMode.KeepReferences, CommandFlags flags = CommandFlags.None) => + Inner.StreamAddAsync(ToInner(key), streamField, streamValue, idempotentId, maxLength, useApproximateMaxLength, limit, mode, flags); + + public Task StreamAddAsync(RedisKey key, NameValueEntry[] streamPairs, StreamIdempotentId idempotentId, long? maxLength = null, bool useApproximateMaxLength = false, long? 
limit = null, StreamTrimMode mode = StreamTrimMode.KeepReferences, CommandFlags flags = CommandFlags.None) => + Inner.StreamAddAsync(ToInner(key), streamPairs, idempotentId, maxLength, useApproximateMaxLength, limit, mode, flags); + + public Task StreamConfigureAsync(RedisKey key, StreamConfiguration configuration, CommandFlags flags = CommandFlags.None) => + Inner.StreamConfigureAsync(ToInner(key), configuration, flags); + + public Task StreamAutoClaimAsync(RedisKey key, RedisValue consumerGroup, RedisValue claimingConsumer, long minIdleTimeInMs, RedisValue startAtId, int? count = null, CommandFlags flags = CommandFlags.None) => + Inner.StreamAutoClaimAsync(ToInner(key), consumerGroup, claimingConsumer, minIdleTimeInMs, startAtId, count, flags); + + public Task StreamAutoClaimIdsOnlyAsync(RedisKey key, RedisValue consumerGroup, RedisValue claimingConsumer, long minIdleTimeInMs, RedisValue startAtId, int? count = null, CommandFlags flags = CommandFlags.None) => + Inner.StreamAutoClaimIdsOnlyAsync(ToInner(key), consumerGroup, claimingConsumer, minIdleTimeInMs, startAtId, count, flags); + + public Task StreamClaimAsync(RedisKey key, RedisValue consumerGroup, RedisValue claimingConsumer, long minIdleTimeInMs, RedisValue[] messageIds, CommandFlags flags = CommandFlags.None) => + Inner.StreamClaimAsync(ToInner(key), consumerGroup, claimingConsumer, minIdleTimeInMs, messageIds, flags); + + public Task StreamClaimIdsOnlyAsync(RedisKey key, RedisValue consumerGroup, RedisValue claimingConsumer, long minIdleTimeInMs, RedisValue[] messageIds, CommandFlags flags = CommandFlags.None) => + Inner.StreamClaimIdsOnlyAsync(ToInner(key), consumerGroup, claimingConsumer, minIdleTimeInMs, messageIds, flags); + + public Task StreamConsumerGroupSetPositionAsync(RedisKey key, RedisValue groupName, RedisValue position, CommandFlags flags = CommandFlags.None) => + Inner.StreamConsumerGroupSetPositionAsync(ToInner(key), groupName, position, flags); + + public Task 
StreamCreateConsumerGroupAsync(RedisKey key, RedisValue groupName, RedisValue? position, CommandFlags flags) => + Inner.StreamCreateConsumerGroupAsync(ToInner(key), groupName, position, flags); + + public Task StreamCreateConsumerGroupAsync(RedisKey key, RedisValue groupName, RedisValue? position = null, bool createStream = true, CommandFlags flags = CommandFlags.None) => + Inner.StreamCreateConsumerGroupAsync(ToInner(key), groupName, position, createStream, flags); + + public Task StreamInfoAsync(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.StreamInfoAsync(ToInner(key), flags); + + public Task StreamGroupInfoAsync(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.StreamGroupInfoAsync(ToInner(key), flags); + + public Task StreamConsumerInfoAsync(RedisKey key, RedisValue groupName, CommandFlags flags = CommandFlags.None) => + Inner.StreamConsumerInfoAsync(ToInner(key), groupName, flags); + + public Task StreamLengthAsync(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.StreamLengthAsync(ToInner(key), flags); + + public Task StreamDeleteAsync(RedisKey key, RedisValue[] messageIds, CommandFlags flags = CommandFlags.None) => + Inner.StreamDeleteAsync(ToInner(key), messageIds, flags); + + public Task StreamDeleteAsync(RedisKey key, RedisValue[] messageIds, StreamTrimMode mode, CommandFlags flags = CommandFlags.None) => + Inner.StreamDeleteAsync(ToInner(key), messageIds, mode, flags); + + public Task StreamDeleteConsumerAsync(RedisKey key, RedisValue groupName, RedisValue consumerName, CommandFlags flags = CommandFlags.None) => + Inner.StreamDeleteConsumerAsync(ToInner(key), groupName, consumerName, flags); + + public Task StreamDeleteConsumerGroupAsync(RedisKey key, RedisValue groupName, CommandFlags flags = CommandFlags.None) => + Inner.StreamDeleteConsumerGroupAsync(ToInner(key), groupName, flags); + + public Task StreamPendingAsync(RedisKey key, RedisValue groupName, CommandFlags flags = CommandFlags.None) => + 
Inner.StreamPendingAsync(ToInner(key), groupName, flags); + + public Task StreamPendingMessagesAsync(RedisKey key, RedisValue groupName, int count, RedisValue consumerName, RedisValue? minId, RedisValue? maxId, CommandFlags flags) => + Inner.StreamPendingMessagesAsync(ToInner(key), groupName, count, consumerName, minId, maxId, flags); + + public Task StreamPendingMessagesAsync(RedisKey key, RedisValue groupName, int count, RedisValue consumerName, RedisValue? minId = null, RedisValue? maxId = null, long? minIdleTimeInMs = null, CommandFlags flags = CommandFlags.None) => + Inner.StreamPendingMessagesAsync(ToInner(key), groupName, count, consumerName, minId, maxId, minIdleTimeInMs, flags); + + public Task StreamRangeAsync(RedisKey key, RedisValue? minId = null, RedisValue? maxId = null, int? count = null, Order messageOrder = Order.Ascending, CommandFlags flags = CommandFlags.None) => + Inner.StreamRangeAsync(ToInner(key), minId, maxId, count, messageOrder, flags); + + public Task StreamReadAsync(RedisKey key, RedisValue position, int? count = null, CommandFlags flags = CommandFlags.None) => + Inner.StreamReadAsync(ToInner(key), position, count, flags); + + public Task StreamReadAsync(StreamPosition[] streamPositions, int? countPerStream = null, CommandFlags flags = CommandFlags.None) => + Inner.StreamReadAsync(streamPositions, countPerStream, flags); + + public Task StreamReadGroupAsync(RedisKey key, RedisValue groupName, RedisValue consumerName, RedisValue? position, int? count, CommandFlags flags) => + Inner.StreamReadGroupAsync(ToInner(key), groupName, consumerName, position, count, flags); + + public Task StreamReadGroupAsync(RedisKey key, RedisValue groupName, RedisValue consumerName, RedisValue? position = null, int? 
count = null, bool noAck = false, CommandFlags flags = CommandFlags.None) => + Inner.StreamReadGroupAsync(ToInner(key), groupName, consumerName, position, count, noAck, flags); + + public Task StreamReadGroupAsync(RedisKey key, RedisValue groupName, RedisValue consumerName, RedisValue? position = null, int? count = null, bool noAck = false, TimeSpan? claimMinIdleTime = null, CommandFlags flags = CommandFlags.None) => + Inner.StreamReadGroupAsync(ToInner(key), groupName, consumerName, position, count, noAck, claimMinIdleTime, flags); + + public Task StreamReadGroupAsync(StreamPosition[] streamPositions, RedisValue groupName, RedisValue consumerName, int? countPerStream, CommandFlags flags) => + Inner.StreamReadGroupAsync(streamPositions, groupName, consumerName, countPerStream, flags); + + public Task StreamReadGroupAsync(StreamPosition[] streamPositions, RedisValue groupName, RedisValue consumerName, int? countPerStream = null, bool noAck = false, CommandFlags flags = CommandFlags.None) => + Inner.StreamReadGroupAsync(streamPositions, groupName, consumerName, countPerStream, noAck, flags); + + public Task StreamReadGroupAsync(StreamPosition[] streamPositions, RedisValue groupName, RedisValue consumerName, int? countPerStream = null, bool noAck = false, TimeSpan? claimMinIdleTime = null, CommandFlags flags = CommandFlags.None) => + Inner.StreamReadGroupAsync(streamPositions, groupName, consumerName, countPerStream, noAck, claimMinIdleTime, flags); + + public Task StreamTrimAsync(RedisKey key, int maxLength, bool useApproximateMaxLength, CommandFlags flags) => + Inner.StreamTrimAsync(ToInner(key), maxLength, useApproximateMaxLength, flags); + + public Task StreamTrimAsync(RedisKey key, long maxLength, bool useApproximateMaxLength = false, long? 
limit = null, StreamTrimMode mode = StreamTrimMode.KeepReferences, CommandFlags flags = CommandFlags.None) => + Inner.StreamTrimAsync(ToInner(key), maxLength, useApproximateMaxLength, limit, mode, flags); + + public Task StreamTrimByMinIdAsync(RedisKey key, RedisValue minId, bool useApproximateMaxLength = false, long? limit = null, StreamTrimMode mode = StreamTrimMode.KeepReferences, CommandFlags flags = CommandFlags.None) => + Inner.StreamTrimByMinIdAsync(ToInner(key), minId, useApproximateMaxLength, limit, mode, flags); + + public Task StringAppendAsync(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None) => + Inner.StringAppendAsync(ToInner(key), value, flags); + + public Task StringBitCountAsync(RedisKey key, long start, long end, CommandFlags flags) => + Inner.StringBitCountAsync(ToInner(key), start, end, flags); + + public Task StringBitCountAsync(RedisKey key, long start = 0, long end = -1, StringIndexType indexType = StringIndexType.Byte, CommandFlags flags = CommandFlags.None) => + Inner.StringBitCountAsync(ToInner(key), start, end, indexType, flags); + + public Task StringBitOperationAsync(Bitwise operation, RedisKey destination, RedisKey[] keys, CommandFlags flags = CommandFlags.None) => + Inner.StringBitOperationAsync(operation, ToInner(destination), ToInner(keys), flags); + + public Task StringBitOperationAsync(Bitwise operation, RedisKey destination, RedisKey first, RedisKey second = default, CommandFlags flags = CommandFlags.None) => + Inner.StringBitOperationAsync(operation, ToInner(destination), ToInner(first), ToInnerOrDefault(second), flags); + + public Task StringBitPositionAsync(RedisKey key, bool bit, long start, long end, CommandFlags flags) => + Inner.StringBitPositionAsync(ToInner(key), bit, start, end, flags); + + public Task StringBitPositionAsync(RedisKey key, bool bit, long start = 0, long end = -1, StringIndexType indexType = StringIndexType.Byte, CommandFlags flags = CommandFlags.None) => + 
Inner.StringBitPositionAsync(ToInner(key), bit, start, end, indexType, flags); + + public Task StringDeleteAsync(RedisKey key, ValueCondition when, CommandFlags flags = CommandFlags.None) => + Inner.StringDeleteAsync(ToInner(key), when, flags); + + public Task StringDecrementAsync(RedisKey key, double value, CommandFlags flags = CommandFlags.None) => + Inner.StringDecrementAsync(ToInner(key), value, flags); + + public Task StringDigestAsync(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.StringDigestAsync(ToInner(key), flags); + + public Task StringDecrementAsync(RedisKey key, long value = 1, CommandFlags flags = CommandFlags.None) => + Inner.StringDecrementAsync(ToInner(key), value, flags); + + public Task StringGetAsync(RedisKey[] keys, CommandFlags flags = CommandFlags.None) => + Inner.StringGetAsync(ToInner(keys), flags); + + public Task StringGetAsync(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.StringGetAsync(ToInner(key), flags); + + public Task StringGetSetExpiryAsync(RedisKey key, TimeSpan? 
expiry, CommandFlags flags = CommandFlags.None) => + Inner.StringGetSetExpiryAsync(ToInner(key), expiry, flags); + + public Task StringGetSetExpiryAsync(RedisKey key, DateTime expiry, CommandFlags flags = CommandFlags.None) => + Inner.StringGetSetExpiryAsync(ToInner(key), expiry, flags); + + public Task?> StringGetLeaseAsync(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.StringGetLeaseAsync(ToInner(key), flags); + + public Task StringGetBitAsync(RedisKey key, long offset, CommandFlags flags = CommandFlags.None) => + Inner.StringGetBitAsync(ToInner(key), offset, flags); + + public Task StringGetRangeAsync(RedisKey key, long start, long end, CommandFlags flags = CommandFlags.None) => + Inner.StringGetRangeAsync(ToInner(key), start, end, flags); + + public Task StringGetSetAsync(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None) => + Inner.StringGetSetAsync(ToInner(key), value, flags); + + public Task StringGetDeleteAsync(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.StringGetDeleteAsync(ToInner(key), flags); + + public Task StringGetWithExpiryAsync(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.StringGetWithExpiryAsync(ToInner(key), flags); + + public Task StringIncrementAsync(RedisKey key, double value, CommandFlags flags = CommandFlags.None) => + Inner.StringIncrementAsync(ToInner(key), value, flags); + + public Task StringIncrementAsync(RedisKey key, long value = 1, CommandFlags flags = CommandFlags.None) => + Inner.StringIncrementAsync(ToInner(key), value, flags); + + public Task StringLengthAsync(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.StringLengthAsync(ToInner(key), flags); + + public Task StringSetAsync(RedisKey key, RedisValue value, Expiration expiry, ValueCondition when, CommandFlags flags = CommandFlags.None) + => Inner.StringSetAsync(ToInner(key), value, expiry, when, flags); + + public Task StringSetAsync(KeyValuePair[] values, When when = When.Always, 
CommandFlags flags = CommandFlags.None) => + Inner.StringSetAsync(ToInner(values), when, flags); + + public Task StringSetAsync(KeyValuePair[] values, When when, Expiration expiry, CommandFlags flags) => + Inner.StringSetAsync(ToInner(values), when, expiry, flags); + + public Task StringSetAsync(RedisKey key, RedisValue value, TimeSpan? expiry, When when) => + Inner.StringSetAsync(ToInner(key), value, expiry, when); + public Task StringSetAsync(RedisKey key, RedisValue value, TimeSpan? expiry, When when, CommandFlags flags) => + Inner.StringSetAsync(ToInner(key), value, expiry, when, flags); + public Task StringSetAsync(RedisKey key, RedisValue value, TimeSpan? expiry = null, bool keepTtl = false, When when = When.Always, CommandFlags flags = CommandFlags.None) => + Inner.StringSetAsync(ToInner(key), value, expiry, keepTtl, when, flags); + + public Task StringSetAndGetAsync(RedisKey key, RedisValue value, TimeSpan? expiry, When when, CommandFlags flags) => + Inner.StringSetAndGetAsync(ToInner(key), value, expiry, when, flags); + + public Task StringSetAndGetAsync(RedisKey key, RedisValue value, TimeSpan? 
expiry = null, bool keepTtl = false, When when = When.Always, CommandFlags flags = CommandFlags.None) => + Inner.StringSetAndGetAsync(ToInner(key), value, expiry, keepTtl, when, flags); + + public Task StringSetBitAsync(RedisKey key, long offset, bool bit, CommandFlags flags = CommandFlags.None) => + Inner.StringSetBitAsync(ToInner(key), offset, bit, flags); + + public Task StringSetRangeAsync(RedisKey key, long offset, RedisValue value, CommandFlags flags = CommandFlags.None) => + Inner.StringSetRangeAsync(ToInner(key), offset, value, flags); + + public Task PingAsync(CommandFlags flags = CommandFlags.None) => + Inner.PingAsync(flags); + + public Task KeyTouchAsync(RedisKey[] keys, CommandFlags flags = CommandFlags.None) => + Inner.KeyTouchAsync(ToInner(keys), flags); + + public Task KeyTouchAsync(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.KeyTouchAsync(ToInner(key), flags); + + public bool TryWait(Task task) => + Inner.TryWait(task); + + public TResult Wait(Task task) => + Inner.Wait(task); + + public void Wait(Task task) => + Inner.Wait(task); + + public void WaitAll(params Task[] tasks) => + Inner.WaitAll(tasks); + + protected internal RedisKey ToInner(RedisKey outer) => + RedisKey.WithPrefix(Prefix, outer); + + protected RedisKey ToInnerOrDefault(RedisKey outer) => + (outer == default(RedisKey)) ? outer : ToInner(outer); + + [return: NotNullIfNotNull("args")] + protected ICollection? ToInner(ICollection? args) + { + if (args?.Any(x => x is RedisKey || x is RedisChannel) == true) + { + var withPrefix = new object[args.Count]; + int i = 0; + foreach (var oldArg in args) + { + object newArg; + if (oldArg is RedisKey key) + { + newArg = ToInner(key); + } + else if (oldArg is RedisChannel channel) + { + newArg = ToInner(channel); + } + else + { + newArg = oldArg; + } + withPrefix[i++] = newArg; + } + args = withPrefix; + } + return args; + } + + [return: NotNullIfNotNull("outer")] + protected RedisKey[]? ToInner(RedisKey[]? 
outer) + { + if (outer == null || outer.Length == 0) + { + return outer; + } + else + { + RedisKey[] inner = new RedisKey[outer.Length]; + + for (int i = 0; i < outer.Length; ++i) + { + inner[i] = ToInner(outer[i]); + } + + return inner; + } + } + + protected KeyValuePair ToInner(KeyValuePair outer) => + new KeyValuePair(ToInner(outer.Key), outer.Value); + + [return: NotNullIfNotNull("outer")] + protected KeyValuePair[]? ToInner(KeyValuePair[]? outer) + { + if (outer == null || outer.Length == 0) + { + return outer; + } + else + { + KeyValuePair[] inner = new KeyValuePair[outer.Length]; + + for (int i = 0; i < outer.Length; ++i) + { + inner[i] = ToInner(outer[i]); + } + + return inner; + } + } + + protected RedisValue ToInner(RedisValue outer) => + RedisKey.ConcatenateBytes(Prefix, null, (byte[]?)outer); + + protected RedisValue SortByToInner(RedisValue outer) => + (outer == "nosort") ? outer : ToInner(outer); + + protected RedisValue SortGetToInner(RedisValue outer) => + (outer == "#") ? outer : ToInner(outer); + + [return: NotNullIfNotNull("outer")] + protected RedisValue[]? SortGetToInner(RedisValue[]? outer) + { + if (outer == null || outer.Length == 0) + { + return outer; + } + else + { + RedisValue[] inner = new RedisValue[outer.Length]; + + for (int i = 0; i < outer.Length; ++i) + { + inner[i] = SortGetToInner(outer[i]); + } + + return inner; + } + } + + protected RedisChannel ToInner(RedisChannel outer) + { + var combined = RedisKey.ConcatenateBytes(Prefix, null, (byte[]?)outer); + return new RedisChannel(combined, outer.IsPattern ? RedisChannel.PatternMode.Pattern : RedisChannel.PatternMode.Literal); + } + + private Func? 
mapFunction; + protected Func GetMapFunction() => + // create as a delegate when first required, then re-use + mapFunction ??= new Func(ToInner); + } +} diff --git a/src/StackExchange.Redis/KeyspaceIsolation/KeyPrefixedBatch.cs b/src/StackExchange.Redis/KeyspaceIsolation/KeyPrefixedBatch.cs new file mode 100644 index 000000000..6f5679a66 --- /dev/null +++ b/src/StackExchange.Redis/KeyspaceIsolation/KeyPrefixedBatch.cs @@ -0,0 +1,9 @@ +namespace StackExchange.Redis.KeyspaceIsolation +{ + internal sealed class KeyPrefixedBatch : KeyPrefixed, IBatch + { + public KeyPrefixedBatch(IBatch inner, byte[] prefix) : base(inner, prefix) { } + + public void Execute() => Inner.Execute(); + } +} diff --git a/src/StackExchange.Redis/KeyspaceIsolation/KeyPrefixedDatabase.VectorSets.cs b/src/StackExchange.Redis/KeyspaceIsolation/KeyPrefixedDatabase.VectorSets.cs new file mode 100644 index 000000000..83fbb2f85 --- /dev/null +++ b/src/StackExchange.Redis/KeyspaceIsolation/KeyPrefixedDatabase.VectorSets.cs @@ -0,0 +1,74 @@ +using System; + +// ReSharper disable once CheckNamespace +namespace StackExchange.Redis.KeyspaceIsolation; + +internal sealed partial class KeyPrefixedDatabase +{ + // Vector Set operations + public bool VectorSetAdd( + RedisKey key, + VectorSetAddRequest request, + CommandFlags flags = CommandFlags.None) => + Inner.VectorSetAdd(ToInner(key), request, flags); + + public long VectorSetLength(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.VectorSetLength(ToInner(key), flags); + + public int VectorSetDimension(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.VectorSetDimension(ToInner(key), flags); + + public Lease? VectorSetGetApproximateVector(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) => + Inner.VectorSetGetApproximateVector(ToInner(key), member, flags); + + public string? 
VectorSetGetAttributesJson(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) => + Inner.VectorSetGetAttributesJson(ToInner(key), member, flags); + + public VectorSetInfo? VectorSetInfo(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.VectorSetInfo(ToInner(key), flags); + + public bool VectorSetContains(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) => + Inner.VectorSetContains(ToInner(key), member, flags); + + public Lease? VectorSetGetLinks(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) => + Inner.VectorSetGetLinks(ToInner(key), member, flags); + + public Lease? VectorSetGetLinksWithScores(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) => + Inner.VectorSetGetLinksWithScores(ToInner(key), member, flags); + + public RedisValue VectorSetRandomMember(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.VectorSetRandomMember(ToInner(key), flags); + + public RedisValue[] VectorSetRandomMembers(RedisKey key, long count, CommandFlags flags = CommandFlags.None) => + Inner.VectorSetRandomMembers(ToInner(key), count, flags); + + public bool VectorSetRemove(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) => + Inner.VectorSetRemove(ToInner(key), member, flags); + + public bool VectorSetSetAttributesJson(RedisKey key, RedisValue member, string attributesJson, CommandFlags flags = CommandFlags.None) => + Inner.VectorSetSetAttributesJson(ToInner(key), member, attributesJson, flags); + + public Lease? 
VectorSetSimilaritySearch( + RedisKey key, + VectorSetSimilaritySearchRequest query, + CommandFlags flags = CommandFlags.None) => + Inner.VectorSetSimilaritySearch(ToInner(key), query, flags); + + public Lease VectorSetRange( + RedisKey key, + RedisValue start = default, + RedisValue end = default, + long count = -1, + Exclude exclude = Exclude.None, + CommandFlags flags = CommandFlags.None) => + Inner.VectorSetRange(ToInner(key), start, end, count, exclude, flags); + + public System.Collections.Generic.IEnumerable VectorSetRangeEnumerate( + RedisKey key, + RedisValue start = default, + RedisValue end = default, + long count = 100, + Exclude exclude = Exclude.None, + CommandFlags flags = CommandFlags.None) => + Inner.VectorSetRangeEnumerate(ToInner(key), start, end, count, exclude, flags); +} diff --git a/src/StackExchange.Redis/KeyspaceIsolation/KeyPrefixedDatabase.cs b/src/StackExchange.Redis/KeyspaceIsolation/KeyPrefixedDatabase.cs new file mode 100644 index 000000000..01fe28505 --- /dev/null +++ b/src/StackExchange.Redis/KeyspaceIsolation/KeyPrefixedDatabase.cs @@ -0,0 +1,836 @@ +using System; +using System.Collections.Generic; +using System.IO; +using System.Net; + +namespace StackExchange.Redis.KeyspaceIsolation +{ + internal sealed partial class KeyPrefixedDatabase : KeyPrefixed, IDatabase + { + public KeyPrefixedDatabase(IDatabase inner, byte[] prefix) : base(inner, prefix) + { + } + + public IBatch CreateBatch(object? asyncState = null) => + new KeyPrefixedBatch(Inner.CreateBatch(asyncState), Prefix); + + public ITransaction CreateTransaction(object? 
asyncState = null) => + new KeyPrefixedTransaction(Inner.CreateTransaction(asyncState), Prefix); + + public int Database => Inner.Database; + + public RedisValue DebugObject(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.DebugObject(ToInner(key), flags); + + public bool GeoAdd(RedisKey key, double longitude, double latitude, RedisValue member, CommandFlags flags = CommandFlags.None) => + Inner.GeoAdd(ToInner(key), longitude, latitude, member, flags); + + public long GeoAdd(RedisKey key, GeoEntry[] values, CommandFlags flags = CommandFlags.None) => + Inner.GeoAdd(ToInner(key), values, flags); + + public bool GeoAdd(RedisKey key, GeoEntry value, CommandFlags flags = CommandFlags.None) => + Inner.GeoAdd(ToInner(key), value, flags); + + public bool GeoRemove(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) => + Inner.GeoRemove(ToInner(key), member, flags); + + public double? GeoDistance(RedisKey key, RedisValue member1, RedisValue member2, GeoUnit unit = GeoUnit.Meters, CommandFlags flags = CommandFlags.None) => + Inner.GeoDistance(ToInner(key), member1, member2, unit, flags); + + public string?[] GeoHash(RedisKey key, RedisValue[] members, CommandFlags flags = CommandFlags.None) => + Inner.GeoHash(ToInner(key), members, flags); + + public string? GeoHash(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) => + Inner.GeoHash(ToInner(key), member, flags); + + public GeoPosition?[] GeoPosition(RedisKey key, RedisValue[] members, CommandFlags flags = CommandFlags.None) => + Inner.GeoPosition(ToInner(key), members, flags); + + public GeoPosition? GeoPosition(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) => + Inner.GeoPosition(ToInner(key), member, flags); + + public GeoRadiusResult[] GeoRadius(RedisKey key, RedisValue member, double radius, GeoUnit unit = GeoUnit.Meters, int count = -1, Order? 
order = null, GeoRadiusOptions options = GeoRadiusOptions.Default, CommandFlags flags = CommandFlags.None) => + Inner.GeoRadius(ToInner(key), member, radius, unit, count, order, options, flags); + + public GeoRadiusResult[] GeoRadius(RedisKey key, double longitude, double latitude, double radius, GeoUnit unit = GeoUnit.Meters, int count = -1, Order? order = null, GeoRadiusOptions options = GeoRadiusOptions.Default, CommandFlags flags = CommandFlags.None) => + Inner.GeoRadius(ToInner(key), longitude, latitude, radius, unit, count, order, options, flags); + + public GeoRadiusResult[] GeoSearch(RedisKey key, RedisValue member, GeoSearchShape shape, int count = -1, bool demandClosest = true, Order? order = null, GeoRadiusOptions options = GeoRadiusOptions.Default, CommandFlags flags = CommandFlags.None) => + Inner.GeoSearch(ToInner(key), member, shape, count, demandClosest, order, options, flags); + + public GeoRadiusResult[] GeoSearch(RedisKey key, double longitude, double latitude, GeoSearchShape shape, int count = -1, bool demandClosest = true, Order? order = null, GeoRadiusOptions options = GeoRadiusOptions.Default, CommandFlags flags = CommandFlags.None) => + Inner.GeoSearch(ToInner(key), longitude, latitude, shape, count, demandClosest, order, options, flags); + + public long GeoSearchAndStore(RedisKey sourceKey, RedisKey destinationKey, RedisValue member, GeoSearchShape shape, int count = -1, bool demandClosest = true, Order? order = null, bool storeDistances = false, CommandFlags flags = CommandFlags.None) => + Inner.GeoSearchAndStore(ToInner(sourceKey), ToInner(destinationKey), member, shape, count, demandClosest, order, storeDistances, flags); + + public long GeoSearchAndStore(RedisKey sourceKey, RedisKey destinationKey, double longitude, double latitude, GeoSearchShape shape, int count = -1, bool demandClosest = true, Order? 
order = null, bool storeDistances = false, CommandFlags flags = CommandFlags.None) => + Inner.GeoSearchAndStore(ToInner(sourceKey), ToInner(destinationKey), longitude, latitude, shape, count, demandClosest, order, storeDistances, flags); + + public double HashDecrement(RedisKey key, RedisValue hashField, double value, CommandFlags flags = CommandFlags.None) => + Inner.HashDecrement(ToInner(key), hashField, value, flags); + + public long HashDecrement(RedisKey key, RedisValue hashField, long value = 1, CommandFlags flags = CommandFlags.None) => + Inner.HashDecrement(ToInner(key), hashField, value, flags); + + public long HashDelete(RedisKey key, RedisValue[] hashFields, CommandFlags flags = CommandFlags.None) => + Inner.HashDelete(ToInner(key), hashFields, flags); + + public bool HashDelete(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None) => + Inner.HashDelete(ToInner(key), hashField, flags); + + public bool HashExists(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None) => + Inner.HashExists(ToInner(key), hashField, flags); + + public RedisValue HashFieldGetAndDelete(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None) => + Inner.HashFieldGetAndDelete(ToInner(key), hashField, flags); + + public Lease? HashFieldGetLeaseAndDelete(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None) => + Inner.HashFieldGetLeaseAndDelete(ToInner(key), hashField, flags); + + public RedisValue[] HashFieldGetAndDelete(RedisKey key, RedisValue[] hashFields, CommandFlags flags = CommandFlags.None) => + Inner.HashFieldGetAndDelete(ToInner(key), hashFields, flags); + + public RedisValue HashFieldGetAndSetExpiry(RedisKey key, RedisValue hashField, TimeSpan? 
expiry = null, bool persist = false, CommandFlags flags = CommandFlags.None) => + Inner.HashFieldGetAndSetExpiry(ToInner(key), hashField, expiry, persist, flags); + + public RedisValue HashFieldGetAndSetExpiry(RedisKey key, RedisValue hashField, DateTime expiry, CommandFlags flags = CommandFlags.None) => + Inner.HashFieldGetAndSetExpiry(ToInner(key), hashField, expiry, flags); + + public Lease? HashFieldGetLeaseAndSetExpiry(RedisKey key, RedisValue hashField, TimeSpan? expiry = null, bool persist = false, CommandFlags flags = CommandFlags.None) => + Inner.HashFieldGetLeaseAndSetExpiry(ToInner(key), hashField, expiry, persist, flags); + + public Lease? HashFieldGetLeaseAndSetExpiry(RedisKey key, RedisValue hashField, DateTime expiry, CommandFlags flags = CommandFlags.None) => + Inner.HashFieldGetLeaseAndSetExpiry(ToInner(key), hashField, expiry, flags); + + public RedisValue[] HashFieldGetAndSetExpiry(RedisKey key, RedisValue[] hashFields, TimeSpan? expiry = null, bool persist = false, CommandFlags flags = CommandFlags.None) => + Inner.HashFieldGetAndSetExpiry(ToInner(key), hashFields, expiry, persist, flags); + + public RedisValue[] HashFieldGetAndSetExpiry(RedisKey key, RedisValue[] hashFields, DateTime expiry, CommandFlags flags = CommandFlags.None) => + Inner.HashFieldGetAndSetExpiry(ToInner(key), hashFields, expiry, flags); + + public RedisValue HashFieldSetAndSetExpiry(RedisKey key, RedisValue field, RedisValue value, TimeSpan? 
expiry = null, bool keepTtl = false, When when = When.Always, CommandFlags flags = CommandFlags.None) => + Inner.HashFieldSetAndSetExpiry(ToInner(key), field, value, expiry, keepTtl, when, flags); + + public RedisValue HashFieldSetAndSetExpiry(RedisKey key, RedisValue field, RedisValue value, DateTime expiry, When when = When.Always, CommandFlags flags = CommandFlags.None) => + Inner.HashFieldSetAndSetExpiry(ToInner(key), field, value, expiry, when, flags); + + public RedisValue HashFieldSetAndSetExpiry(RedisKey key, HashEntry[] hashFields, TimeSpan? expiry = null, bool keepTtl = false, When when = When.Always, CommandFlags flags = CommandFlags.None) => + Inner.HashFieldSetAndSetExpiry(ToInner(key), hashFields, expiry, keepTtl, when, flags); + + public RedisValue HashFieldSetAndSetExpiry(RedisKey key, HashEntry[] hashFields, DateTime expiry, When when = When.Always, CommandFlags flags = CommandFlags.None) => + Inner.HashFieldSetAndSetExpiry(ToInner(key), hashFields, expiry, when, flags); + + public ExpireResult[] HashFieldExpire(RedisKey key, RedisValue[] hashFields, TimeSpan expiry, ExpireWhen when = ExpireWhen.Always, CommandFlags flags = CommandFlags.None) => + Inner.HashFieldExpire(ToInner(key), hashFields, expiry, when, flags); + + public ExpireResult[] HashFieldExpire(RedisKey key, RedisValue[] hashFields, DateTime expiry, ExpireWhen when = ExpireWhen.Always, CommandFlags flags = CommandFlags.None) => + Inner.HashFieldExpire(ToInner(key), hashFields, expiry, when, flags); + + public long[] HashFieldGetExpireDateTime(RedisKey key, RedisValue[] hashFields, CommandFlags flags) => + Inner.HashFieldGetExpireDateTime(ToInner(key), hashFields, flags); + + public PersistResult[] HashFieldPersist(RedisKey key, RedisValue[] hashFields, CommandFlags flags) => + Inner.HashFieldPersist(ToInner(key), hashFields, flags); + + public long[] HashFieldGetTimeToLive(RedisKey key, RedisValue[] hashFields, CommandFlags flags) => + Inner.HashFieldGetTimeToLive(ToInner(key), 
hashFields, flags); + + public HashEntry[] HashGetAll(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.HashGetAll(ToInner(key), flags); + + public RedisValue[] HashGet(RedisKey key, RedisValue[] hashFields, CommandFlags flags = CommandFlags.None) => + Inner.HashGet(ToInner(key), hashFields, flags); + + public RedisValue HashGet(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None) => + Inner.HashGet(ToInner(key), hashField, flags); + + public Lease? HashGetLease(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None) => + Inner.HashGetLease(ToInner(key), hashField, flags); + + public double HashIncrement(RedisKey key, RedisValue hashField, double value, CommandFlags flags = CommandFlags.None) => + Inner.HashIncrement(ToInner(key), hashField, value, flags); + + public long HashIncrement(RedisKey key, RedisValue hashField, long value = 1, CommandFlags flags = CommandFlags.None) => + Inner.HashIncrement(ToInner(key), hashField, value, flags); + + public RedisValue[] HashKeys(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.HashKeys(ToInner(key), flags); + + public long HashLength(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.HashLength(ToInner(key), flags); + + public RedisValue HashRandomField(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.HashRandomField(ToInner(key), flags); + + public RedisValue[] HashRandomFields(RedisKey key, long count, CommandFlags flags = CommandFlags.None) => + Inner.HashRandomFields(ToInner(key), count, flags); + + public HashEntry[] HashRandomFieldsWithValues(RedisKey key, long count, CommandFlags flags = CommandFlags.None) => + Inner.HashRandomFieldsWithValues(ToInner(key), count, flags); + + public bool HashSet(RedisKey key, RedisValue hashField, RedisValue value, When when = When.Always, CommandFlags flags = CommandFlags.None) => + Inner.HashSet(ToInner(key), hashField, value, when, flags); + + public long 
HashStringLength(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None) => + Inner.HashStringLength(ToInner(key), hashField, flags); + + public void HashSet(RedisKey key, HashEntry[] hashFields, CommandFlags flags = CommandFlags.None) => + Inner.HashSet(ToInner(key), hashFields, flags); + + public RedisValue[] HashValues(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.HashValues(ToInner(key), flags); + + public bool HyperLogLogAdd(RedisKey key, RedisValue[] values, CommandFlags flags = CommandFlags.None) => + Inner.HyperLogLogAdd(ToInner(key), values, flags); + + public bool HyperLogLogAdd(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None) => + Inner.HyperLogLogAdd(ToInner(key), value, flags); + + public long HyperLogLogLength(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.HyperLogLogLength(ToInner(key), flags); + + public long HyperLogLogLength(RedisKey[] keys, CommandFlags flags = CommandFlags.None) => + Inner.HyperLogLogLength(ToInner(keys), flags); + + public void HyperLogLogMerge(RedisKey destination, RedisKey[] sourceKeys, CommandFlags flags = CommandFlags.None) => + Inner.HyperLogLogMerge(ToInner(destination), ToInner(sourceKeys), flags); + + public void HyperLogLogMerge(RedisKey destination, RedisKey first, RedisKey second, CommandFlags flags = CommandFlags.None) => + Inner.HyperLogLogMerge(ToInner(destination), ToInner(first), ToInner(second), flags); + + public EndPoint? 
IdentifyEndpoint(RedisKey key = default, CommandFlags flags = CommandFlags.None) => + Inner.IdentifyEndpoint(ToInner(key), flags); + + public bool KeyCopy(RedisKey sourceKey, RedisKey destinationKey, int destinationDatabase = -1, bool replace = false, CommandFlags flags = CommandFlags.None) => + Inner.KeyCopy(ToInner(sourceKey), ToInner(destinationKey), destinationDatabase, replace, flags); + + public long KeyDelete(RedisKey[] keys, CommandFlags flags = CommandFlags.None) => + Inner.KeyDelete(ToInner(keys), flags); + + public bool KeyDelete(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.KeyDelete(ToInner(key), flags); + + public byte[]? KeyDump(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.KeyDump(ToInner(key), flags); + + public string? KeyEncoding(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.KeyEncoding(ToInner(key), flags); + + public bool KeyExists(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.KeyExists(ToInner(key), flags); + public long KeyExists(RedisKey[] keys, CommandFlags flags = CommandFlags.None) => + Inner.KeyExists(ToInner(keys), flags); + + public bool KeyExpire(RedisKey key, DateTime? expiry, CommandFlags flags) => + Inner.KeyExpire(ToInner(key), expiry, flags); + + public bool KeyExpire(RedisKey key, DateTime? expiry, ExpireWhen when = ExpireWhen.Always, CommandFlags flags = CommandFlags.None) => + Inner.KeyExpire(ToInner(key), expiry, when, flags); + + public bool KeyExpire(RedisKey key, TimeSpan? expiry, CommandFlags flags) => + Inner.KeyExpire(ToInner(key), expiry, flags); + + public bool KeyExpire(RedisKey key, TimeSpan? expiry, ExpireWhen when = ExpireWhen.Always, CommandFlags flags = CommandFlags.None) => + Inner.KeyExpire(ToInner(key), expiry, when, flags); + + public DateTime? KeyExpireTime(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.KeyExpireTime(ToInner(key), flags); + + public long? 
KeyFrequency(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.KeyFrequency(ToInner(key), flags); + + public TimeSpan? KeyIdleTime(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.KeyIdleTime(ToInner(key), flags); + + public void KeyMigrate(RedisKey key, EndPoint toServer, int toDatabase = 0, int timeoutMilliseconds = 0, MigrateOptions migrateOptions = MigrateOptions.None, CommandFlags flags = CommandFlags.None) => + Inner.KeyMigrate(ToInner(key), toServer, toDatabase, timeoutMilliseconds, migrateOptions, flags); + + public bool KeyMove(RedisKey key, int database, CommandFlags flags = CommandFlags.None) => + Inner.KeyMove(ToInner(key), database, flags); + + public bool KeyPersist(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.KeyPersist(ToInner(key), flags); + + public RedisKey KeyRandom(CommandFlags flags = CommandFlags.None) => + throw new NotSupportedException("RANDOMKEY is not supported when a key-prefix is specified"); + + public long? KeyRefCount(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.KeyRefCount(ToInner(key), flags); + + public bool KeyRename(RedisKey key, RedisKey newKey, When when = When.Always, CommandFlags flags = CommandFlags.None) => + Inner.KeyRename(ToInner(key), ToInner(newKey), when, flags); + + public void KeyRestore(RedisKey key, byte[] value, TimeSpan? expiry = null, CommandFlags flags = CommandFlags.None) => + Inner.KeyRestore(ToInner(key), value, expiry, flags); + + public TimeSpan? 
KeyTimeToLive(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.KeyTimeToLive(ToInner(key), flags); + + public RedisType KeyType(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.KeyType(ToInner(key), flags); + + public RedisValue ListGetByIndex(RedisKey key, long index, CommandFlags flags = CommandFlags.None) => + Inner.ListGetByIndex(ToInner(key), index, flags); + + public long ListInsertAfter(RedisKey key, RedisValue pivot, RedisValue value, CommandFlags flags = CommandFlags.None) => + Inner.ListInsertAfter(ToInner(key), pivot, value, flags); + + public long ListInsertBefore(RedisKey key, RedisValue pivot, RedisValue value, CommandFlags flags = CommandFlags.None) => + Inner.ListInsertBefore(ToInner(key), pivot, value, flags); + + public RedisValue ListLeftPop(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.ListLeftPop(ToInner(key), flags); + + public RedisValue[] ListLeftPop(RedisKey key, long count, CommandFlags flags = CommandFlags.None) => + Inner.ListLeftPop(ToInner(key), count, flags); + + public ListPopResult ListLeftPop(RedisKey[] keys, long count, CommandFlags flags = CommandFlags.None) => + Inner.ListLeftPop(ToInner(keys), count, flags); + + public long ListPosition(RedisKey key, RedisValue element, long rank = 1, long maxLength = 0, CommandFlags flags = CommandFlags.None) => + Inner.ListPosition(ToInner(key), element, rank, maxLength, flags); + + public long[] ListPositions(RedisKey key, RedisValue element, long count, long rank = 1, long maxLength = 0, CommandFlags flags = CommandFlags.None) => + Inner.ListPositions(ToInner(key), element, count, rank, maxLength, flags); + + public long ListLeftPush(RedisKey key, RedisValue[] values, CommandFlags flags = CommandFlags.None) => + Inner.ListLeftPush(ToInner(key), values, flags); + + public long ListLeftPush(RedisKey key, RedisValue[] values, When when = When.Always, CommandFlags flags = CommandFlags.None) => + Inner.ListLeftPush(ToInner(key), values, when, 
flags); + + public long ListLeftPush(RedisKey key, RedisValue value, When when = When.Always, CommandFlags flags = CommandFlags.None) => + Inner.ListLeftPush(ToInner(key), value, when, flags); + + public long ListLength(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.ListLength(ToInner(key), flags); + + public RedisValue ListMove(RedisKey sourceKey, RedisKey destinationKey, ListSide sourceSide, ListSide destinationSide, CommandFlags flags = CommandFlags.None) => + Inner.ListMove(ToInner(sourceKey), ToInner(destinationKey), sourceSide, destinationSide, flags); + + public RedisValue[] ListRange(RedisKey key, long start = 0, long stop = -1, CommandFlags flags = CommandFlags.None) => + Inner.ListRange(ToInner(key), start, stop, flags); + + public long ListRemove(RedisKey key, RedisValue value, long count = 0, CommandFlags flags = CommandFlags.None) => + Inner.ListRemove(ToInner(key), value, count, flags); + + public RedisValue ListRightPop(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.ListRightPop(ToInner(key), flags); + + public RedisValue[] ListRightPop(RedisKey key, long count, CommandFlags flags = CommandFlags.None) => + Inner.ListRightPop(ToInner(key), count, flags); + + public ListPopResult ListRightPop(RedisKey[] keys, long count, CommandFlags flags = CommandFlags.None) => + Inner.ListRightPop(ToInner(keys), count, flags); + + public RedisValue ListRightPopLeftPush(RedisKey source, RedisKey destination, CommandFlags flags = CommandFlags.None) => + Inner.ListRightPopLeftPush(ToInner(source), ToInner(destination), flags); + + public long ListRightPush(RedisKey key, RedisValue[] values, CommandFlags flags = CommandFlags.None) => + Inner.ListRightPush(ToInner(key), values, flags); + + public long ListRightPush(RedisKey key, RedisValue[] values, When when = When.Always, CommandFlags flags = CommandFlags.None) => + Inner.ListRightPush(ToInner(key), values, when, flags); + + public long ListRightPush(RedisKey key, RedisValue value, When 
when = When.Always, CommandFlags flags = CommandFlags.None) => + Inner.ListRightPush(ToInner(key), value, when, flags); + + public void ListSetByIndex(RedisKey key, long index, RedisValue value, CommandFlags flags = CommandFlags.None) => + Inner.ListSetByIndex(ToInner(key), index, value, flags); + + public void ListTrim(RedisKey key, long start, long stop, CommandFlags flags = CommandFlags.None) => + Inner.ListTrim(ToInner(key), start, stop, flags); + + public bool LockExtend(RedisKey key, RedisValue value, TimeSpan expiry, CommandFlags flags = CommandFlags.None) => + Inner.LockExtend(ToInner(key), value, expiry, flags); + + public RedisValue LockQuery(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.LockQuery(ToInner(key), flags); + + public bool LockRelease(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None) => + Inner.LockRelease(ToInner(key), value, flags); + + public bool LockTake(RedisKey key, RedisValue value, TimeSpan expiry, CommandFlags flags = CommandFlags.None) => + Inner.LockTake(ToInner(key), value, expiry, flags); + + public string? 
StringLongestCommonSubsequence(RedisKey first, RedisKey second, CommandFlags flags = CommandFlags.None) => + Inner.StringLongestCommonSubsequence(ToInner(first), ToInner(second), flags); + + public long StringLongestCommonSubsequenceLength(RedisKey first, RedisKey second, CommandFlags flags = CommandFlags.None) => + Inner.StringLongestCommonSubsequenceLength(ToInner(first), ToInner(second), flags); + + public LCSMatchResult StringLongestCommonSubsequenceWithMatches(RedisKey first, RedisKey second, long minLength = 0, CommandFlags flags = CommandFlags.None) => + Inner.StringLongestCommonSubsequenceWithMatches(ToInner(first), ToInner(second), minLength, flags); + + public long Publish(RedisChannel channel, RedisValue message, CommandFlags flags = CommandFlags.None) => + Inner.Publish(ToInner(channel), message, flags); + + public RedisResult Execute(string command, params object[] args) + => Inner.Execute(command, ToInner(args), CommandFlags.None); + + public RedisResult Execute(string command, ICollection args, CommandFlags flags = CommandFlags.None) + => Inner.Execute(command, ToInner(args), flags); + + public RedisResult ScriptEvaluate(byte[] hash, RedisKey[]? keys = null, RedisValue[]? values = null, CommandFlags flags = CommandFlags.None) => + // TODO: The return value could contain prefixed keys. It might make sense to 'unprefix' those? + Inner.ScriptEvaluate(hash, ToInner(keys), values, flags); + + public RedisResult ScriptEvaluate(string script, RedisKey[]? keys = null, RedisValue[]? values = null, CommandFlags flags = CommandFlags.None) => + // TODO: The return value could contain prefixed keys. It might make sense to 'unprefix' those? + Inner.ScriptEvaluate(script: script, keys: ToInner(keys), values: values, flags: flags); + + public RedisResult ScriptEvaluate(LuaScript script, object? parameters = null, CommandFlags flags = CommandFlags.None) => + // TODO: The return value could contain prefixed keys. It might make sense to 'unprefix' those? 
+ script.Evaluate(Inner, parameters, Prefix, flags); + + public RedisResult ScriptEvaluate(LoadedLuaScript script, object? parameters = null, CommandFlags flags = CommandFlags.None) => + // TODO: The return value could contain prefixed keys. It might make sense to 'unprefix' those? + script.Evaluate(Inner, parameters, Prefix, flags); + + public RedisResult ScriptEvaluateReadOnly(byte[] hash, RedisKey[]? keys = null, RedisValue[]? values = null, CommandFlags flags = CommandFlags.None) => + // TODO: The return value could contain prefixed keys. It might make sense to 'unprefix' those? + Inner.ScriptEvaluateReadOnly(hash, ToInner(keys), values, flags); + + public RedisResult ScriptEvaluateReadOnly(string script, RedisKey[]? keys = null, RedisValue[]? values = null, CommandFlags flags = CommandFlags.None) => + // TODO: The return value could contain prefixed keys. It might make sense to 'unprefix' those? + Inner.ScriptEvaluateReadOnly(script, ToInner(keys), values, flags); + + public long SetAdd(RedisKey key, RedisValue[] values, CommandFlags flags = CommandFlags.None) => + Inner.SetAdd(ToInner(key), values, flags); + + public bool SetAdd(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None) => + Inner.SetAdd(ToInner(key), value, flags); + + public long SetCombineAndStore(SetOperation operation, RedisKey destination, RedisKey[] keys, CommandFlags flags = CommandFlags.None) => + Inner.SetCombineAndStore(operation, ToInner(destination), ToInner(keys), flags); + + public long SetCombineAndStore(SetOperation operation, RedisKey destination, RedisKey first, RedisKey second, CommandFlags flags = CommandFlags.None) => + Inner.SetCombineAndStore(operation, ToInner(destination), ToInner(first), ToInner(second), flags); + + public RedisValue[] SetCombine(SetOperation operation, RedisKey[] keys, CommandFlags flags = CommandFlags.None) => + Inner.SetCombine(operation, ToInner(keys), flags); + + public RedisValue[] SetCombine(SetOperation operation, RedisKey 
first, RedisKey second, CommandFlags flags = CommandFlags.None) => + Inner.SetCombine(operation, ToInner(first), ToInner(second), flags); + + public bool SetContains(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None) => + Inner.SetContains(ToInner(key), value, flags); + + public bool[] SetContains(RedisKey key, RedisValue[] values, CommandFlags flags = CommandFlags.None) => + Inner.SetContains(ToInner(key), values, flags); + + public long SetIntersectionLength(RedisKey[] keys, long limit = 0, CommandFlags flags = CommandFlags.None) => + Inner.SetIntersectionLength(ToInner(keys), limit, flags); + + public long SetLength(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.SetLength(ToInner(key), flags); + + public RedisValue[] SetMembers(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.SetMembers(ToInner(key), flags); + + public bool SetMove(RedisKey source, RedisKey destination, RedisValue value, CommandFlags flags = CommandFlags.None) => + Inner.SetMove(ToInner(source), ToInner(destination), value, flags); + + public RedisValue SetPop(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.SetPop(ToInner(key), flags); + + public RedisValue[] SetPop(RedisKey key, long count, CommandFlags flags = CommandFlags.None) => + Inner.SetPop(ToInner(key), count, flags); + + public RedisValue SetRandomMember(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.SetRandomMember(ToInner(key), flags); + + public RedisValue[] SetRandomMembers(RedisKey key, long count, CommandFlags flags = CommandFlags.None) => + Inner.SetRandomMembers(ToInner(key), count, flags); + + public long SetRemove(RedisKey key, RedisValue[] values, CommandFlags flags = CommandFlags.None) => + Inner.SetRemove(ToInner(key), values, flags); + + public bool SetRemove(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None) => + Inner.SetRemove(ToInner(key), value, flags); + + public long SortAndStore(RedisKey destination, 
RedisKey key, long skip = 0, long take = -1, Order order = Order.Ascending, SortType sortType = SortType.Numeric, RedisValue by = default, RedisValue[]? get = null, CommandFlags flags = CommandFlags.None) => + Inner.SortAndStore(ToInner(destination), ToInner(key), skip, take, order, sortType, SortByToInner(by), SortGetToInner(get), flags); + + public RedisValue[] Sort(RedisKey key, long skip = 0, long take = -1, Order order = Order.Ascending, SortType sortType = SortType.Numeric, RedisValue by = default, RedisValue[]? get = null, CommandFlags flags = CommandFlags.None) => + Inner.Sort(ToInner(key), skip, take, order, sortType, SortByToInner(by), SortGetToInner(get), flags); + + public long SortedSetAdd(RedisKey key, SortedSetEntry[] values, CommandFlags flags) => + Inner.SortedSetAdd(ToInner(key), values, flags); + + public long SortedSetAdd(RedisKey key, SortedSetEntry[] values, When when = When.Always, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetAdd(ToInner(key), values, when, flags); + + public long SortedSetAdd(RedisKey key, SortedSetEntry[] values, SortedSetWhen when = SortedSetWhen.Always, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetAdd(ToInner(key), values, when, flags); + + public bool SortedSetAdd(RedisKey key, RedisValue member, double score, CommandFlags flags) => + Inner.SortedSetAdd(ToInner(key), member, score, flags); + + public bool SortedSetAdd(RedisKey key, RedisValue member, double score, When when = When.Always, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetAdd(ToInner(key), member, score, when, flags); + + public bool SortedSetAdd(RedisKey key, RedisValue member, double score, SortedSetWhen when = SortedSetWhen.Always, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetAdd(ToInner(key), member, score, when, flags); + + public RedisValue[] SortedSetCombine(SetOperation operation, RedisKey[] keys, double[]? 
weights = null, Aggregate aggregate = Aggregate.Sum, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetCombine(operation, ToInner(keys), weights, aggregate, flags); + + public SortedSetEntry[] SortedSetCombineWithScores(SetOperation operation, RedisKey[] keys, double[]? weights = null, Aggregate aggregate = Aggregate.Sum, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetCombineWithScores(operation, ToInner(keys), weights, aggregate, flags); + + public long SortedSetCombineAndStore(SetOperation operation, RedisKey destination, RedisKey[] keys, double[]? weights = null, Aggregate aggregate = Aggregate.Sum, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetCombineAndStore(operation, ToInner(destination), ToInner(keys), weights, aggregate, flags); + + public long SortedSetCombineAndStore(SetOperation operation, RedisKey destination, RedisKey first, RedisKey second, Aggregate aggregate = Aggregate.Sum, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetCombineAndStore(operation, ToInner(destination), ToInner(first), ToInner(second), aggregate, flags); + + public double SortedSetDecrement(RedisKey key, RedisValue member, double value, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetDecrement(ToInner(key), member, value, flags); + + public double SortedSetIncrement(RedisKey key, RedisValue member, double value, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetIncrement(ToInner(key), member, value, flags); + + public long SortedSetIntersectionLength(RedisKey[] keys, long limit = 0, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetIntersectionLength(ToInner(keys), limit, flags); + + public long SortedSetLength(RedisKey key, double min = -1.0 / 0.0, double max = 1.0 / 0.0, Exclude exclude = Exclude.None, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetLength(ToInner(key), min, max, exclude, flags); + + public long SortedSetLengthByValue(RedisKey key, RedisValue min, RedisValue max, Exclude 
exclude = Exclude.None, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetLengthByValue(ToInner(key), min, max, exclude, flags); + + public RedisValue SortedSetRandomMember(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetRandomMember(ToInner(key), flags); + + public RedisValue[] SortedSetRandomMembers(RedisKey key, long count, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetRandomMembers(ToInner(key), count, flags); + + public SortedSetEntry[] SortedSetRandomMembersWithScores(RedisKey key, long count, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetRandomMembersWithScores(ToInner(key), count, flags); + + public RedisValue[] SortedSetRangeByRank(RedisKey key, long start = 0, long stop = -1, Order order = Order.Ascending, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetRangeByRank(ToInner(key), start, stop, order, flags); + + public long SortedSetRangeAndStore( + RedisKey destinationKey, + RedisKey sourceKey, + RedisValue start, + RedisValue stop, + SortedSetOrder sortedSetOrder = SortedSetOrder.ByRank, + Exclude exclude = Exclude.None, + Order order = Order.Ascending, + long skip = 0, + long? 
take = null, + CommandFlags flags = CommandFlags.None) => + Inner.SortedSetRangeAndStore(ToInner(destinationKey), ToInner(sourceKey), start, stop, sortedSetOrder, exclude, order, skip, take, flags); + + public SortedSetEntry[] SortedSetRangeByRankWithScores(RedisKey key, long start = 0, long stop = -1, Order order = Order.Ascending, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetRangeByRankWithScores(ToInner(key), start, stop, order, flags); + + public RedisValue[] SortedSetRangeByScore(RedisKey key, double start = -1.0 / 0.0, double stop = 1.0 / 0.0, Exclude exclude = Exclude.None, Order order = Order.Ascending, long skip = 0, long take = -1, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetRangeByScore(ToInner(key), start, stop, exclude, order, skip, take, flags); + + public SortedSetEntry[] SortedSetRangeByScoreWithScores(RedisKey key, double start = -1.0 / 0.0, double stop = 1.0 / 0.0, Exclude exclude = Exclude.None, Order order = Order.Ascending, long skip = 0, long take = -1, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetRangeByScoreWithScores(ToInner(key), start, stop, exclude, order, skip, take, flags); + + public RedisValue[] SortedSetRangeByValue(RedisKey key, RedisValue min, RedisValue max, Exclude exclude, long skip, long take, CommandFlags flags) => + Inner.SortedSetRangeByValue(ToInner(key), min, max, exclude, Order.Ascending, skip, take, flags); + + public RedisValue[] SortedSetRangeByValue(RedisKey key, RedisValue min = default, RedisValue max = default, Exclude exclude = Exclude.None, Order order = Order.Ascending, long skip = 0, long take = -1, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetRangeByValue(ToInner(key), min, max, exclude, order, skip, take, flags); + + public long? 
SortedSetRank(RedisKey key, RedisValue member, Order order = Order.Ascending, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetRank(ToInner(key), member, order, flags); + + public long SortedSetRemove(RedisKey key, RedisValue[] members, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetRemove(ToInner(key), members, flags); + + public bool SortedSetRemove(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetRemove(ToInner(key), member, flags); + + public long SortedSetRemoveRangeByRank(RedisKey key, long start, long stop, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetRemoveRangeByRank(ToInner(key), start, stop, flags); + + public long SortedSetRemoveRangeByScore(RedisKey key, double start, double stop, Exclude exclude = Exclude.None, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetRemoveRangeByScore(ToInner(key), start, stop, exclude, flags); + + public long SortedSetRemoveRangeByValue(RedisKey key, RedisValue min, RedisValue max, Exclude exclude = Exclude.None, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetRemoveRangeByValue(ToInner(key), min, max, exclude, flags); + + public double? SortedSetScore(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetScore(ToInner(key), member, flags); + + public double?[] SortedSetScores(RedisKey key, RedisValue[] members, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetScores(ToInner(key), members, flags); + + public long SortedSetUpdate(RedisKey key, SortedSetEntry[] values, SortedSetWhen when = SortedSetWhen.Always, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetUpdate(ToInner(key), values, when, flags); + + public bool SortedSetUpdate(RedisKey key, RedisValue member, double score, SortedSetWhen when = SortedSetWhen.Always, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetUpdate(ToInner(key), member, score, when, flags); + + public SortedSetEntry? 
SortedSetPop(RedisKey key, Order order = Order.Ascending, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetPop(ToInner(key), order, flags); + + public SortedSetEntry[] SortedSetPop(RedisKey key, long count, Order order = Order.Ascending, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetPop(ToInner(key), count, order, flags); + + public SortedSetPopResult SortedSetPop(RedisKey[] keys, long count, Order order = Order.Ascending, CommandFlags flags = CommandFlags.None) => + Inner.SortedSetPop(ToInner(keys), count, order, flags); + + public long StreamAcknowledge(RedisKey key, RedisValue groupName, RedisValue messageId, CommandFlags flags = CommandFlags.None) => + Inner.StreamAcknowledge(ToInner(key), groupName, messageId, flags); + + public long StreamAcknowledge(RedisKey key, RedisValue groupName, RedisValue[] messageIds, CommandFlags flags = CommandFlags.None) => + Inner.StreamAcknowledge(ToInner(key), groupName, messageIds, flags); + + public StreamTrimResult StreamAcknowledgeAndDelete(RedisKey key, RedisValue groupName, StreamTrimMode mode, RedisValue messageId, CommandFlags flags = CommandFlags.None) => + Inner.StreamAcknowledgeAndDelete(ToInner(key), groupName, mode, messageId, flags); + + public StreamTrimResult[] StreamAcknowledgeAndDelete(RedisKey key, RedisValue groupName, StreamTrimMode mode, RedisValue[] messageIds, CommandFlags flags = CommandFlags.None) => + Inner.StreamAcknowledgeAndDelete(ToInner(key), groupName, mode, messageIds, flags); + + public RedisValue StreamAdd(RedisKey key, RedisValue streamField, RedisValue streamValue, RedisValue? messageId, int? maxLength, bool useApproximateMaxLength, CommandFlags flags) => + Inner.StreamAdd(ToInner(key), streamField, streamValue, messageId, maxLength, useApproximateMaxLength, flags); + + public RedisValue StreamAdd(RedisKey key, NameValueEntry[] streamPairs, RedisValue? messageId, int? 
maxLength, bool useApproximateMaxLength, CommandFlags flags) => + Inner.StreamAdd(ToInner(key), streamPairs, messageId, maxLength, useApproximateMaxLength, flags); + + public RedisValue StreamAdd(RedisKey key, RedisValue streamField, RedisValue streamValue, RedisValue? messageId = null, long? maxLength = null, bool useApproximateMaxLength = false, long? limit = null, StreamTrimMode mode = StreamTrimMode.KeepReferences, CommandFlags flags = CommandFlags.None) => + Inner.StreamAdd(ToInner(key), streamField, streamValue, messageId, maxLength, useApproximateMaxLength, limit, mode, flags); + + public RedisValue StreamAdd(RedisKey key, NameValueEntry[] streamPairs, RedisValue? messageId = null, long? maxLength = null, bool useApproximateMaxLength = false, long? limit = null, StreamTrimMode mode = StreamTrimMode.KeepReferences, CommandFlags flags = CommandFlags.None) => + Inner.StreamAdd(ToInner(key), streamPairs, messageId, maxLength, useApproximateMaxLength, limit, mode, flags); + + public RedisValue StreamAdd(RedisKey key, RedisValue streamField, RedisValue streamValue, StreamIdempotentId idempotentId, long? maxLength = null, bool useApproximateMaxLength = false, long? limit = null, StreamTrimMode mode = StreamTrimMode.KeepReferences, CommandFlags flags = CommandFlags.None) => + Inner.StreamAdd(ToInner(key), streamField, streamValue, idempotentId, maxLength, useApproximateMaxLength, limit, mode, flags); + + public RedisValue StreamAdd(RedisKey key, NameValueEntry[] streamPairs, StreamIdempotentId idempotentId, long? maxLength = null, bool useApproximateMaxLength = false, long? 
limit = null, StreamTrimMode mode = StreamTrimMode.KeepReferences, CommandFlags flags = CommandFlags.None) => + Inner.StreamAdd(ToInner(key), streamPairs, idempotentId, maxLength, useApproximateMaxLength, limit, mode, flags); + + public void StreamConfigure(RedisKey key, StreamConfiguration configuration, CommandFlags flags = CommandFlags.None) => + Inner.StreamConfigure(ToInner(key), configuration, flags); + + public StreamAutoClaimResult StreamAutoClaim(RedisKey key, RedisValue consumerGroup, RedisValue claimingConsumer, long minIdleTimeInMs, RedisValue startAtId, int? count = null, CommandFlags flags = CommandFlags.None) => + Inner.StreamAutoClaim(ToInner(key), consumerGroup, claimingConsumer, minIdleTimeInMs, startAtId, count, flags); + + public StreamAutoClaimIdsOnlyResult StreamAutoClaimIdsOnly(RedisKey key, RedisValue consumerGroup, RedisValue claimingConsumer, long minIdleTimeInMs, RedisValue startAtId, int? count = null, CommandFlags flags = CommandFlags.None) => + Inner.StreamAutoClaimIdsOnly(ToInner(key), consumerGroup, claimingConsumer, minIdleTimeInMs, startAtId, count, flags); + + public StreamEntry[] StreamClaim(RedisKey key, RedisValue consumerGroup, RedisValue claimingConsumer, long minIdleTimeInMs, RedisValue[] messageIds, CommandFlags flags = CommandFlags.None) => + Inner.StreamClaim(ToInner(key), consumerGroup, claimingConsumer, minIdleTimeInMs, messageIds, flags); + + public RedisValue[] StreamClaimIdsOnly(RedisKey key, RedisValue consumerGroup, RedisValue claimingConsumer, long minIdleTimeInMs, RedisValue[] messageIds, CommandFlags flags = CommandFlags.None) => + Inner.StreamClaimIdsOnly(ToInner(key), consumerGroup, claimingConsumer, minIdleTimeInMs, messageIds, flags); + + public bool StreamConsumerGroupSetPosition(RedisKey key, RedisValue groupName, RedisValue position, CommandFlags flags = CommandFlags.None) => + Inner.StreamConsumerGroupSetPosition(ToInner(key), groupName, position, flags); + + public bool 
StreamCreateConsumerGroup(RedisKey key, RedisValue groupName, RedisValue? position, CommandFlags flags) => + Inner.StreamCreateConsumerGroup(ToInner(key), groupName, position, flags); + + public bool StreamCreateConsumerGroup(RedisKey key, RedisValue groupName, RedisValue? position = null, bool createStream = true, CommandFlags flags = CommandFlags.None) => + Inner.StreamCreateConsumerGroup(ToInner(key), groupName, position, createStream, flags); + + public StreamInfo StreamInfo(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.StreamInfo(ToInner(key), flags); + + public StreamGroupInfo[] StreamGroupInfo(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.StreamGroupInfo(ToInner(key), flags); + + public StreamConsumerInfo[] StreamConsumerInfo(RedisKey key, RedisValue groupName, CommandFlags flags = CommandFlags.None) => + Inner.StreamConsumerInfo(ToInner(key), groupName, flags); + + public long StreamLength(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.StreamLength(ToInner(key), flags); + + public long StreamDelete(RedisKey key, RedisValue[] messageIds, CommandFlags flags = CommandFlags.None) => + Inner.StreamDelete(ToInner(key), messageIds, flags); + + public StreamTrimResult[] StreamDelete(RedisKey key, RedisValue[] messageIds, StreamTrimMode mode, CommandFlags flags = CommandFlags.None) => + Inner.StreamDelete(ToInner(key), messageIds, mode, flags); + + public long StreamDeleteConsumer(RedisKey key, RedisValue groupName, RedisValue consumerName, CommandFlags flags = CommandFlags.None) => + Inner.StreamDeleteConsumer(ToInner(key), groupName, consumerName, flags); + + public bool StreamDeleteConsumerGroup(RedisKey key, RedisValue groupName, CommandFlags flags = CommandFlags.None) => + Inner.StreamDeleteConsumerGroup(ToInner(key), groupName, flags); + + public StreamPendingInfo StreamPending(RedisKey key, RedisValue groupName, CommandFlags flags = CommandFlags.None) => + Inner.StreamPending(ToInner(key), groupName, 
flags); + + public StreamPendingMessageInfo[] StreamPendingMessages(RedisKey key, RedisValue groupName, int count, RedisValue consumerName, RedisValue? minId, RedisValue? maxId, CommandFlags flags) => + Inner.StreamPendingMessages(ToInner(key), groupName, count, consumerName, minId, maxId, flags); + + public StreamPendingMessageInfo[] StreamPendingMessages(RedisKey key, RedisValue groupName, int count, RedisValue consumerName, RedisValue? minId = null, RedisValue? maxId = null, long? minIdleTimeInMs = null, CommandFlags flags = CommandFlags.None) => + Inner.StreamPendingMessages(ToInner(key), groupName, count, consumerName, minId, maxId, minIdleTimeInMs, flags); + + public StreamEntry[] StreamRange(RedisKey key, RedisValue? minId = null, RedisValue? maxId = null, int? count = null, Order messageOrder = Order.Ascending, CommandFlags flags = CommandFlags.None) => + Inner.StreamRange(ToInner(key), minId, maxId, count, messageOrder, flags); + + public StreamEntry[] StreamRead(RedisKey key, RedisValue position, int? count = null, CommandFlags flags = CommandFlags.None) => + Inner.StreamRead(ToInner(key), position, count, flags); + + public RedisStream[] StreamRead(StreamPosition[] streamPositions, int? countPerStream = null, CommandFlags flags = CommandFlags.None) => + Inner.StreamRead(streamPositions, countPerStream, flags); + + public StreamEntry[] StreamReadGroup(RedisKey key, RedisValue groupName, RedisValue consumerName, RedisValue? position, int? count, CommandFlags flags) => + Inner.StreamReadGroup(ToInner(key), groupName, consumerName, position, count, flags); + + public StreamEntry[] StreamReadGroup(RedisKey key, RedisValue groupName, RedisValue consumerName, RedisValue? position = null, int? 
count = null, bool noAck = false, CommandFlags flags = CommandFlags.None) => + Inner.StreamReadGroup(ToInner(key), groupName, consumerName, position, count, noAck, flags); + + public StreamEntry[] StreamReadGroup(RedisKey key, RedisValue groupName, RedisValue consumerName, RedisValue? position = null, int? count = null, bool noAck = false, TimeSpan? claimMinIdleTime = null, CommandFlags flags = CommandFlags.None) => + Inner.StreamReadGroup(ToInner(key), groupName, consumerName, position, count, noAck, claimMinIdleTime, flags); + + public RedisStream[] StreamReadGroup(StreamPosition[] streamPositions, RedisValue groupName, RedisValue consumerName, int? countPerStream, CommandFlags flags) => + Inner.StreamReadGroup(streamPositions, groupName, consumerName, countPerStream, flags); + + public RedisStream[] StreamReadGroup(StreamPosition[] streamPositions, RedisValue groupName, RedisValue consumerName, int? countPerStream = null, bool noAck = false, CommandFlags flags = CommandFlags.None) => + Inner.StreamReadGroup(streamPositions, groupName, consumerName, countPerStream, noAck, flags); + + public RedisStream[] StreamReadGroup(StreamPosition[] streamPositions, RedisValue groupName, RedisValue consumerName, int? countPerStream = null, bool noAck = false, TimeSpan? claimMinIdleTime = null, CommandFlags flags = CommandFlags.None) => + Inner.StreamReadGroup(streamPositions, groupName, consumerName, countPerStream, noAck, claimMinIdleTime, flags); + + public long StreamTrim(RedisKey key, int maxLength, bool useApproximateMaxLength, CommandFlags flags) => + Inner.StreamTrim(ToInner(key), maxLength, useApproximateMaxLength, flags); + + public long StreamTrim(RedisKey key, long maxLength, bool useApproximateMaxLength = false, long? 
limit = null, StreamTrimMode mode = StreamTrimMode.KeepReferences, CommandFlags flags = CommandFlags.None) => + Inner.StreamTrim(ToInner(key), maxLength, useApproximateMaxLength, limit, mode, flags); + + public long StreamTrimByMinId(RedisKey key, RedisValue minId, bool useApproximateMaxLength = false, long? limit = null, StreamTrimMode mode = StreamTrimMode.KeepReferences, CommandFlags flags = CommandFlags.None) => + Inner.StreamTrimByMinId(ToInner(key), minId, useApproximateMaxLength, limit, mode, flags); + + public long StringAppend(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None) => + Inner.StringAppend(ToInner(key), value, flags); + + public long StringBitCount(RedisKey key, long start, long end, CommandFlags flags) => + Inner.StringBitCount(ToInner(key), start, end, flags); + + public long StringBitCount(RedisKey key, long start = 0, long end = -1, StringIndexType indexType = StringIndexType.Byte, CommandFlags flags = CommandFlags.None) => + Inner.StringBitCount(ToInner(key), start, end, indexType, flags); + + public long StringBitOperation(Bitwise operation, RedisKey destination, RedisKey[] keys, CommandFlags flags = CommandFlags.None) => + Inner.StringBitOperation(operation, ToInner(destination), ToInner(keys), flags); + + public long StringBitOperation(Bitwise operation, RedisKey destination, RedisKey first, RedisKey second = default, CommandFlags flags = CommandFlags.None) => + Inner.StringBitOperation(operation, ToInner(destination), ToInner(first), ToInnerOrDefault(second), flags); + + public long StringBitPosition(RedisKey key, bool bit, long start, long end, CommandFlags flags) => + Inner.StringBitPosition(ToInner(key), bit, start, end, flags); + + public long StringBitPosition(RedisKey key, bool bit, long start = 0, long end = -1, StringIndexType indexType = StringIndexType.Byte, CommandFlags flags = CommandFlags.None) => + Inner.StringBitPosition(ToInner(key), bit, start, end, indexType, flags); + + public bool 
StringDelete(RedisKey key, ValueCondition when, CommandFlags flags = CommandFlags.None) => + Inner.StringDelete(ToInner(key), when, flags); + + public double StringDecrement(RedisKey key, double value, CommandFlags flags = CommandFlags.None) => + Inner.StringDecrement(ToInner(key), value, flags); + + public ValueCondition? StringDigest(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.StringDigest(ToInner(key), flags); + + public long StringDecrement(RedisKey key, long value = 1, CommandFlags flags = CommandFlags.None) => + Inner.StringDecrement(ToInner(key), value, flags); + + public RedisValue[] StringGet(RedisKey[] keys, CommandFlags flags = CommandFlags.None) => + Inner.StringGet(ToInner(keys), flags); + + public RedisValue StringGet(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.StringGet(ToInner(key), flags); + + public RedisValue StringGetSetExpiry(RedisKey key, TimeSpan? expiry, CommandFlags flags = CommandFlags.None) => + Inner.StringGetSetExpiry(ToInner(key), expiry, flags); + + public RedisValue StringGetSetExpiry(RedisKey key, DateTime expiry, CommandFlags flags = CommandFlags.None) => + Inner.StringGetSetExpiry(ToInner(key), expiry, flags); + + public Lease? 
StringGetLease(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.StringGetLease(ToInner(key), flags); + + public bool StringGetBit(RedisKey key, long offset, CommandFlags flags = CommandFlags.None) => + Inner.StringGetBit(ToInner(key), offset, flags); + + public RedisValue StringGetRange(RedisKey key, long start, long end, CommandFlags flags = CommandFlags.None) => + Inner.StringGetRange(ToInner(key), start, end, flags); + + public RedisValue StringGetSet(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None) => + Inner.StringGetSet(ToInner(key), value, flags); + + public RedisValue StringGetDelete(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.StringGetDelete(ToInner(key), flags); + + public RedisValueWithExpiry StringGetWithExpiry(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.StringGetWithExpiry(ToInner(key), flags); + + public double StringIncrement(RedisKey key, double value, CommandFlags flags = CommandFlags.None) => + Inner.StringIncrement(ToInner(key), value, flags); + + public long StringIncrement(RedisKey key, long value = 1, CommandFlags flags = CommandFlags.None) => + Inner.StringIncrement(ToInner(key), value, flags); + + public long StringLength(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.StringLength(ToInner(key), flags); + + public bool StringSet(RedisKey key, RedisValue value, Expiration expiry, ValueCondition when, CommandFlags flags = CommandFlags.None) + => Inner.StringSet(ToInner(key), value, expiry, when, flags); + + public bool StringSet(KeyValuePair[] values, When when = When.Always, CommandFlags flags = CommandFlags.None) => + Inner.StringSet(ToInner(values), when, flags); + + public bool StringSet(KeyValuePair[] values, When when, Expiration expiry, CommandFlags flags) => + Inner.StringSet(ToInner(values), when, expiry, flags); + + public bool StringSet(RedisKey key, RedisValue value, TimeSpan? 
expiry, When when) => + Inner.StringSet(ToInner(key), value, expiry, when); + public bool StringSet(RedisKey key, RedisValue value, TimeSpan? expiry, When when, CommandFlags flags) => + Inner.StringSet(ToInner(key), value, expiry, when, flags); + public bool StringSet(RedisKey key, RedisValue value, TimeSpan? expiry = null, bool keepTtl = false, When when = When.Always, CommandFlags flags = CommandFlags.None) => + Inner.StringSet(ToInner(key), value, expiry, keepTtl, when, flags); + + public RedisValue StringSetAndGet(RedisKey key, RedisValue value, TimeSpan? expiry, When when, CommandFlags flags) => + Inner.StringSetAndGet(ToInner(key), value, expiry, when, flags); + + public RedisValue StringSetAndGet(RedisKey key, RedisValue value, TimeSpan? expiry = null, bool keepTtl = false, When when = When.Always, CommandFlags flags = CommandFlags.None) => + Inner.StringSetAndGet(ToInner(key), value, expiry, keepTtl, when, flags); + + public bool StringSetBit(RedisKey key, long offset, bool bit, CommandFlags flags = CommandFlags.None) => + Inner.StringSetBit(ToInner(key), offset, bit, flags); + + public RedisValue StringSetRange(RedisKey key, long offset, RedisValue value, CommandFlags flags = CommandFlags.None) => + Inner.StringSetRange(ToInner(key), offset, value, flags); + + public TimeSpan Ping(CommandFlags flags = CommandFlags.None) => + Inner.Ping(flags); + + IEnumerable IDatabase.HashScan(RedisKey key, RedisValue pattern, int pageSize, CommandFlags flags) + => Inner.HashScan(ToInner(key), pattern, pageSize, flags); + + IEnumerable IDatabase.HashScan(RedisKey key, RedisValue pattern, int pageSize, long cursor, int pageOffset, CommandFlags flags) + => Inner.HashScan(ToInner(key), pattern, pageSize, cursor, pageOffset, flags); + + IEnumerable IDatabase.HashScanNoValues(RedisKey key, RedisValue pattern, int pageSize, long cursor, int pageOffset, CommandFlags flags) + => Inner.HashScanNoValues(ToInner(key), pattern, pageSize, cursor, pageOffset, flags); + + IEnumerable 
IDatabase.SetScan(RedisKey key, RedisValue pattern, int pageSize, CommandFlags flags) + => Inner.SetScan(ToInner(key), pattern, pageSize, flags); + + IEnumerable IDatabase.SetScan(RedisKey key, RedisValue pattern, int pageSize, long cursor, int pageOffset, CommandFlags flags) + => Inner.SetScan(ToInner(key), pattern, pageSize, cursor, pageOffset, flags); + + IEnumerable IDatabase.SortedSetScan(RedisKey key, RedisValue pattern, int pageSize, CommandFlags flags) + => Inner.SortedSetScan(ToInner(key), pattern, pageSize, flags); + + IEnumerable IDatabase.SortedSetScan(RedisKey key, RedisValue pattern, int pageSize, long cursor, int pageOffset, CommandFlags flags) + => Inner.SortedSetScan(ToInner(key), pattern, pageSize, cursor, pageOffset, flags); + + public bool KeyTouch(RedisKey key, CommandFlags flags = CommandFlags.None) => + Inner.KeyTouch(ToInner(key), flags); + + public long KeyTouch(RedisKey[] keys, CommandFlags flags = CommandFlags.None) => + Inner.KeyTouch(ToInner(keys), flags); + } +} diff --git a/src/StackExchange.Redis/KeyspaceIsolation/KeyPrefixedTransaction.cs b/src/StackExchange.Redis/KeyspaceIsolation/KeyPrefixedTransaction.cs new file mode 100644 index 000000000..89703ba6a --- /dev/null +++ b/src/StackExchange.Redis/KeyspaceIsolation/KeyPrefixedTransaction.cs @@ -0,0 +1,17 @@ +using System.Threading.Tasks; + +namespace StackExchange.Redis.KeyspaceIsolation +{ + internal sealed class KeyPrefixedTransaction : KeyPrefixed, ITransaction + { + public KeyPrefixedTransaction(ITransaction inner, byte[] prefix) : base(inner, prefix) { } + + public ConditionResult AddCondition(Condition condition) => Inner.AddCondition(condition.MapKeys(GetMapFunction())); + + public bool Execute(CommandFlags flags = CommandFlags.None) => Inner.Execute(flags); + + public Task ExecuteAsync(CommandFlags flags = CommandFlags.None) => Inner.ExecuteAsync(flags); + + public void Execute() => Inner.Execute(); + } +} diff --git 
a/src/StackExchange.Redis/KeyspaceIsolation/TransactionWrapper.cs b/src/StackExchange.Redis/KeyspaceIsolation/TransactionWrapper.cs deleted file mode 100644 index 2e941fde9..000000000 --- a/src/StackExchange.Redis/KeyspaceIsolation/TransactionWrapper.cs +++ /dev/null @@ -1,31 +0,0 @@ -using System.Threading.Tasks; - -namespace StackExchange.Redis.KeyspaceIsolation -{ - internal sealed class TransactionWrapper : WrapperBase, ITransaction - { - public TransactionWrapper(ITransaction inner, byte[] prefix) : base(inner, prefix) - { - } - - public ConditionResult AddCondition(Condition condition) - { - return Inner.AddCondition(condition?.MapKeys(GetMapFunction())); - } - - public bool Execute(CommandFlags flags = CommandFlags.None) - { - return Inner.Execute(flags); - } - - public Task ExecuteAsync(CommandFlags flags = CommandFlags.None) - { - return Inner.ExecuteAsync(flags); - } - - public void Execute() - { - Inner.Execute(); - } - } -} diff --git a/src/StackExchange.Redis/KeyspaceIsolation/WrapperBase.cs b/src/StackExchange.Redis/KeyspaceIsolation/WrapperBase.cs deleted file mode 100644 index ea33ef43f..000000000 --- a/src/StackExchange.Redis/KeyspaceIsolation/WrapperBase.cs +++ /dev/null @@ -1,1048 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Linq; -using System.Net; -using System.Threading.Tasks; - -namespace StackExchange.Redis.KeyspaceIsolation -{ - internal class WrapperBase : IDatabaseAsync where TInner : IDatabaseAsync - { - internal WrapperBase(TInner inner, byte[] keyPrefix) - { - Inner = inner; - Prefix = keyPrefix; - } - - public IConnectionMultiplexer Multiplexer => Inner.Multiplexer; - - internal TInner Inner { get; } - - internal byte[] Prefix { get; } - - public Task DebugObjectAsync(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.DebugObjectAsync(ToInner(key), flags); - } - - public Task GeoAddAsync(RedisKey key, double longitude, double latitude, RedisValue member, CommandFlags flags = 
CommandFlags.None) - => Inner.GeoAddAsync(ToInner(key), longitude, latitude, member, flags); - - public Task GeoAddAsync(RedisKey key, GeoEntry value, CommandFlags flags = CommandFlags.None) - => Inner.GeoAddAsync(ToInner(key), value, flags); - - public Task GeoAddAsync(RedisKey key, GeoEntry[] values, CommandFlags flags = CommandFlags.None) - => Inner.GeoAddAsync(ToInner(key), values, flags); - - public Task GeoRemoveAsync(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) - => Inner.GeoRemoveAsync(ToInner(key), member, flags); - - public Task GeoDistanceAsync(RedisKey key, RedisValue member1, RedisValue member2, GeoUnit unit = GeoUnit.Meters, CommandFlags flags = CommandFlags.None) - => Inner.GeoDistanceAsync(ToInner(key), member1, member2, unit, flags); - - public Task GeoHashAsync(RedisKey key, RedisValue[] members, CommandFlags flags = CommandFlags.None) - => Inner.GeoHashAsync(ToInner(key), members, flags); - - public Task GeoHashAsync(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) - => Inner.GeoHashAsync(ToInner(key), member, flags); - - public Task GeoPositionAsync(RedisKey key, RedisValue[] members, CommandFlags flags = CommandFlags.None) - => Inner.GeoPositionAsync(ToInner(key), members, flags); - - public Task GeoPositionAsync(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) - => Inner.GeoPositionAsync(ToInner(key), member, flags); - - public Task GeoRadiusAsync(RedisKey key, RedisValue member, double radius, GeoUnit unit = GeoUnit.Meters, int count = -1, Order? order = null, GeoRadiusOptions options = GeoRadiusOptions.Default, CommandFlags flags = CommandFlags.None) - => Inner.GeoRadiusAsync(ToInner(key), member, radius, unit, count, order, options, flags); - - public Task GeoRadiusAsync(RedisKey key, double longitude, double latitude, double radius, GeoUnit unit = GeoUnit.Meters, int count = -1, Order? 
order = null, GeoRadiusOptions options = GeoRadiusOptions.Default, CommandFlags flags = CommandFlags.None) - => Inner.GeoRadiusAsync(ToInner(key), longitude, latitude, radius, unit, count, order, options, flags); - - public Task HashDecrementAsync(RedisKey key, RedisValue hashField, double value, CommandFlags flags = CommandFlags.None) - { - return Inner.HashDecrementAsync(ToInner(key), hashField, value, flags); - } - - public Task HashDecrementAsync(RedisKey key, RedisValue hashField, long value = 1, CommandFlags flags = CommandFlags.None) - { - return Inner.HashDecrementAsync(ToInner(key), hashField, value, flags); - } - - public Task HashDeleteAsync(RedisKey key, RedisValue[] hashFields, CommandFlags flags = CommandFlags.None) - { - return Inner.HashDeleteAsync(ToInner(key), hashFields, flags); - } - - public Task HashDeleteAsync(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None) - { - return Inner.HashDeleteAsync(ToInner(key), hashField, flags); - } - - public Task HashExistsAsync(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None) - { - return Inner.HashExistsAsync(ToInner(key), hashField, flags); - } - - public Task HashGetAllAsync(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.HashGetAllAsync(ToInner(key), flags); - } - - public Task HashGetAsync(RedisKey key, RedisValue[] hashFields, CommandFlags flags = CommandFlags.None) - { - return Inner.HashGetAsync(ToInner(key), hashFields, flags); - } - - public Task HashGetAsync(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None) - { - return Inner.HashGetAsync(ToInner(key), hashField, flags); - } - - public Task> HashGetLeaseAsync(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None) - { - return Inner.HashGetLeaseAsync(ToInner(key), hashField, flags); - } - - public Task HashIncrementAsync(RedisKey key, RedisValue hashField, double value, CommandFlags flags = CommandFlags.None) - { - return 
Inner.HashIncrementAsync(ToInner(key), hashField, value, flags); - } - - public Task HashIncrementAsync(RedisKey key, RedisValue hashField, long value = 1, CommandFlags flags = CommandFlags.None) - { - return Inner.HashIncrementAsync(ToInner(key), hashField, value, flags); - } - - public Task HashKeysAsync(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.HashKeysAsync(ToInner(key), flags); - } - - public Task HashLengthAsync(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.HashLengthAsync(ToInner(key), flags); - } - - public IAsyncEnumerable HashScanAsync(RedisKey key, RedisValue pattern, int pageSize, long cursor, int pageOffset, CommandFlags flags) - => Inner.HashScanAsync(ToInner(key), pattern, pageSize, cursor, pageOffset, flags); - - public Task HashSetAsync(RedisKey key, RedisValue hashField, RedisValue value, When when = When.Always, CommandFlags flags = CommandFlags.None) - { - return Inner.HashSetAsync(ToInner(key), hashField, value, when, flags); - } - - public Task HashStringLengthAsync(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None) - { - return Inner.HashStringLengthAsync(ToInner(key), hashField, flags); - } - - public Task HashSetAsync(RedisKey key, HashEntry[] hashFields, CommandFlags flags = CommandFlags.None) - { - return Inner.HashSetAsync(ToInner(key), hashFields, flags); - } - - public Task HashValuesAsync(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.HashValuesAsync(ToInner(key), flags); - } - - public Task HyperLogLogAddAsync(RedisKey key, RedisValue[] values, CommandFlags flags = CommandFlags.None) - { - return Inner.HyperLogLogAddAsync(ToInner(key), values, flags); - } - - public Task HyperLogLogAddAsync(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None) - { - return Inner.HyperLogLogAddAsync(ToInner(key), value, flags); - } - - public Task HyperLogLogLengthAsync(RedisKey key, CommandFlags flags = CommandFlags.None) - { 
- return Inner.HyperLogLogLengthAsync(ToInner(key), flags); - } - - public Task HyperLogLogLengthAsync(RedisKey[] keys, CommandFlags flags = CommandFlags.None) - { - return Inner.HyperLogLogLengthAsync(ToInner(keys), flags); - } - - public Task HyperLogLogMergeAsync(RedisKey destination, RedisKey[] sourceKeys, CommandFlags flags = CommandFlags.None) - { - return Inner.HyperLogLogMergeAsync(ToInner(destination), ToInner(sourceKeys), flags); - } - - public Task HyperLogLogMergeAsync(RedisKey destination, RedisKey first, RedisKey second, CommandFlags flags = CommandFlags.None) - { - return Inner.HyperLogLogMergeAsync(ToInner(destination), ToInner(first), ToInner(second), flags); - } - - public Task IdentifyEndpointAsync(RedisKey key = default(RedisKey), CommandFlags flags = CommandFlags.None) - { - return Inner.IdentifyEndpointAsync(ToInner(key), flags); - } - - public bool IsConnected(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.IsConnected(ToInner(key), flags); - } - - public Task KeyDeleteAsync(RedisKey[] keys, CommandFlags flags = CommandFlags.None) - { - return Inner.KeyDeleteAsync(ToInner(keys), flags); - } - - public Task KeyDeleteAsync(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.KeyDeleteAsync(ToInner(key), flags); - } - - public Task KeyDumpAsync(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.KeyDumpAsync(ToInner(key), flags); - } - - public Task KeyExistsAsync(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.KeyExistsAsync(ToInner(key), flags); - } - - public Task KeyExistsAsync(RedisKey[] keys, CommandFlags flags = CommandFlags.None) - { - return Inner.KeyExistsAsync(ToInner(keys), flags); - } - - public Task KeyExpireAsync(RedisKey key, DateTime? expiry, CommandFlags flags = CommandFlags.None) - { - return Inner.KeyExpireAsync(ToInner(key), expiry, flags); - } - - public Task KeyExpireAsync(RedisKey key, TimeSpan? 
expiry, CommandFlags flags = CommandFlags.None) - { - return Inner.KeyExpireAsync(ToInner(key), expiry, flags); - } - - public Task KeyIdleTimeAsync(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.KeyIdleTimeAsync(ToInner(key), flags); - } - - public Task KeyMigrateAsync(RedisKey key, EndPoint toServer, int toDatabase = 0, int timeoutMilliseconds = 0, MigrateOptions migrateOptions = MigrateOptions.None, CommandFlags flags = CommandFlags.None) - { - return Inner.KeyMigrateAsync(ToInner(key), toServer, toDatabase, timeoutMilliseconds, migrateOptions, flags); - } - - public Task KeyMoveAsync(RedisKey key, int database, CommandFlags flags = CommandFlags.None) - { - return Inner.KeyMoveAsync(ToInner(key), database, flags); - } - - public Task KeyPersistAsync(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.KeyPersistAsync(ToInner(key), flags); - } - - public Task KeyRandomAsync(CommandFlags flags = CommandFlags.None) - { - throw new NotSupportedException("RANDOMKEY is not supported when a key-prefix is specified"); - } - - public Task KeyRenameAsync(RedisKey key, RedisKey newKey, When when = When.Always, CommandFlags flags = CommandFlags.None) - { - return Inner.KeyRenameAsync(ToInner(key), ToInner(newKey), when, flags); - } - - public Task KeyRestoreAsync(RedisKey key, byte[] value, TimeSpan? 
expiry = null, CommandFlags flags = CommandFlags.None) - { - return Inner.KeyRestoreAsync(ToInner(key), value, expiry, flags); - } - - public Task KeyTimeToLiveAsync(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.KeyTimeToLiveAsync(ToInner(key), flags); - } - - public Task KeyTypeAsync(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.KeyTypeAsync(ToInner(key), flags); - } - - public Task ListGetByIndexAsync(RedisKey key, long index, CommandFlags flags = CommandFlags.None) - { - return Inner.ListGetByIndexAsync(ToInner(key), index, flags); - } - - public Task ListInsertAfterAsync(RedisKey key, RedisValue pivot, RedisValue value, CommandFlags flags = CommandFlags.None) - { - return Inner.ListInsertAfterAsync(ToInner(key), pivot, value, flags); - } - - public Task ListInsertBeforeAsync(RedisKey key, RedisValue pivot, RedisValue value, CommandFlags flags = CommandFlags.None) - { - return Inner.ListInsertBeforeAsync(ToInner(key), pivot, value, flags); - } - - public Task ListLeftPopAsync(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.ListLeftPopAsync(ToInner(key), flags); - } - - public Task ListLeftPushAsync(RedisKey key, RedisValue[] values, CommandFlags flags = CommandFlags.None) - { - return Inner.ListLeftPushAsync(ToInner(key), values, flags); - } - - public Task ListLeftPushAsync(RedisKey key, RedisValue[] values, When when = When.Always, CommandFlags flags = CommandFlags.None) - { - return Inner.ListLeftPushAsync(ToInner(key), values, when, flags); - } - - public Task ListLeftPushAsync(RedisKey key, RedisValue value, When when = When.Always, CommandFlags flags = CommandFlags.None) - { - return Inner.ListLeftPushAsync(ToInner(key), value, when, flags); - } - - public Task ListLengthAsync(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.ListLengthAsync(ToInner(key), flags); - } - - public Task ListRangeAsync(RedisKey key, long start = 0, long stop = -1, CommandFlags 
flags = CommandFlags.None) - { - return Inner.ListRangeAsync(ToInner(key), start, stop, flags); - } - - public Task ListRemoveAsync(RedisKey key, RedisValue value, long count = 0, CommandFlags flags = CommandFlags.None) - { - return Inner.ListRemoveAsync(ToInner(key), value, count, flags); - } - - public Task ListRightPopAsync(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.ListRightPopAsync(ToInner(key), flags); - } - - public Task ListRightPopLeftPushAsync(RedisKey source, RedisKey destination, CommandFlags flags = CommandFlags.None) - { - return Inner.ListRightPopLeftPushAsync(ToInner(source), ToInner(destination), flags); - } - - public Task ListRightPushAsync(RedisKey key, RedisValue[] values, CommandFlags flags = CommandFlags.None) - { - return Inner.ListRightPushAsync(ToInner(key), values, flags); - } - - public Task ListRightPushAsync(RedisKey key, RedisValue[] values, When when = When.Always, CommandFlags flags = CommandFlags.None) - { - return Inner.ListRightPushAsync(ToInner(key), values, when, flags); - } - - public Task ListRightPushAsync(RedisKey key, RedisValue value, When when = When.Always, CommandFlags flags = CommandFlags.None) - { - return Inner.ListRightPushAsync(ToInner(key), value, when, flags); - } - - public Task ListSetByIndexAsync(RedisKey key, long index, RedisValue value, CommandFlags flags = CommandFlags.None) - { - return Inner.ListSetByIndexAsync(ToInner(key), index, value, flags); - } - - public Task ListTrimAsync(RedisKey key, long start, long stop, CommandFlags flags = CommandFlags.None) - { - return Inner.ListTrimAsync(ToInner(key), start, stop, flags); - } - - public Task LockExtendAsync(RedisKey key, RedisValue value, TimeSpan expiry, CommandFlags flags = CommandFlags.None) - { - return Inner.LockExtendAsync(ToInner(key), value, expiry, flags); - } - - public Task LockQueryAsync(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.LockQueryAsync(ToInner(key), flags); - } - - public 
Task LockReleaseAsync(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None) - { - return Inner.LockReleaseAsync(ToInner(key), value, flags); - } - - public Task LockTakeAsync(RedisKey key, RedisValue value, TimeSpan expiry, CommandFlags flags = CommandFlags.None) - { - return Inner.LockTakeAsync(ToInner(key), value, expiry, flags); - } - - public Task PublishAsync(RedisChannel channel, RedisValue message, CommandFlags flags = CommandFlags.None) - { - return Inner.PublishAsync(ToInner(channel), message, flags); - } - - public Task ExecuteAsync(string command, params object[] args) - => Inner.ExecuteAsync(command, ToInner(args), CommandFlags.None); - - public Task ExecuteAsync(string command, ICollection args, CommandFlags flags = CommandFlags.None) - => Inner.ExecuteAsync(command, ToInner(args), flags); - - public Task ScriptEvaluateAsync(byte[] hash, RedisKey[] keys = null, RedisValue[] values = null, CommandFlags flags = CommandFlags.None) - { - // TODO: The return value could contain prefixed keys. It might make sense to 'unprefix' those? - return Inner.ScriptEvaluateAsync(hash, ToInner(keys), values, flags); - } - - public Task ScriptEvaluateAsync(string script, RedisKey[] keys = null, RedisValue[] values = null, CommandFlags flags = CommandFlags.None) - { - // TODO: The return value could contain prefixed keys. It might make sense to 'unprefix' those? - return Inner.ScriptEvaluateAsync(script, ToInner(keys), values, flags); - } - - public Task ScriptEvaluateAsync(LuaScript script, object parameters = null, CommandFlags flags = CommandFlags.None) - { - // TODO: The return value could contain prefixed keys. It might make sense to 'unprefix' those? - return script.EvaluateAsync(Inner, parameters, Prefix, flags); - } - - public Task ScriptEvaluateAsync(LoadedLuaScript script, object parameters = null, CommandFlags flags = CommandFlags.None) - { - // TODO: The return value could contain prefixed keys. It might make sense to 'unprefix' those? 
- return script.EvaluateAsync(Inner, parameters, Prefix, flags); - } - - public Task SetAddAsync(RedisKey key, RedisValue[] values, CommandFlags flags = CommandFlags.None) - { - return Inner.SetAddAsync(ToInner(key), values, flags); - } - - public Task SetAddAsync(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None) - { - return Inner.SetAddAsync(ToInner(key), value, flags); - } - - public Task SetCombineAndStoreAsync(SetOperation operation, RedisKey destination, RedisKey[] keys, CommandFlags flags = CommandFlags.None) - { - return Inner.SetCombineAndStoreAsync(operation, ToInner(destination), ToInner(keys), flags); - } - - public Task SetCombineAndStoreAsync(SetOperation operation, RedisKey destination, RedisKey first, RedisKey second, CommandFlags flags = CommandFlags.None) - { - return Inner.SetCombineAndStoreAsync(operation, ToInner(destination), ToInner(first), ToInner(second), flags); - } - - public Task SetCombineAsync(SetOperation operation, RedisKey[] keys, CommandFlags flags = CommandFlags.None) - { - return Inner.SetCombineAsync(operation, ToInner(keys), flags); - } - - public Task SetCombineAsync(SetOperation operation, RedisKey first, RedisKey second, CommandFlags flags = CommandFlags.None) - { - return Inner.SetCombineAsync(operation, ToInner(first), ToInner(second), flags); - } - - public Task SetContainsAsync(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None) - { - return Inner.SetContainsAsync(ToInner(key), value, flags); - } - - public Task SetLengthAsync(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.SetLengthAsync(ToInner(key), flags); - } - - public Task SetMembersAsync(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.SetMembersAsync(ToInner(key), flags); - } - - public Task SetMoveAsync(RedisKey source, RedisKey destination, RedisValue value, CommandFlags flags = CommandFlags.None) - { - return Inner.SetMoveAsync(ToInner(source), ToInner(destination), 
value, flags); - } - - public Task SetPopAsync(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.SetPopAsync(ToInner(key), flags); - } - - public Task SetPopAsync(RedisKey key, long count, CommandFlags flags = CommandFlags.None) - { - return Inner.SetPopAsync(ToInner(key), count, flags); - } - - public Task SetRandomMemberAsync(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.SetRandomMemberAsync(ToInner(key), flags); - } - - public Task SetRandomMembersAsync(RedisKey key, long count, CommandFlags flags = CommandFlags.None) - { - return Inner.SetRandomMembersAsync(ToInner(key), count, flags); - } - - public Task SetRemoveAsync(RedisKey key, RedisValue[] values, CommandFlags flags = CommandFlags.None) - { - return Inner.SetRemoveAsync(ToInner(key), values, flags); - } - - public IAsyncEnumerable SetScanAsync(RedisKey key, RedisValue pattern, int pageSize, long cursor, int pageOffset, CommandFlags flags) - => Inner.SetScanAsync(ToInner(key), pattern, pageSize, cursor, pageOffset, flags); - - public Task SetRemoveAsync(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None) - { - return Inner.SetRemoveAsync(ToInner(key), value, flags); - } - - public Task SortAndStoreAsync(RedisKey destination, RedisKey key, long skip = 0, long take = -1, Order order = Order.Ascending, SortType sortType = SortType.Numeric, RedisValue by = default(RedisValue), RedisValue[] get = null, CommandFlags flags = CommandFlags.None) - { - return Inner.SortAndStoreAsync(ToInner(destination), ToInner(key), skip, take, order, sortType, SortByToInner(by), SortGetToInner(get), flags); - } - - public Task SortAsync(RedisKey key, long skip = 0, long take = -1, Order order = Order.Ascending, SortType sortType = SortType.Numeric, RedisValue by = default(RedisValue), RedisValue[] get = null, CommandFlags flags = CommandFlags.None) - { - return Inner.SortAsync(ToInner(key), skip, take, order, sortType, SortByToInner(by), SortGetToInner(get), 
flags); - } - - public Task SortedSetAddAsync(RedisKey key, SortedSetEntry[] values, CommandFlags flags) - { - return Inner.SortedSetAddAsync(ToInner(key), values, flags); - } - - public Task SortedSetAddAsync(RedisKey key, SortedSetEntry[] values, When when = When.Always, CommandFlags flags = CommandFlags.None) - { - return Inner.SortedSetAddAsync(ToInner(key), values, when, flags); - } - - public Task SortedSetAddAsync(RedisKey key, RedisValue member, double score, CommandFlags flags) - { - return Inner.SortedSetAddAsync(ToInner(key), member, score, flags); - } - - public Task SortedSetAddAsync(RedisKey key, RedisValue member, double score, When when = When.Always, CommandFlags flags = CommandFlags.None) - { - return Inner.SortedSetAddAsync(ToInner(key), member, score, when, flags); - } - - public Task SortedSetCombineAndStoreAsync(SetOperation operation, RedisKey destination, RedisKey[] keys, double[] weights = null, Aggregate aggregate = Aggregate.Sum, CommandFlags flags = CommandFlags.None) - { - return Inner.SortedSetCombineAndStoreAsync(operation, ToInner(destination), ToInner(keys), weights, aggregate, flags); - } - - public Task SortedSetCombineAndStoreAsync(SetOperation operation, RedisKey destination, RedisKey first, RedisKey second, Aggregate aggregate = Aggregate.Sum, CommandFlags flags = CommandFlags.None) - { - return Inner.SortedSetCombineAndStoreAsync(operation, ToInner(destination), ToInner(first), ToInner(second), aggregate, flags); - } - - public Task SortedSetDecrementAsync(RedisKey key, RedisValue member, double value, CommandFlags flags = CommandFlags.None) - { - return Inner.SortedSetDecrementAsync(ToInner(key), member, value, flags); - } - - public Task SortedSetIncrementAsync(RedisKey key, RedisValue member, double value, CommandFlags flags = CommandFlags.None) - { - return Inner.SortedSetIncrementAsync(ToInner(key), member, value, flags); - } - - public Task SortedSetLengthAsync(RedisKey key, double min = -1.0 / 0.0, double max = 1.0 / 
0.0, Exclude exclude = Exclude.None, CommandFlags flags = CommandFlags.None) - { - return Inner.SortedSetLengthAsync(ToInner(key), min, max, exclude, flags); - } - - public Task SortedSetLengthByValueAsync(RedisKey key, RedisValue min, RedisValue max, Exclude exclude = Exclude.None, CommandFlags flags = CommandFlags.None) - { - return Inner.SortedSetLengthByValueAsync(ToInner(key), min, max, exclude, flags); - } - - public Task SortedSetRangeByRankAsync(RedisKey key, long start = 0, long stop = -1, Order order = Order.Ascending, CommandFlags flags = CommandFlags.None) - { - return Inner.SortedSetRangeByRankAsync(ToInner(key), start, stop, order, flags); - } - - public Task SortedSetRangeByRankWithScoresAsync(RedisKey key, long start = 0, long stop = -1, Order order = Order.Ascending, CommandFlags flags = CommandFlags.None) - { - return Inner.SortedSetRangeByRankWithScoresAsync(ToInner(key), start, stop, order, flags); - } - - public Task SortedSetRangeByScoreAsync(RedisKey key, double start = -1.0 / 0.0, double stop = 1.0 / 0.0, Exclude exclude = Exclude.None, Order order = Order.Ascending, long skip = 0, long take = -1, CommandFlags flags = CommandFlags.None) - { - return Inner.SortedSetRangeByScoreAsync(ToInner(key), start, stop, exclude, order, skip, take, flags); - } - - public Task SortedSetRangeByScoreWithScoresAsync(RedisKey key, double start = -1.0 / 0.0, double stop = 1.0 / 0.0, Exclude exclude = Exclude.None, Order order = Order.Ascending, long skip = 0, long take = -1, CommandFlags flags = CommandFlags.None) - { - return Inner.SortedSetRangeByScoreWithScoresAsync(ToInner(key), start, stop, exclude, order, skip, take, flags); - } - - public Task SortedSetRangeByValueAsync(RedisKey key, RedisValue min, RedisValue max, Exclude exclude, long skip, long take, CommandFlags flags) - { - return Inner.SortedSetRangeByValueAsync(ToInner(key), min, max, exclude, Order.Ascending, skip, take, flags); - } - - public Task SortedSetRangeByValueAsync(RedisKey key, 
RedisValue min = default(RedisValue), RedisValue max = default(RedisValue), Exclude exclude = Exclude.None, Order order = Order.Ascending, long skip = 0, long take = -1, CommandFlags flags = CommandFlags.None) - { - return Inner.SortedSetRangeByValueAsync(ToInner(key), min, max, exclude, order, skip, take, flags); - } - - public Task SortedSetRankAsync(RedisKey key, RedisValue member, Order order = Order.Ascending, CommandFlags flags = CommandFlags.None) - { - return Inner.SortedSetRankAsync(ToInner(key), member, order, flags); - } - - public Task SortedSetRemoveAsync(RedisKey key, RedisValue[] members, CommandFlags flags = CommandFlags.None) - { - return Inner.SortedSetRemoveAsync(ToInner(key), members, flags); - } - - public Task SortedSetRemoveAsync(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) - { - return Inner.SortedSetRemoveAsync(ToInner(key), member, flags); - } - - public Task SortedSetRemoveRangeByRankAsync(RedisKey key, long start, long stop, CommandFlags flags = CommandFlags.None) - { - return Inner.SortedSetRemoveRangeByRankAsync(ToInner(key), start, stop, flags); - } - - public Task SortedSetRemoveRangeByScoreAsync(RedisKey key, double start, double stop, Exclude exclude = Exclude.None, CommandFlags flags = CommandFlags.None) - { - return Inner.SortedSetRemoveRangeByScoreAsync(ToInner(key), start, stop, exclude, flags); - } - - public Task SortedSetRemoveRangeByValueAsync(RedisKey key, RedisValue min, RedisValue max, Exclude exclude = Exclude.None, CommandFlags flags = CommandFlags.None) - { - return Inner.SortedSetRemoveRangeByValueAsync(ToInner(key), min, max, exclude, flags); - } - - public Task SortedSetScoreAsync(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) - { - return Inner.SortedSetScoreAsync(ToInner(key), member, flags); - } - - public IAsyncEnumerable SortedSetScanAsync(RedisKey key, RedisValue pattern, int pageSize, long cursor, int pageOffset, CommandFlags flags) - => 
Inner.SortedSetScanAsync(ToInner(key), pattern, pageSize, cursor, pageOffset, flags); - - public Task SortedSetPopAsync(RedisKey key, Order order = Order.Ascending, CommandFlags flags = CommandFlags.None) - { - return Inner.SortedSetPopAsync(ToInner(key), order, flags); - } - - public Task SortedSetPopAsync(RedisKey key, long count, Order order = Order.Ascending, CommandFlags flags = CommandFlags.None) - { - return Inner.SortedSetPopAsync(ToInner(key), count, order, flags); - } - - public Task StreamAcknowledgeAsync(RedisKey key, RedisValue groupName, RedisValue messageId, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamAcknowledgeAsync(ToInner(key), groupName, messageId, flags); - } - - public Task StreamAcknowledgeAsync(RedisKey key, RedisValue groupName, RedisValue[] messageIds, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamAcknowledgeAsync(ToInner(key), groupName, messageIds, flags); - } - - public Task StreamAddAsync(RedisKey key, RedisValue streamField, RedisValue streamValue, RedisValue? messageId = null, int? maxLength = null, bool useApproximateMaxLength = false, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamAddAsync(ToInner(key), streamField, streamValue, messageId, maxLength, useApproximateMaxLength, flags); - } - - public Task StreamAddAsync(RedisKey key, NameValueEntry[] streamPairs, RedisValue? messageId = null, int? 
maxLength = null, bool useApproximateMaxLength = false, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamAddAsync(ToInner(key), streamPairs, messageId, maxLength, useApproximateMaxLength, flags); - } - - public Task StreamClaimAsync(RedisKey key, RedisValue consumerGroup, RedisValue claimingConsumer, long minIdleTimeInMs, RedisValue[] messageIds, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamClaimAsync(ToInner(key), consumerGroup, claimingConsumer, minIdleTimeInMs, messageIds, flags); - } - - public Task StreamClaimIdsOnlyAsync(RedisKey key, RedisValue consumerGroup, RedisValue claimingConsumer, long minIdleTimeInMs, RedisValue[] messageIds, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamClaimIdsOnlyAsync(ToInner(key), consumerGroup, claimingConsumer, minIdleTimeInMs, messageIds, flags); - } - - public Task StreamConsumerGroupSetPositionAsync(RedisKey key, RedisValue groupName, RedisValue position, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamConsumerGroupSetPositionAsync(ToInner(key), groupName, position, flags); - } - - public Task StreamCreateConsumerGroupAsync(RedisKey key, RedisValue groupName, RedisValue? position, CommandFlags flags) - { - return Inner.StreamCreateConsumerGroupAsync(ToInner(key), groupName, position, flags); - } - - public Task StreamCreateConsumerGroupAsync(RedisKey key, RedisValue groupName, RedisValue? 
position = null, bool createStream = true, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamCreateConsumerGroupAsync(ToInner(key), groupName, position, createStream, flags); - } - - public Task StreamInfoAsync(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamInfoAsync(ToInner(key), flags); - } - - public Task StreamGroupInfoAsync(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamGroupInfoAsync(ToInner(key), flags); - } - - public Task StreamConsumerInfoAsync(RedisKey key, RedisValue groupName, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamConsumerInfoAsync(ToInner(key), groupName, flags); - } - - public Task StreamLengthAsync(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamLengthAsync(ToInner(key), flags); - } - - public Task StreamDeleteAsync(RedisKey key, RedisValue[] messageIds, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamDeleteAsync(ToInner(key), messageIds, flags); - } - - public Task StreamDeleteConsumerAsync(RedisKey key, RedisValue groupName, RedisValue consumerName, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamDeleteConsumerAsync(ToInner(key), groupName, consumerName, flags); - } - - public Task StreamDeleteConsumerGroupAsync(RedisKey key, RedisValue groupName, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamDeleteConsumerGroupAsync(ToInner(key), groupName, flags); - } - - public Task StreamPendingAsync(RedisKey key, RedisValue groupName, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamPendingAsync(ToInner(key), groupName, flags); - } - - public Task StreamPendingMessagesAsync(RedisKey key, RedisValue groupName, int count, RedisValue consumerName, RedisValue? minId = null, RedisValue? 
maxId = null, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamPendingMessagesAsync(ToInner(key), groupName, count, consumerName, minId, maxId, flags); - } - - public Task StreamRangeAsync(RedisKey key, RedisValue? minId = null, RedisValue? maxId = null, int? count = null, Order messageOrder = Order.Ascending, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamRangeAsync(ToInner(key), minId, maxId, count, messageOrder, flags); - } - - public Task StreamReadAsync(RedisKey key, RedisValue position, int? count = null, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamReadAsync(ToInner(key), position, count, flags); - } - - public Task StreamReadAsync(StreamPosition[] streamPositions, int? countPerStream = null, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamReadAsync(streamPositions, countPerStream, flags); - } - - public Task StreamReadGroupAsync(RedisKey key, RedisValue groupName, RedisValue consumerName, RedisValue? position, int? count, CommandFlags flags) - { - return Inner.StreamReadGroupAsync(ToInner(key), groupName, consumerName, position, count, flags); - } - - public Task StreamReadGroupAsync(RedisKey key, RedisValue groupName, RedisValue consumerName, RedisValue? position = null, int? count = null, bool noAck = false, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamReadGroupAsync(ToInner(key), groupName, consumerName, position, count, noAck, flags); - } - - public Task StreamReadGroupAsync(StreamPosition[] streamPositions, RedisValue groupName, RedisValue consumerName, int? countPerStream, CommandFlags flags) - { - return Inner.StreamReadGroupAsync(streamPositions, groupName, consumerName, countPerStream, flags); - } - - public Task StreamReadGroupAsync(StreamPosition[] streamPositions, RedisValue groupName, RedisValue consumerName, int? 
countPerStream = null, bool noAck = false, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamReadGroupAsync(streamPositions, groupName, consumerName, countPerStream, noAck, flags); - } - - public Task StreamTrimAsync(RedisKey key, int maxLength, bool useApproximateMaxLength = false, CommandFlags flags = CommandFlags.None) - { - return Inner.StreamTrimAsync(ToInner(key), maxLength, useApproximateMaxLength, flags); - } - - public Task StringAppendAsync(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None) - { - return Inner.StringAppendAsync(ToInner(key), value, flags); - } - - public Task StringBitCountAsync(RedisKey key, long start = 0, long end = -1, CommandFlags flags = CommandFlags.None) - { - return Inner.StringBitCountAsync(ToInner(key), start, end, flags); - } - - public Task StringBitOperationAsync(Bitwise operation, RedisKey destination, RedisKey[] keys, CommandFlags flags = CommandFlags.None) - { - return Inner.StringBitOperationAsync(operation, ToInner(destination), ToInner(keys), flags); - } - - public Task StringBitOperationAsync(Bitwise operation, RedisKey destination, RedisKey first, RedisKey second = default(RedisKey), CommandFlags flags = CommandFlags.None) - { - return Inner.StringBitOperationAsync(operation, ToInner(destination), ToInner(first), ToInnerOrDefault(second), flags); - } - - public Task StringBitPositionAsync(RedisKey key, bool bit, long start = 0, long end = -1, CommandFlags flags = CommandFlags.None) - { - return Inner.StringBitPositionAsync(ToInner(key), bit, start, end, flags); - } - - public Task StringDecrementAsync(RedisKey key, double value, CommandFlags flags = CommandFlags.None) - { - return Inner.StringDecrementAsync(ToInner(key), value, flags); - } - - public Task StringDecrementAsync(RedisKey key, long value = 1, CommandFlags flags = CommandFlags.None) - { - return Inner.StringDecrementAsync(ToInner(key), value, flags); - } - - public Task StringGetAsync(RedisKey[] keys, CommandFlags flags 
= CommandFlags.None) - { - return Inner.StringGetAsync(ToInner(keys), flags); - } - - public Task StringGetAsync(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.StringGetAsync(ToInner(key), flags); - } - - public Task> StringGetLeaseAsync(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.StringGetLeaseAsync(ToInner(key), flags); - } - - public Task StringGetBitAsync(RedisKey key, long offset, CommandFlags flags = CommandFlags.None) - { - return Inner.StringGetBitAsync(ToInner(key), offset, flags); - } - - public Task StringGetRangeAsync(RedisKey key, long start, long end, CommandFlags flags = CommandFlags.None) - { - return Inner.StringGetRangeAsync(ToInner(key), start, end, flags); - } - - public Task StringGetSetAsync(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None) - { - return Inner.StringGetSetAsync(ToInner(key), value, flags); - } - - public Task StringGetWithExpiryAsync(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.StringGetWithExpiryAsync(ToInner(key), flags); - } - - public Task StringIncrementAsync(RedisKey key, double value, CommandFlags flags = CommandFlags.None) - { - return Inner.StringIncrementAsync(ToInner(key), value, flags); - } - - public Task StringIncrementAsync(RedisKey key, long value = 1, CommandFlags flags = CommandFlags.None) - { - return Inner.StringIncrementAsync(ToInner(key), value, flags); - } - - public Task StringLengthAsync(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.StringLengthAsync(ToInner(key), flags); - } - - public Task StringSetAsync(KeyValuePair[] values, When when = When.Always, CommandFlags flags = CommandFlags.None) - { - return Inner.StringSetAsync(ToInner(values), when, flags); - } - - public Task StringSetAsync(RedisKey key, RedisValue value, TimeSpan? 
expiry = null, When when = When.Always, CommandFlags flags = CommandFlags.None) - { - return Inner.StringSetAsync(ToInner(key), value, expiry, when, flags); - } - - public Task StringSetBitAsync(RedisKey key, long offset, bool bit, CommandFlags flags = CommandFlags.None) - { - return Inner.StringSetBitAsync(ToInner(key), offset, bit, flags); - } - - public Task StringSetRangeAsync(RedisKey key, long offset, RedisValue value, CommandFlags flags = CommandFlags.None) - { - return Inner.StringSetRangeAsync(ToInner(key), offset, value, flags); - } - - public Task PingAsync(CommandFlags flags = CommandFlags.None) - { - return Inner.PingAsync(flags); - } - - - public Task KeyTouchAsync(RedisKey[] keys, CommandFlags flags = CommandFlags.None) - { - return Inner.KeyTouchAsync(ToInner(keys), flags); - } - - public Task KeyTouchAsync(RedisKey key, CommandFlags flags = CommandFlags.None) - { - return Inner.KeyTouchAsync(ToInner(key), flags); - } - - public bool TryWait(Task task) - { - return Inner.TryWait(task); - } - - public TResult Wait(Task task) - { - return Inner.Wait(task); - } - - public void Wait(Task task) - { - Inner.Wait(task); - } - - public void WaitAll(params Task[] tasks) - { - Inner.WaitAll(tasks); - } - protected internal RedisKey ToInner(RedisKey outer) - { - return RedisKey.WithPrefix(Prefix, outer); - } - - protected RedisKey ToInnerOrDefault(RedisKey outer) - { - if (outer == default(RedisKey)) - { - return outer; - } - else - { - return ToInner(outer); - } - } - - protected ICollection ToInner(ICollection args) - { - if (args?.Any(x => x is RedisKey || x is RedisChannel) == true) - { - var withPrefix = new object[args.Count]; - int i = 0; - foreach (var oldArg in args) - { - object newArg; - if (oldArg is RedisKey key) - { - newArg = ToInner(key); - } - else if (oldArg is RedisChannel channel) - { - newArg = ToInner(channel); - } - else - { - newArg = oldArg; - } - withPrefix[i++] = newArg; - } - args = withPrefix; - } - return args; - } - - protected 
RedisKey[] ToInner(RedisKey[] outer) - { - if (outer == null || outer.Length == 0) - { - return outer; - } - else - { - RedisKey[] inner = new RedisKey[outer.Length]; - - for (int i = 0; i < outer.Length; ++i) - { - inner[i] = ToInner(outer[i]); - } - - return inner; - } - } - - protected KeyValuePair ToInner(KeyValuePair outer) - { - return new KeyValuePair(ToInner(outer.Key), outer.Value); - } - - protected KeyValuePair[] ToInner(KeyValuePair[] outer) - { - if (outer == null || outer.Length == 0) - { - return outer; - } - else - { - KeyValuePair[] inner = new KeyValuePair[outer.Length]; - - for (int i = 0; i < outer.Length; ++i) - { - inner[i] = ToInner(outer[i]); - } - - return inner; - } - } - - protected RedisValue ToInner(RedisValue outer) - { - return RedisKey.ConcatenateBytes(Prefix, null, (byte[])outer); - } - - protected RedisValue SortByToInner(RedisValue outer) - { - if (outer == "nosort") - { - return outer; - } - else - { - return ToInner(outer); - } - } - - protected RedisValue SortGetToInner(RedisValue outer) - { - if (outer == "#") - { - return outer; - } - else - { - return ToInner(outer); - } - } - - protected RedisValue[] SortGetToInner(RedisValue[] outer) - { - if (outer == null || outer.Length == 0) - { - return outer; - } - else - { - RedisValue[] inner = new RedisValue[outer.Length]; - - for (int i = 0; i < outer.Length; ++i) - { - inner[i] = SortGetToInner(outer[i]); - } - - return inner; - } - } - - protected RedisChannel ToInner(RedisChannel outer) - { - return RedisKey.ConcatenateBytes(Prefix, null, (byte[])outer); - } - - private Func mapFunction; - protected Func GetMapFunction() - { - // create as a delegate when first required, then re-use - return mapFunction ?? 
(mapFunction = new Func(ToInner)); - } - } -} diff --git a/src/StackExchange.Redis/Lease.cs b/src/StackExchange.Redis/Lease.cs index f3f93625a..a5a88e4eb 100644 --- a/src/StackExchange.Redis/Lease.cs +++ b/src/StackExchange.Redis/Lease.cs @@ -6,28 +6,33 @@ namespace StackExchange.Redis { /// - /// A sized region of contiguous memory backed by a memory pool; disposing the lease returns the memory to the pool + /// A sized region of contiguous memory backed by a memory pool; disposing the lease returns the memory to the pool. /// - /// The type of data being leased + /// The type of data being leased. public sealed class Lease : IMemoryOwner { /// - /// A lease of length zero + /// A lease of length zero. /// public static Lease Empty { get; } = new Lease(System.Array.Empty(), 0); - private T[] _arr; + private T[]? _arr; /// - /// The length of the lease + /// Gets whether this lease is empty. + /// + public bool IsEmpty => Length == 0; + + /// + /// The length of the lease. /// public int Length { get; } /// - /// Create a new lease + /// Create a new lease. /// - /// The size required - /// Whether to erase the memory + /// The size required. + /// Whether to erase the memory. public static Lease Create(int length, bool clear = true) { if (length == 0) return Empty; @@ -43,7 +48,7 @@ private Lease(T[] arr, int length) } /// - /// Release all resources owned by the lease + /// Release all resources owned by the lease. /// public void Dispose() { @@ -53,8 +58,10 @@ public void Dispose() if (arr != null) ArrayPool.Shared.Return(arr); } } + [MethodImpl(MethodImplOptions.NoInlining)] private static T[] ThrowDisposed() => throw new ObjectDisposedException(nameof(Lease)); + private T[] Array { [MethodImpl(MethodImplOptions.AggressiveInlining)] @@ -62,17 +69,17 @@ private T[] Array } /// - /// The data as a Memory + /// The data as a . /// public Memory Memory => new Memory(Array, 0, Length); /// - /// The data as a Span + /// The data as a . 
/// public Span Span => new Span(Array, 0, Length); /// - /// The data as an ArraySegment + /// The data as an . /// public ArraySegment ArraySegment => new ArraySegment(Array, 0, Length); } diff --git a/src/StackExchange.Redis/LinearRetry.cs b/src/StackExchange.Redis/LinearRetry.cs index 872654c92..ddd269eb3 100644 --- a/src/StackExchange.Redis/LinearRetry.cs +++ b/src/StackExchange.Redis/LinearRetry.cs @@ -1,7 +1,7 @@ -namespace StackExchange.Redis +namespace StackExchange.Redis { /// - /// Represents a retry policy that performs retries at a fixed interval. The retries are performed upto a maximum allowed time. + /// Represents a retry policy that performs retries at a fixed interval. The retries are performed up to a maximum allowed time. /// public class LinearRetry : IReconnectRetryPolicy { @@ -10,20 +10,16 @@ public class LinearRetry : IReconnectRetryPolicy /// /// Initializes a new instance using the specified maximum retry elapsed time allowed. /// - /// maximum elapsed time in milliseconds to be allowed for it to perform retries - public LinearRetry(int maxRetryElapsedTimeAllowedMilliseconds) - { + /// maximum elapsed time in milliseconds to be allowed for it to perform retries. + public LinearRetry(int maxRetryElapsedTimeAllowedMilliseconds) => this.maxRetryElapsedTimeAllowedMilliseconds = maxRetryElapsedTimeAllowedMilliseconds; - } /// /// This method is called by the ConnectionMultiplexer to determine if a reconnect operation can be retried now. 
/// - /// The number of times reconnect retries have already been made by the ConnectionMultiplexer while it was in the connecting state - /// Total elapsed time in milliseconds since the last reconnect retry was made - public bool ShouldRetry(long currentRetryCount, int timeElapsedMillisecondsSinceLastRetry) - { - return timeElapsedMillisecondsSinceLastRetry >= maxRetryElapsedTimeAllowedMilliseconds; - } + /// The number of times reconnect retries have already been made by the ConnectionMultiplexer while it was in the connecting state. + /// Total elapsed time in milliseconds since the last reconnect retry was made. + public bool ShouldRetry(long currentRetryCount, int timeElapsedMillisecondsSinceLastRetry) => + timeElapsedMillisecondsSinceLastRetry >= maxRetryElapsedTimeAllowedMilliseconds; } -} \ No newline at end of file +} diff --git a/src/StackExchange.Redis/LoggerExtensions.cs b/src/StackExchange.Redis/LoggerExtensions.cs new file mode 100644 index 000000000..4a43514e4 --- /dev/null +++ b/src/StackExchange.Redis/LoggerExtensions.cs @@ -0,0 +1,712 @@ +using System; +using System.Net; +using System.Text; +using System.Threading.Tasks; +using Microsoft.Extensions.Logging; + +namespace StackExchange.Redis; + +internal static partial class LoggerExtensions +{ + // Helper structs for complex ToString() calls + internal readonly struct EndPointLogValue(EndPoint? endpoint) + { + public override string ToString() => Format.ToString(endpoint); + } + + internal readonly struct ServerEndPointLogValue(ServerEndPoint server) + { + public override string ToString() => Format.ToString(server); + } + + internal readonly struct ConfigurationOptionsLogValue(ConfigurationOptions options) + { + public override string ToString() => options.ToString(includePassword: false); + } + + // manual extensions + internal static void LogWithThreadPoolStats(this ILogger? 
log, string message) + { + if (log is null || !log.IsEnabled(LogLevel.Information)) + { + return; + } + + _ = PerfCounterHelper.GetThreadPoolStats(out string iocp, out string worker, out string? workItems); + +#if NET + // use DISH when possible + // similar to: var composed = $"{message}, IOCP: {iocp}, WORKER: {worker}, ..."; on net6+ + var dish = new System.Runtime.CompilerServices.DefaultInterpolatedStringHandler(26, 4); + dish.AppendFormatted(message); + dish.AppendLiteral(", IOCP: "); + dish.AppendFormatted(iocp); + dish.AppendLiteral(", WORKER: "); + dish.AppendFormatted(worker); + if (workItems is not null) + { + dish.AppendLiteral(", POOL: "); + dish.AppendFormatted(workItems); + } + var composed = dish.ToStringAndClear(); +#else + var sb = new StringBuilder(); + sb.Append(message).Append(", IOCP: ").Append(iocp).Append(", WORKER: ").Append(worker); + if (workItems is not null) + { + sb.Append(", POOL: ").Append(workItems); + } + var composed = sb.ToString(); +#endif + log.LogInformationThreadPoolStats(composed); + } + + // Generated LoggerMessage methods + [LoggerMessage( + Level = LogLevel.Error, + Message = "Connection failed: {EndPoint} ({ConnectionType}, {FailureType}): {ErrorMessage}")] + internal static partial void LogErrorConnectionFailed(this ILogger logger, Exception? 
exception, EndPointLogValue endPoint, ConnectionType connectionType, ConnectionFailureType failureType, string errorMessage); + + [LoggerMessage( + Level = LogLevel.Error, + EventId = 1, + Message = "> {Message}")] + internal static partial void LogErrorInnerException(this ILogger logger, Exception exception, string message); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 2, + Message = "Checking {EndPoint} is available...")] + internal static partial void LogInformationCheckingServerAvailable(this ILogger logger, EndPointLogValue endPoint); + + [LoggerMessage( + Level = LogLevel.Error, + EventId = 3, + Message = "Operation failed on {EndPoint}, aborting: {ErrorMessage}")] + internal static partial void LogErrorOperationFailedOnServer(this ILogger logger, Exception exception, EndPointLogValue endPoint, string errorMessage); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 4, + Message = "Attempting to set tie-breaker on {EndPoint}...")] + internal static partial void LogInformationAttemptingToSetTieBreaker(this ILogger logger, EndPointLogValue endPoint); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 5, + Message = "Making {EndPoint} a primary...")] + internal static partial void LogInformationMakingServerPrimary(this ILogger logger, EndPointLogValue endPoint); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 6, + Message = "Resending tie-breaker to {EndPoint}...")] + internal static partial void LogInformationResendingTieBreaker(this ILogger logger, EndPointLogValue endPoint); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 7, + Message = "Broadcasting via {EndPoint}...")] + internal static partial void LogInformationBroadcastingViaNode(this ILogger logger, EndPointLogValue endPoint); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 8, + Message = "Replicating to {EndPoint}...")] + internal static partial void LogInformationReplicatingToNode(this ILogger logger, 
EndPointLogValue endPoint); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 9, + Message = "Reconfiguring all endpoints...")] + internal static partial void LogInformationReconfiguringAllEndpoints(this ILogger logger); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 10, + Message = "Verifying the configuration was incomplete; please verify")] + internal static partial void LogInformationVerifyingConfigurationIncomplete(this ILogger logger); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 11, + Message = "Connecting (async) on {Framework} (StackExchange.Redis: v{Version})")] + internal static partial void LogInformationConnectingAsync(this ILogger logger, string framework, string version); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 12, + Message = "Connecting (sync) on {Framework} (StackExchange.Redis: v{Version})")] + internal static partial void LogInformationConnectingSync(this ILogger logger, string framework, string version); + + [LoggerMessage( + Level = LogLevel.Error, + EventId = 13, + Message = "{ErrorMessage}")] + internal static partial void LogErrorSyncConnectTimeout(this ILogger logger, Exception exception, string errorMessage); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 14, + Message = "{Message}")] + internal static partial void LogInformationAfterConnect(this ILogger logger, string message); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 15, + Message = "Total connect time: {ElapsedMs:n0} ms")] + internal static partial void LogInformationTotalConnectTime(this ILogger logger, long elapsedMs); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 16, + Message = "No tasks to await")] + internal static partial void LogInformationNoTasksToAwait(this ILogger logger); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 17, + Message = "All tasks are already complete")] + internal static partial void 
LogInformationAllTasksComplete(this ILogger logger); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 18, + Message = "{Message}", + SkipEnabledCheck = true)] + internal static partial void LogInformationThreadPoolStats(this ILogger logger, string message); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 19, + Message = "Reconfiguration was already in progress due to: {ActiveCause}, attempted to run for: {NewCause}")] + internal static partial void LogInformationReconfigurationInProgress(this ILogger logger, string? activeCause, string newCause); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 20, + Message = "{Configuration}")] + internal static partial void LogInformationConfiguration(this ILogger logger, ConfigurationOptionsLogValue configuration); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 21, + Message = "{Count} unique nodes specified ({TieBreakerStatus} tiebreaker)")] + internal static partial void LogInformationUniqueNodesSpecified(this ILogger logger, int count, string tieBreakerStatus); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 22, + Message = "Allowing {Count} endpoint(s) {TimeSpan} to respond...")] + internal static partial void LogInformationAllowingEndpointsToRespond(this ILogger logger, int count, TimeSpan timeSpan); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 23, + Message = " Server[{Index}] ({Server}) Status: {Status} (inst: {MessagesSinceLastHeartbeat}, qs: {MessagesSentAwaitingResponse}, in: {BytesAvailableOnSocket}, qu: {MessagesSinceLastHeartbeat2}, aw: {IsWriterActive}, in-pipe: {BytesInReadPipe}, out-pipe: {BytesInWritePipe}, bw: {BacklogStatus}, rs: {ReadStatus}. 
ws: {WriteStatus})")] + internal static partial void LogInformationServerStatus(this ILogger logger, int index, ServerEndPointLogValue server, TaskStatus status, long messagesSinceLastHeartbeat, long messagesSentAwaitingResponse, long bytesAvailableOnSocket, long messagesSinceLastHeartbeat2, bool isWriterActive, long bytesInReadPipe, long bytesInWritePipe, PhysicalBridge.BacklogStatus backlogStatus, PhysicalConnection.ReadStatus readStatus, PhysicalConnection.WriteStatus writeStatus); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 24, + Message = "Endpoint summary:")] + internal static partial void LogInformationEndpointSummary(this ILogger logger); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 25, + Message = " {EndPoint}: Endpoint is (Interactive: {InteractiveState}, Subscription: {SubscriptionState})")] + internal static partial void LogInformationEndpointState(this ILogger logger, EndPointLogValue endPoint, PhysicalBridge.State interactiveState, PhysicalBridge.State subscriptionState); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 26, + Message = "Task summary:")] + internal static partial void LogInformationTaskSummary(this ILogger logger); + + [LoggerMessage( + Level = LogLevel.Error, + EventId = 27, + Message = " {Server}: Faulted: {ErrorMessage}")] + internal static partial void LogErrorServerFaulted(this ILogger logger, Exception exception, ServerEndPointLogValue server, string errorMessage); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 28, + Message = " {Server}: Connect task canceled")] + internal static partial void LogInformationConnectTaskCanceled(this ILogger logger, ServerEndPointLogValue server); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 29, + Message = " {Server}: Returned with success as {ServerType} {Role} (Source: {Source})")] + internal static partial void LogInformationServerReturnedSuccess(this ILogger logger, ServerEndPointLogValue server, 
ServerType serverType, string role, string source); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 30, + Message = " {Server}: Returned, but incorrectly")] + internal static partial void LogInformationServerReturnedIncorrectly(this ILogger logger, ServerEndPointLogValue server); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 31, + Message = " {Server}: Did not respond (Task.Status: {TaskStatus})")] + internal static partial void LogInformationServerDidNotRespond(this ILogger logger, ServerEndPointLogValue server, TaskStatus taskStatus); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 32, + Message = "{EndPoint}: Clearing as RedundantPrimary")] + internal static partial void LogInformationClearingAsRedundantPrimary(this ILogger logger, ServerEndPointLogValue endPoint); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 33, + Message = "{EndPoint}: Setting as RedundantPrimary")] + internal static partial void LogInformationSettingAsRedundantPrimary(this ILogger logger, ServerEndPointLogValue endPoint); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 34, + Message = "Cluster: {CoveredSlots} of {TotalSlots} slots covered")] + internal static partial void LogInformationClusterSlotsCovered(this ILogger logger, long coveredSlots, int totalSlots); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 35, + Message = "No subscription changes necessary")] + internal static partial void LogInformationNoSubscriptionChanges(this ILogger logger); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 36, + Message = "Subscriptions attempting reconnect: {SubscriptionChanges}")] + internal static partial void LogInformationSubscriptionsAttemptingReconnect(this ILogger logger, long subscriptionChanges); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 37, + Message = "{StormLog}")] + internal static partial void LogInformationStormLog(this ILogger logger, string 
stormLog); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 38, + Message = "Resetting failing connections to retry...")] + internal static partial void LogInformationResettingFailingConnections(this ILogger logger); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 39, + Message = " Retrying - attempts left: {AttemptsLeft}...")] + internal static partial void LogInformationRetryingAttempts(this ILogger logger, int attemptsLeft); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 40, + Message = "Starting heartbeat...")] + internal static partial void LogInformationStartingHeartbeat(this ILogger logger); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 41, + Message = "Broadcasting reconfigure...")] + internal static partial void LogInformationBroadcastingReconfigure(this ILogger logger); + + [LoggerMessage( + Level = LogLevel.Error, + EventId = 42, + Message = "Encountered error while updating cluster config: {ErrorMessage}")] + internal static partial void LogErrorEncounteredErrorWhileUpdatingClusterConfig(this ILogger logger, Exception exception, string errorMessage); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 43, + Message = "Election summary:")] + internal static partial void LogInformationElectionSummary(this ILogger logger); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 44, + Message = " Election: {Server} had no tiebreaker set")] + internal static partial void LogInformationElectionNoTiebreaker(this ILogger logger, ServerEndPointLogValue server); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 45, + Message = " Election: {Server} nominates: {ServerResult}")] + internal static partial void LogInformationElectionNominates(this ILogger logger, ServerEndPointLogValue server, string serverResult); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 46, + Message = " Election: No primaries detected")] + internal static partial void 
LogInformationElectionNoPrimariesDetected(this ILogger logger); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 47, + Message = " Election: Single primary detected: {EndPoint}")] + internal static partial void LogInformationElectionSinglePrimaryDetected(this ILogger logger, EndPointLogValue endPoint); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 48, + Message = " Election: Multiple primaries detected...")] + internal static partial void LogInformationElectionMultiplePrimariesDetected(this ILogger logger); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 49, + Message = " Election: No nominations by tie-breaker")] + internal static partial void LogInformationElectionNoNominationsByTieBreaker(this ILogger logger); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 50, + Message = " Election: Tie-breaker unanimous: {Unanimous}")] + internal static partial void LogInformationElectionTieBreakerUnanimous(this ILogger logger, string unanimous); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 51, + Message = " Election: Elected: {EndPoint}")] + internal static partial void LogInformationElectionElected(this ILogger logger, EndPointLogValue endPoint); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 52, + Message = " Election is contested:")] + internal static partial void LogInformationElectionContested(this ILogger logger); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 53, + Message = " Election: {Key} has {Value} votes")] + internal static partial void LogInformationElectionVotes(this ILogger logger, string key, int value); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 54, + Message = " Election: Choosing primary arbitrarily: {EndPoint}")] + internal static partial void LogInformationElectionChoosingPrimaryArbitrarily(this ILogger logger, EndPointLogValue endPoint); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 55, + Message 
= "...but we couldn't find that")] + internal static partial void LogInformationCouldNotFindThatEndpoint(this ILogger logger); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 56, + Message = "...but we did find instead: {DeDottedEndpoint}")] + internal static partial void LogInformationFoundAlternativeEndpoint(this ILogger logger, string deDottedEndpoint); + + // ServerEndPoint logging methods + [LoggerMessage( + Level = LogLevel.Information, + EventId = 57, + Message = "{Server}: OnConnectedAsync already connected start")] + internal static partial void LogInformationOnConnectedAsyncAlreadyConnectedStart(this ILogger logger, ServerEndPointLogValue server); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 58, + Message = "{Server}: OnConnectedAsync already connected end")] + internal static partial void LogInformationOnConnectedAsyncAlreadyConnectedEnd(this ILogger logger, ServerEndPointLogValue server); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 59, + Message = "{Server}: OnConnectedAsync init (State={ConnectionState})")] + internal static partial void LogInformationOnConnectedAsyncInit(this ILogger logger, ServerEndPointLogValue server, PhysicalBridge.State? 
connectionState); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 60, + Message = "{Server}: OnConnectedAsync completed ({Result})")] + internal static partial void LogInformationOnConnectedAsyncCompleted(this ILogger logger, ServerEndPointLogValue server, string result); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 61, + Message = "{Server}: Auto-configuring...")] + internal static partial void LogInformationAutoConfiguring(this ILogger logger, ServerEndPointLogValue server); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 62, + Message = "{EndPoint}: Requesting tie-break (Key=\"{TieBreakerKey}\")...")] + internal static partial void LogInformationRequestingTieBreak(this ILogger logger, EndPointLogValue endPoint, RedisKey tieBreakerKey); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 63, + Message = "{Server}: Server handshake")] + internal static partial void LogInformationServerHandshake(this ILogger logger, ServerEndPointLogValue server); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 64, + Message = "{Server}: Authenticating via HELLO")] + internal static partial void LogInformationAuthenticatingViaHello(this ILogger logger, ServerEndPointLogValue server); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 65, + Message = "{Server}: Authenticating (user/password)")] + internal static partial void LogInformationAuthenticatingUserPassword(this ILogger logger, ServerEndPointLogValue server); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 66, + Message = "{Server}: Authenticating (password)")] + internal static partial void LogInformationAuthenticatingPassword(this ILogger logger, ServerEndPointLogValue server); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 67, + Message = "{Server}: Setting client name: {ClientName}")] + internal static partial void LogInformationSettingClientName(this ILogger logger, ServerEndPointLogValue server, 
string clientName); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 68, + Message = "{Server}: Setting client lib/ver")] + internal static partial void LogInformationSettingClientLibVer(this ILogger logger, ServerEndPointLogValue server); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 69, + Message = "{Server}: Sending critical tracer (handshake): {CommandAndKey}")] + internal static partial void LogInformationSendingCriticalTracer(this ILogger logger, ServerEndPointLogValue server, string commandAndKey); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 70, + Message = "{Server}: Flushing outbound buffer")] + internal static partial void LogInformationFlushingOutboundBuffer(this ILogger logger, ServerEndPointLogValue server); + + // ResultProcessor logging methods + [LoggerMessage( + Level = LogLevel.Information, + EventId = 71, + Message = "Response from {BridgeName} / {CommandAndKey}: {Result}")] + internal static partial void LogInformationResponse(this ILogger logger, string? 
bridgeName, string commandAndKey, RawResult result); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 72, + Message = "{Server}: Auto-configured role: replica")] + internal static partial void LogInformationAutoConfiguredRoleReplica(this ILogger logger, ServerEndPointLogValue server); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 73, + Message = "{Server}: Auto-configured (CLIENT) connection-id: {ConnectionId}")] + internal static partial void LogInformationAutoConfiguredClientConnectionId(this ILogger logger, ServerEndPointLogValue server, long connectionId); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 74, + Message = "{Server}: Auto-configured (INFO) role: {Role}")] + internal static partial void LogInformationAutoConfiguredInfoRole(this ILogger logger, ServerEndPointLogValue server, string role); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 75, + Message = "{Server}: Auto-configured (INFO) version: {Version}")] + internal static partial void LogInformationAutoConfiguredInfoVersion(this ILogger logger, ServerEndPointLogValue server, Version version); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 76, + Message = "{Server}: Auto-configured (INFO) server-type: {ServerType}")] + internal static partial void LogInformationAutoConfiguredInfoServerType(this ILogger logger, ServerEndPointLogValue server, ServerType serverType); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 77, + Message = "{Server}: Auto-configured (SENTINEL) server-type: sentinel")] + internal static partial void LogInformationAutoConfiguredSentinelServerType(this ILogger logger, ServerEndPointLogValue server); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 78, + Message = "{Server}: Auto-configured (CONFIG) timeout: {TimeoutSeconds}s")] + internal static partial void LogInformationAutoConfiguredConfigTimeout(this ILogger logger, ServerEndPointLogValue server, int timeoutSeconds); 
+ + [LoggerMessage( + Level = LogLevel.Information, + EventId = 79, + Message = "{Server}: Auto-configured (CONFIG) databases: {DatabaseCount}")] + internal static partial void LogInformationAutoConfiguredConfigDatabases(this ILogger logger, ServerEndPointLogValue server, int databaseCount); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 81, + Message = "{Server}: Auto-configured (CONFIG) read-only replica: {ReadOnlyReplica}")] + internal static partial void LogInformationAutoConfiguredConfigReadOnlyReplica(this ILogger logger, ServerEndPointLogValue server, bool readOnlyReplica); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 82, + Message = "{Server}: Auto-configured (HELLO) server-version: {Version}")] + internal static partial void LogInformationAutoConfiguredHelloServerVersion(this ILogger logger, ServerEndPointLogValue server, Version version); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 83, + Message = "{Server}: Auto-configured (HELLO) protocol: {Protocol}")] + internal static partial void LogInformationAutoConfiguredHelloProtocol(this ILogger logger, ServerEndPointLogValue server, RedisProtocol protocol); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 84, + Message = "{Server}: Auto-configured (HELLO) connection-id: {ConnectionId}")] + internal static partial void LogInformationAutoConfiguredHelloConnectionId(this ILogger logger, ServerEndPointLogValue server, long connectionId); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 85, + Message = "{Server}: Auto-configured (HELLO) server-type: {ServerType}")] + internal static partial void LogInformationAutoConfiguredHelloServerType(this ILogger logger, ServerEndPointLogValue server, ServerType serverType); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 86, + Message = "{Server}: Auto-configured (HELLO) role: {Role}")] + internal static partial void LogInformationAutoConfiguredHelloRole(this ILogger logger, 
ServerEndPointLogValue server, string role); + + // PhysicalBridge logging methods + [LoggerMessage( + Level = LogLevel.Information, + EventId = 87, + Message = "{EndPoint}: OnEstablishingAsync complete")] + internal static partial void LogInformationOnEstablishingComplete(this ILogger logger, EndPointLogValue endPoint); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 88, + Message = "{ErrorMessage}")] + internal static partial void LogInformationConnectionFailureRequested(this ILogger logger, Exception exception, string errorMessage); + + [LoggerMessage( + Level = LogLevel.Error, + EventId = 89, + Message = "{ErrorMessage}")] + internal static partial void LogErrorConnectionIssue(this ILogger logger, Exception exception, string errorMessage); + + [LoggerMessage( + Level = LogLevel.Warning, + EventId = 90, + Message = "Dead socket detected, no reads in {LastReadSecondsAgo} seconds with {TimeoutCount} timeouts, issuing disconnect")] + internal static partial void LogWarningDeadSocketDetected(this ILogger logger, long lastReadSecondsAgo, long timeoutCount); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 91, + Message = "Resurrecting {Bridge} (retry: {RetryCount})")] + internal static partial void LogInformationResurrecting(this ILogger logger, PhysicalBridge bridge, long retryCount); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 92, + Message = "{BridgeName}: Connecting...")] + internal static partial void LogInformationConnecting(this ILogger logger, string bridgeName); + + [LoggerMessage( + Level = LogLevel.Error, + EventId = 93, + Message = "{BridgeName}: Connect failed: {ErrorMessage}")] + internal static partial void LogErrorConnectFailed(this ILogger logger, Exception exception, string bridgeName, string errorMessage); + + // PhysicalConnection logging methods + [LoggerMessage( + Level = LogLevel.Error, + EventId = 94, + Message = "No endpoint")] + internal static partial void LogErrorNoEndpoint(this ILogger 
logger, Exception exception); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 95, + Message = "{EndPoint}: BeginConnectAsync")] + internal static partial void LogInformationBeginConnectAsync(this ILogger logger, EndPointLogValue endPoint); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 96, + Message = "{EndPoint}: Starting read")] + internal static partial void LogInformationStartingRead(this ILogger logger, EndPointLogValue endPoint); + + [LoggerMessage( + Level = LogLevel.Error, + EventId = 97, + Message = "{EndPoint}: (socket shutdown)")] + internal static partial void LogErrorSocketShutdown(this ILogger logger, Exception exception, EndPointLogValue endPoint); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 98, + Message = "Configuring TLS")] + internal static partial void LogInformationConfiguringTLS(this ILogger logger); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 99, + Message = "TLS connection established successfully using protocol: {SslProtocol}")] + internal static partial void LogInformationTLSConnectionEstablished(this ILogger logger, System.Security.Authentication.SslProtocols sslProtocol); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 100, + Message = "{BridgeName}: Connected")] + internal static partial void LogInformationConnected(this ILogger logger, string bridgeName); + + // ConnectionMultiplexer GetStatus logging methods + [LoggerMessage( + Level = LogLevel.Information, + EventId = 101, + Message = "Endpoint Summary:")] + internal static partial void LogInformationEndpointSummaryHeader(this ILogger logger); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 102, + Message = "Server summary: {ServerSummary}, counters: {ServerCounters}, profile: {ServerProfile}")] + internal static partial void LogInformationServerSummary(this ILogger logger, string serverSummary, ServerCounters serverCounters, string serverProfile); + + [LoggerMessage( + Level = 
LogLevel.Information, + EventId = 105, + Message = "Sync timeouts: {SyncTimeouts}; async timeouts: {AsyncTimeouts}; fire and forget: {FireAndForgets}; last heartbeat: {LastHeartbeatSecondsAgo}s ago")] + internal static partial void LogInformationTimeoutsSummary(this ILogger logger, long syncTimeouts, long asyncTimeouts, long fireAndForgets, long lastHeartbeatSecondsAgo); + + // EndPointCollection logging methods + [LoggerMessage( + Level = LogLevel.Information, + EventId = 106, + Message = "Using DNS to resolve '{DnsHost}'...")] + internal static partial void LogInformationUsingDnsToResolve(this ILogger logger, string dnsHost); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 107, + Message = "'{DnsHost}' => {IpAddress}")] + internal static partial void LogInformationDnsResolutionResult(this ILogger logger, string dnsHost, IPAddress ipAddress); + + [LoggerMessage( + Level = LogLevel.Error, + EventId = 108, + Message = "{ErrorMessage}")] + internal static partial void LogErrorDnsResolution(this ILogger logger, Exception exception, string errorMessage); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = 109, + Message = "Service name not defined.")] + internal static partial void LogInformationServiceNameNotDefined(this ILogger logger); +} diff --git a/src/StackExchange.Redis/LoggingPipe.cs b/src/StackExchange.Redis/LoggingPipe.cs index 9adef7768..3c89110ae 100644 --- a/src/StackExchange.Redis/LoggingPipe.cs +++ b/src/StackExchange.Redis/LoggingPipe.cs @@ -1,10 +1,4 @@ -using System; -using System.Buffers; -using System.IO; -using System.IO.Pipelines; -using System.Runtime.InteropServices; - -namespace StackExchange.Redis +namespace StackExchange.Redis { #if LOGOUTPUT sealed class LoggingPipe : IDuplexPipe @@ -54,7 +48,7 @@ private async Task CloneAsync(string path, PipeReader from, PipeWriter to) while(true) { - var result = await from.ReadAsync(); + var result = await from.ReadAsync().ForAwait(); var buffer = result.Buffer; if 
(result.IsCompleted && buffer.IsEmpty) break; diff --git a/src/StackExchange.Redis/LuaScript.cs b/src/StackExchange.Redis/LuaScript.cs index a88340b95..7a99f635a 100644 --- a/src/StackExchange.Redis/LuaScript.cs +++ b/src/StackExchange.Redis/LuaScript.cs @@ -1,6 +1,7 @@ using System; using System.Collections; using System.Collections.Concurrent; +using System.ComponentModel; using System.Threading.Tasks; namespace StackExchange.Redis @@ -12,16 +13,18 @@ namespace StackExchange.Redis /// Public fields and properties of the passed in object are treated as parameters. /// /// - /// Parameters of type RedisKey are sent to Redis as KEY (https://redis.io/commands/eval) in addition to arguments, + /// Parameters of type RedisKey are sent to Redis as KEY (https://redis.io/commands/eval) in addition to arguments, /// so as to play nicely with Redis Cluster. /// /// All members of this class are thread safe. /// public sealed class LuaScript { - // Since the mapping of "script text" -> LuaScript doesn't depend on any particular details of - // the redis connection itself, this cache is global. - private static readonly ConcurrentDictionary Cache = new ConcurrentDictionary(); + /// + /// Since the mapping of "script text" -> LuaScript doesn't depend on any particular details of + /// the redis connection itself, this cache is global. + /// + private static readonly ConcurrentDictionary Cache = new(); /// /// The original Lua script that was used to create this. @@ -34,12 +37,14 @@ public sealed class LuaScript /// public string ExecutableScript { get; } - // Arguments are in the order they have to passed to the script in + /// + /// Arguments are in the order they have to passed to the script in. + /// internal string[] Arguments { get; } private bool HasArguments => Arguments?.Length > 0; - private readonly Hashtable ParameterMappers; + private readonly Hashtable? 
ParameterMappers; internal LuaScript(string originalScript, string executableScript, string[] arguments) { @@ -54,8 +59,7 @@ internal LuaScript(string originalScript, string executableScript, string[] argu } /// - /// Finalizer, used to prompt cleanups of the script cache when - /// a LuaScript reference goes out of scope. + /// Finalizer - used to prompt cleanups of the script cache when a LuaScript reference goes out of scope. /// ~LuaScript() { @@ -84,9 +88,7 @@ internal LuaScript(string originalScript, string executableScript, string[] argu /// The script to prepare. public static LuaScript Prepare(string script) { - LuaScript ret; - - if (!Cache.TryGetValue(script, out WeakReference weakRef) || (ret = (LuaScript)weakRef.Target) == null) + if (!Cache.TryGetValue(script, out WeakReference? weakRef) || weakRef.Target is not LuaScript ret) { ret = ScriptParameterMapper.PrepareScript(script); Cache[script] = new WeakReference(ret); @@ -95,22 +97,22 @@ public static LuaScript Prepare(string script) return ret; } - internal void ExtractParameters(object ps, RedisKey? keyPrefix, out RedisKey[] keys, out RedisValue[] args) + internal void ExtractParameters(object? ps, RedisKey? keyPrefix, out RedisKey[]? keys, out RedisValue[]? args) { if (HasArguments) { if (ps == null) throw new ArgumentNullException(nameof(ps), "Script requires parameters"); var psType = ps.GetType(); - var mapper = (Func)ParameterMappers[psType]; + var mapper = (Func?)ParameterMappers![psType]; if (mapper == null) { lock (ParameterMappers) { - mapper = (Func)ParameterMappers[psType]; + mapper = (Func?)ParameterMappers[psType]; if (mapper == null) { - if (!ScriptParameterMapper.IsValidParameterHash(psType, this, out string missingMember, out string badMemberType)) + if (!ScriptParameterMapper.IsValidParameterHash(psType, this, out string? missingMember, out string? badMemberType)) { if (missingMember != null) { @@ -143,10 +145,10 @@ internal void ExtractParameters(object ps, RedisKey? 
keyPrefix, out RedisKey[] k /// The parameter object to use. /// The key prefix to use, if any. /// The command flags to use. - public RedisResult Evaluate(IDatabase db, object ps = null, RedisKey? withKeyPrefix = null, CommandFlags flags = CommandFlags.None) + public RedisResult Evaluate(IDatabase db, object? ps = null, RedisKey? withKeyPrefix = null, CommandFlags flags = CommandFlags.None) { - ExtractParameters(ps, withKeyPrefix, out RedisKey[] keys, out RedisValue[] args); - return db.ScriptEvaluate(ExecutableScript, keys, args, flags); + ExtractParameters(ps, withKeyPrefix, out RedisKey[]? keys, out RedisValue[]? args); + return db.ScriptEvaluate(script: ExecutableScript, keys: keys, values: args, flags: flags); } /// @@ -156,18 +158,18 @@ public RedisResult Evaluate(IDatabase db, object ps = null, RedisKey? withKeyPre /// The parameter object to use. /// The key prefix to use, if any. /// The command flags to use. - public Task EvaluateAsync(IDatabaseAsync db, object ps = null, RedisKey? withKeyPrefix = null, CommandFlags flags = CommandFlags.None) + public Task EvaluateAsync(IDatabaseAsync db, object? ps = null, RedisKey? withKeyPrefix = null, CommandFlags flags = CommandFlags.None) { - ExtractParameters(ps, withKeyPrefix, out RedisKey[] keys, out RedisValue[] args); - return db.ScriptEvaluateAsync(ExecutableScript, keys, args, flags); + ExtractParameters(ps, withKeyPrefix, out RedisKey[]? keys, out RedisValue[]? args); + return db.ScriptEvaluateAsync(script: ExecutableScript, keys: keys, values: args, flags: flags); } /// /// /// Loads this LuaScript into the given IServer so it can be run with it's SHA1 hash, instead of - /// passing the full script on each Evaluate or EvaluateAsync call. + /// using the implicit SHA1 hash that's calculated after the script is sent to the server for the first time. /// - /// Note: the FireAndForget command flag cannot be set + /// Note: the FireAndForget command flag cannot be set. /// /// The server to load the script on. 
/// The command flags to use. @@ -179,15 +181,15 @@ public LoadedLuaScript Load(IServer server, CommandFlags flags = CommandFlags.No } var hash = server.ScriptLoad(ExecutableScript, flags); - return new LoadedLuaScript(this, hash); + return new LoadedLuaScript(this, hash!); // not nullable because fire and forget is disabled } /// /// /// Loads this LuaScript into the given IServer so it can be run with it's SHA1 hash, instead of - /// passing the full script on each Evaluate or EvaluateAsync call. + /// using the implicit SHA1 hash that's calculated after the script is sent to the server for the first time. /// - /// Note: the FireAndForget command flag cannot be set + /// Note: the FireAndForget command flag cannot be set. /// /// The server to load the script on. /// The command flags to use. @@ -198,8 +200,8 @@ public async Task LoadAsync(IServer server, CommandFlags flags throw new ArgumentOutOfRangeException(nameof(flags), "Loading a script cannot be FireAndForget"); } - var hash = await server.ScriptLoadAsync(ExecutableScript, flags).ForAwait(); - return new LoadedLuaScript(this, hash); + var hash = await server.ScriptLoadAsync(ExecutableScript, flags).ForAwait()!; + return new LoadedLuaScript(this, hash!); // not nullable because fire and forget is disabled } } @@ -218,7 +220,7 @@ public async Task LoadAsync(IServer server, CommandFlags flags /// Public fields and properties of the passed in object are treated as parameters. /// /// - /// Parameters of type RedisKey are sent to Redis as KEY (https://redis.io/commands/eval) in addition to arguments, + /// Parameters of type RedisKey are sent to Redis as KEY (https://redis.io/commands/eval) in addition to arguments, /// so as to play nicely with Redis Cluster. /// /// All members of this class are thread safe. @@ -239,6 +241,8 @@ public sealed class LoadedLuaScript /// The SHA1 hash of ExecutableScript. /// This is sent to Redis instead of ExecutableScript during Evaluate and EvaluateAsync calls. 
/// + /// Be aware that using hash directly is not resilient to Redis server restarts. + [EditorBrowsable(EditorBrowsableState.Never)] public byte[] Hash { get; } // internal for testing purposes only @@ -253,35 +257,37 @@ internal LoadedLuaScript(LuaScript original, byte[] hash) /// /// Evaluates this LoadedLuaScript against the given database, extracting parameters for the passed in object if any. /// - /// This method sends the SHA1 hash of the ExecutableScript instead of the script itself. If the script has not - /// been loaded into the passed Redis instance it will fail. + /// This method sends the SHA1 hash of the ExecutableScript instead of the script itself. + /// If the script has not been loaded into the passed Redis instance, it will fail. /// /// /// The redis database to evaluate against. /// The parameter object to use. /// The key prefix to use, if any. /// The command flags to use. - public RedisResult Evaluate(IDatabase db, object ps = null, RedisKey? withKeyPrefix = null, CommandFlags flags = CommandFlags.None) + public RedisResult Evaluate(IDatabase db, object? ps = null, RedisKey? withKeyPrefix = null, CommandFlags flags = CommandFlags.None) { - Original.ExtractParameters(ps, withKeyPrefix, out RedisKey[] keys, out RedisValue[] args); - return db.ScriptEvaluate(Hash, keys, args, flags); + Original.ExtractParameters(ps, withKeyPrefix, out RedisKey[]? keys, out RedisValue[]? args); + + return db.ScriptEvaluate(script: ExecutableScript, keys: keys, values: args, flags: flags); } /// /// Evaluates this LoadedLuaScript against the given database, extracting parameters for the passed in object if any. /// - /// This method sends the SHA1 hash of the ExecutableScript instead of the script itself. If the script has not - /// been loaded into the passed Redis instance it will fail. + /// This method sends the SHA1 hash of the ExecutableScript instead of the script itself. 
+ /// If the script has not been loaded into the passed Redis instance, it will fail. /// /// /// The redis database to evaluate against. /// The parameter object to use. /// The key prefix to use, if any. /// The command flags to use. - public Task EvaluateAsync(IDatabaseAsync db, object ps = null, RedisKey? withKeyPrefix = null, CommandFlags flags = CommandFlags.None) + public Task EvaluateAsync(IDatabaseAsync db, object? ps = null, RedisKey? withKeyPrefix = null, CommandFlags flags = CommandFlags.None) { - Original.ExtractParameters(ps, withKeyPrefix, out RedisKey[] keys, out RedisValue[] args); - return db.ScriptEvaluateAsync(Hash, keys, args, flags); + Original.ExtractParameters(ps, withKeyPrefix, out RedisKey[]? keys, out RedisValue[]? args); + + return db.ScriptEvaluateAsync(script: ExecutableScript, keys: keys, values: args, flags: flags); } } } diff --git a/src/StackExchange.Redis/Maintenance/AzureMaintenanceEvent.cs b/src/StackExchange.Redis/Maintenance/AzureMaintenanceEvent.cs new file mode 100644 index 000000000..0a5874c29 --- /dev/null +++ b/src/StackExchange.Redis/Maintenance/AzureMaintenanceEvent.cs @@ -0,0 +1,202 @@ +using System; +using System.Globalization; +using System.Net; +using System.Threading.Tasks; + +namespace StackExchange.Redis.Maintenance +{ + /// + /// Azure node maintenance event. For more information, please see: . + /// + public sealed class AzureMaintenanceEvent : ServerMaintenanceEvent + { + private const string PubSubChannelName = "AzureRedisEvents"; + + internal AzureMaintenanceEvent(string? azureEvent) + { + if (azureEvent == null) + { + return; + } + + // The message consists of key-value pairs delimited by pipes. 
For example, a message might look like: + // NotificationType|NodeMaintenanceStarting|StartTimeUtc|2021-09-23T12:34:19|IsReplica|False|IpAddress|13.67.42.199|SSLPort|15001|NonSSLPort|13001 + var message = azureEvent.AsSpan(); + try + { + while (message.Length > 0) + { + if (message[0] == '|') + { + message = message.Slice(1); + continue; + } + + // Grab the next pair + var nextDelimiter = message.IndexOf('|'); + if (nextDelimiter < 0) + { + // The rest of the message is not a key-value pair and is therefore malformed. Stop processing it. + break; + } + + if (nextDelimiter == message.Length - 1) + { + // The message is missing the value for this key-value pair. It is malformed so we stop processing it. + break; + } + + var key = message.Slice(0, nextDelimiter); + message = message.Slice(key.Length + 1); + + var valueEnd = message.IndexOf('|'); + var value = valueEnd > -1 ? message.Slice(0, valueEnd) : message; + message = message.Slice(value.Length); + + if (key.Length > 0 && value.Length > 0) + { +#if NET + switch (key) + { + case var _ when key.SequenceEqual(nameof(NotificationType).AsSpan()): + NotificationTypeString = value.ToString(); + NotificationType = ParseNotificationType(NotificationTypeString); + break; + case var _ when key.SequenceEqual("StartTimeInUTC".AsSpan()) && DateTime.TryParseExact(value, "s", CultureInfo.InvariantCulture, DateTimeStyles.AssumeUniversal, out DateTime startTime): + StartTimeUtc = DateTime.SpecifyKind(startTime, DateTimeKind.Utc); + break; + case var _ when key.SequenceEqual(nameof(IsReplica).AsSpan()) && bool.TryParse(value, out var isReplica): + IsReplica = isReplica; + break; + case var _ when key.SequenceEqual(nameof(IPAddress).AsSpan()) && IPAddress.TryParse(value, out var ipAddress): + IPAddress = ipAddress; + break; + case var _ when key.SequenceEqual("SSLPort".AsSpan()) && Format.TryParseInt32(value, out var port): + SslPort = port; + break; + case var _ when key.SequenceEqual("NonSSLPort".AsSpan()) && 
Format.TryParseInt32(value, out var nonsslport): + NonSslPort = nonsslport; + break; + } +#else + switch (key) + { + case var _ when key.SequenceEqual(nameof(NotificationType).AsSpan()): + NotificationTypeString = value.ToString(); + NotificationType = ParseNotificationType(NotificationTypeString); + break; + case var _ when key.SequenceEqual("StartTimeInUTC".AsSpan()) && DateTime.TryParseExact(value.ToString(), "s", CultureInfo.InvariantCulture, DateTimeStyles.AssumeUniversal, out DateTime startTime): + StartTimeUtc = DateTime.SpecifyKind(startTime, DateTimeKind.Utc); + break; + case var _ when key.SequenceEqual(nameof(IsReplica).AsSpan()) && bool.TryParse(value.ToString(), out var isReplica): + IsReplica = isReplica; + break; + case var _ when key.SequenceEqual(nameof(IPAddress).AsSpan()) && IPAddress.TryParse(value.ToString(), out var ipAddress): + IPAddress = ipAddress; + break; + case var _ when key.SequenceEqual("SSLPort".AsSpan()) && Format.TryParseInt32(value.ToString(), out var port): + SslPort = port; + break; + case var _ when key.SequenceEqual("NonSSLPort".AsSpan()) && Format.TryParseInt32(value.ToString(), out var nonsslport): + NonSslPort = nonsslport; + break; + } +#endif + } + } + } + catch + { + // TODO: Append to rolling debug log when it's present + } + } + + internal static async Task AddListenerAsync(ConnectionMultiplexer multiplexer, Action? 
log = null) + { + if (!multiplexer.CommandMap.IsAvailable(RedisCommand.SUBSCRIBE)) + { + return; + } + + try + { + var sub = multiplexer.GetSubscriber(); + // ReSharper disable once ConditionIsAlwaysTrueOrFalseAccordingToNullableAPIContract + if (sub is null) + { + log?.Invoke("Failed to GetSubscriber for AzureRedisEvents"); + return; + } + + await sub.SubscribeAsync(RedisChannel.Literal(PubSubChannelName), (_, message) => + { + try + { + var newMessage = new AzureMaintenanceEvent(message!); + newMessage.NotifyMultiplexer(multiplexer); + + switch (newMessage.NotificationType) + { + case AzureNotificationType.NodeMaintenanceEnded: + case AzureNotificationType.NodeMaintenanceFailoverComplete: + case AzureNotificationType.NodeMaintenanceScaleComplete: + multiplexer.ReconfigureAsync($"Azure Event: {newMessage.NotificationType.ToString()}").RedisFireAndForget(); + break; + } + } + catch (Exception e) + { + log?.Invoke($"Encountered exception: {e}"); + } + }).ForAwait(); + } + catch (Exception e) + { + log?.Invoke($"Encountered exception: {e}"); + } + } + + /// + /// Indicates the type of event (raw string form). + /// + public string NotificationTypeString { get; } = "Unknown"; + + /// + /// The parsed version of for easier consumption. + /// + public AzureNotificationType NotificationType { get; } + + /// + /// Indicates if the event is for a replica node. + /// + public bool IsReplica { get; } + + /// + /// IPAddress of the node event is intended for. + /// + public IPAddress? IPAddress { get; } + + /// + /// SSL Port. + /// + public int SslPort { get; } + + /// + /// Non-SSL port. 
+ /// + public int NonSslPort { get; } + + private static AzureNotificationType ParseNotificationType(string typeString) => typeString switch + { + "NodeMaintenanceScheduled" => AzureNotificationType.NodeMaintenanceScheduled, + "NodeMaintenanceStarting" => AzureNotificationType.NodeMaintenanceStarting, + "NodeMaintenanceStart" => AzureNotificationType.NodeMaintenanceStart, + "NodeMaintenanceEnded" => AzureNotificationType.NodeMaintenanceEnded, + // This is temporary until server changes go into effect - to be removed in later versions + "NodeMaintenanceFailover" => AzureNotificationType.NodeMaintenanceFailoverComplete, + "NodeMaintenanceFailoverComplete" => AzureNotificationType.NodeMaintenanceFailoverComplete, + "NodeMaintenanceScaleComplete" => AzureNotificationType.NodeMaintenanceScaleComplete, + _ => AzureNotificationType.Unknown, + }; + } +} diff --git a/src/StackExchange.Redis/Maintenance/AzureNotificationType.cs b/src/StackExchange.Redis/Maintenance/AzureNotificationType.cs new file mode 100644 index 000000000..13237f914 --- /dev/null +++ b/src/StackExchange.Redis/Maintenance/AzureNotificationType.cs @@ -0,0 +1,43 @@ +namespace StackExchange.Redis.Maintenance +{ + /// + /// The types of notifications that Azure is sending for events happening. + /// + public enum AzureNotificationType + { + /// + /// Unrecognized event type, likely needs a library update to recognize new events. + /// + Unknown, + + /// + /// Indicates that a maintenance event is scheduled. May be several minutes from now. + /// + NodeMaintenanceScheduled, + + /// + /// This event gets fired ~20s before maintenance begins. + /// + NodeMaintenanceStarting, + + /// + /// This event gets fired when maintenance is imminent (<5s). + /// + NodeMaintenanceStart, + + /// + /// Indicates that the node maintenance operation is over. + /// + NodeMaintenanceEnded, + + /// + /// Indicates that a replica has been promoted to primary. 
+ /// + NodeMaintenanceFailoverComplete, + + /// + /// Indicates that a scale event (adding or removing nodes) has completed for a cluster. + /// + NodeMaintenanceScaleComplete, + } +} diff --git a/src/StackExchange.Redis/Maintenance/ServerMaintenanceEvent.cs b/src/StackExchange.Redis/Maintenance/ServerMaintenanceEvent.cs new file mode 100644 index 000000000..cb0d43c6c --- /dev/null +++ b/src/StackExchange.Redis/Maintenance/ServerMaintenanceEvent.cs @@ -0,0 +1,41 @@ +using System; + +namespace StackExchange.Redis.Maintenance +{ + /// + /// Base class for all server maintenance events. + /// + public class ServerMaintenanceEvent + { + internal ServerMaintenanceEvent() + { + ReceivedTimeUtc = DateTime.UtcNow; + } + + /// + /// Raw message received from the server. + /// + public string? RawMessage { get; protected set; } + + /// + /// The time the event was received. If we know when the event is expected to start will be populated. + /// + public DateTime ReceivedTimeUtc { get; } + + /// + /// Indicates the expected start time of the event. + /// + public DateTime? StartTimeUtc { get; protected set; } + + /// + /// Returns a string representing the maintenance event with all of its properties. + /// + public override string? ToString() => RawMessage; + + /// + /// Notifies a ConnectionMultiplexer of this event, for anyone observing its handler. 
+ /// + protected void NotifyMultiplexer(ConnectionMultiplexer multiplexer) + => multiplexer.OnServerMaintenanceEvent(this); + } +} diff --git a/src/StackExchange.Redis/Message.ValueCondition.cs b/src/StackExchange.Redis/Message.ValueCondition.cs new file mode 100644 index 000000000..53ddc651b --- /dev/null +++ b/src/StackExchange.Redis/Message.ValueCondition.cs @@ -0,0 +1,58 @@ +using System; + +namespace StackExchange.Redis; + +internal partial class Message +{ + public static Message Create(int db, CommandFlags flags, RedisCommand command, in RedisKey key, in ValueCondition when) + => new KeyConditionMessage(db, flags, command, key, when); + + public static Message Create(int db, CommandFlags flags, RedisCommand command, in RedisKey key, in RedisValue value, Expiration expiry, in ValueCondition when) + => new KeyValueExpiryConditionMessage(db, flags, command, key, value, expiry, when); + + private sealed class KeyConditionMessage( + int db, + CommandFlags flags, + RedisCommand command, + in RedisKey key, + in ValueCondition when) + : CommandKeyBase(db, flags, command, key) + { + private readonly ValueCondition _when = when; + + public override int ArgCount => 1 + _when.TokenCount; + + protected override void WriteImpl(PhysicalConnection physical) + { + physical.WriteHeader(Command, ArgCount); + physical.Write(Key); + _when.WriteTo(physical); + } + } + + private sealed class KeyValueExpiryConditionMessage( + int db, + CommandFlags flags, + RedisCommand command, + in RedisKey key, + in RedisValue value, + Expiration expiry, + in ValueCondition when) + : CommandKeyBase(db, flags, command, key) + { + private readonly RedisValue _value = value; + private readonly ValueCondition _when = when; + private readonly Expiration _expiry = expiry; + + public override int ArgCount => 2 + _expiry.TokenCount + _when.TokenCount; + + protected override void WriteImpl(PhysicalConnection physical) + { + physical.WriteHeader(Command, ArgCount); + physical.Write(Key); + 
physical.WriteBulkString(_value); + _expiry.WriteTo(physical); + _when.WriteTo(physical); + } + } +} diff --git a/src/StackExchange.Redis/Message.cs b/src/StackExchange.Redis/Message.cs index 574557bfb..0ffcf4256 100644 --- a/src/StackExchange.Redis/Message.cs +++ b/src/StackExchange.Redis/Message.cs @@ -1,28 +1,27 @@ using System; +using System.Buffers.Binary; using System.Collections.Generic; using System.Diagnostics; -using System.IO; using System.Linq; using System.Runtime.CompilerServices; using System.Text; using System.Threading; -using System.Threading.Tasks; +using Microsoft.Extensions.Logging; using StackExchange.Redis.Profiling; -using static StackExchange.Redis.ConnectionMultiplexer; namespace StackExchange.Redis { internal sealed class LoggingMessage : Message { - public readonly LogProxy log; + public readonly ILogger log; private readonly Message tail; - public static Message Create(LogProxy log, Message tail) + public static Message Create(ILogger? log, Message tail) { return log == null ? 
tail : new LoggingMessage(log, tail); } - private LoggingMessage(LogProxy log, Message tail) : base(tail.Db, tail.Flags, tail.Command) + private LoggingMessage(ILogger log, Message tail) : base(tail.Db, tail.Flags, tail.Command) { this.log = log; this.tail = tail; @@ -40,32 +39,21 @@ protected override void WriteImpl(PhysicalConnection physical) try { var bridge = physical.BridgeCouldBeNull; - log?.WriteLine($"{bridge.Name}: Writing: {tail.CommandAndKey}"); + log?.LogTrace($"{bridge?.Name}: Writing: {tail.CommandAndKey}"); } catch { } tail.WriteTo(physical); } public override int ArgCount => tail.ArgCount; - public LogProxy Log => log; + public ILogger Log => log; } - internal abstract class Message : ICompletable + internal abstract partial class Message : ICompletable { public readonly int Db; -#if DEBUG - internal int QueuePosition { get; private set; } - internal PhysicalConnection.WriteStatus ConnectionWriteState { get; private set; } -#endif - [Conditional("DEBUG")] - internal void SetBacklogState(int position, PhysicalConnection physical) - { -#if DEBUG - QueuePosition = position; - ConnectionWriteState = physical?.GetWriteStatus() ?? 
PhysicalConnection.WriteStatus.NA; -#endif - } + private uint _highIntegrityToken; internal const CommandFlags InternalCallFlag = (CommandFlags)128; @@ -73,30 +61,30 @@ internal void SetBacklogState(int position, PhysicalConnection physical) private const CommandFlags AskingFlag = (CommandFlags)32, ScriptUnavailableFlag = (CommandFlags)256, - NeedsAsyncTimeoutCheckFlag = (CommandFlags)1024; + DemandSubscriptionConnection = (CommandFlags)2048; - private const CommandFlags MaskMasterServerPreference = CommandFlags.DemandMaster - | CommandFlags.DemandReplica - | CommandFlags.PreferMaster - | CommandFlags.PreferReplica; + private const CommandFlags MaskPrimaryServerPreference = CommandFlags.DemandMaster + | CommandFlags.DemandReplica + | CommandFlags.PreferMaster + | CommandFlags.PreferReplica; private const CommandFlags UserSelectableFlags = CommandFlags.None | CommandFlags.DemandMaster | CommandFlags.DemandReplica | CommandFlags.PreferMaster | CommandFlags.PreferReplica -#pragma warning disable CS0618 +#pragma warning disable CS0618 // Type or member is obsolete | CommandFlags.HighPriority #pragma warning restore CS0618 | CommandFlags.FireAndForget | CommandFlags.NoRedirect | CommandFlags.NoScriptCache; - private IResultBox resultBox; + private IResultBox? resultBox; - private ResultProcessor resultProcessor; + private ResultProcessor? resultProcessor; // All for profiling purposes - private ProfiledCommand performance; + private ProfiledCommand? 
performance; internal DateTime CreatedDateTime; internal long CreatedTimestamp; @@ -122,30 +110,30 @@ protected Message(int db, CommandFlags flags, RedisCommand command) } } - bool masterOnly = IsMasterOnly(command); + bool primaryOnly = command.IsPrimaryOnly(); Db = db; this.command = command; Flags = flags & UserSelectableFlags; - if (masterOnly) SetMasterOnly(); + if (primaryOnly) SetPrimaryOnly(); CreatedDateTime = DateTime.UtcNow; - CreatedTimestamp = System.Diagnostics.Stopwatch.GetTimestamp(); + CreatedTimestamp = Stopwatch.GetTimestamp(); Status = CommandStatus.WaitingToBeSent; } - internal void SetMasterOnly() + internal void SetPrimaryOnly() { - switch (GetMasterReplicaFlags(Flags)) + switch (GetPrimaryReplicaFlags(Flags)) { case CommandFlags.DemandReplica: - throw ExceptionFactory.MasterOnly(false, command, null, null); + throw ExceptionFactory.PrimaryOnly(false, command, null, null); case CommandFlags.DemandMaster: // already fine as-is break; case CommandFlags.PreferMaster: case CommandFlags.PreferReplica: - default: // we will run this on the master, then - Flags = SetMasterReplicaFlags(Flags, CommandFlags.DemandMaster); + default: // we will run this on the primary, then + Flags = SetPrimaryReplicaFlags(Flags, CommandFlags.DemandMaster); break; } } @@ -166,7 +154,7 @@ internal void PrepareToResend(ServerEndPoint resendTo, bool isMoved) performance = null; CreatedDateTime = DateTime.UtcNow; - CreatedTimestamp = System.Diagnostics.Stopwatch.GetTimestamp(); + CreatedTimestamp = Stopwatch.GetTimestamp(); performance = ProfiledCommand.NewAttachedToSameContext(oldPerformance, resendTo, isMoved); performance.SetMessage(this); Status = CommandStatus.WaitingToBeSent; @@ -178,7 +166,7 @@ internal void PrepareToResend(ServerEndPoint resendTo, bool isMoved) public virtual string CommandAndKey => Command.ToString(); /// - /// Things with the potential to cause harm, or to reveal configuration information + /// Things with the potential to cause harm, or to reveal 
configuration information. /// public bool IsAdmin { @@ -194,6 +182,7 @@ public bool IsAdmin case RedisCommand.DEBUG: case RedisCommand.FLUSHALL: case RedisCommand.FLUSHDB: + case RedisCommand.HOTKEYS: case RedisCommand.INFO: case RedisCommand.KEYS: case RedisCommand.MONITOR: @@ -213,17 +202,21 @@ public bool IsAdmin public bool IsAsking => (Flags & AskingFlag) != 0; + public bool IsHighIntegrity => _highIntegrityToken != 0; + + public uint HighIntegrityToken => _highIntegrityToken; + + internal void WithHighIntegrity(uint value) + => _highIntegrityToken = value; + internal bool IsScriptUnavailable => (Flags & ScriptUnavailableFlag) != 0; - internal void SetScriptUnavailable() - { - Flags |= ScriptUnavailableFlag; - } + internal void SetScriptUnavailable() => Flags |= ScriptUnavailableFlag; public bool IsFireAndForget => (Flags & CommandFlags.FireAndForget) != 0; public bool IsInternalCall => (Flags & InternalCallFlag) != 0; - public IResultBox ResultBox => resultBox; + public IResultBox? 
ResultBox => resultBox; public abstract int ArgCount { get; } // note: over-estimate if necessary @@ -234,64 +227,46 @@ public static Message Create(int db, CommandFlags flags, RedisCommand command) return new CommandMessage(db, flags, command); } - public static Message Create(int db, CommandFlags flags, RedisCommand command, in RedisKey key) - { - return new CommandKeyMessage(db, flags, command, key); - } + public static Message Create(int db, CommandFlags flags, RedisCommand command, in RedisKey key) => + new CommandKeyMessage(db, flags, command, key); - public static Message Create(int db, CommandFlags flags, RedisCommand command, in RedisKey key0, in RedisKey key1) - { - return new CommandKeyKeyMessage(db, flags, command, key0, key1); - } + public static Message Create(int db, CommandFlags flags, RedisCommand command, in RedisKey key0, in RedisKey key1) => + new CommandKeyKeyMessage(db, flags, command, key0, key1); - public static Message Create(int db, CommandFlags flags, RedisCommand command, in RedisKey key0, in RedisKey key1, in RedisValue value) - { - return new CommandKeyKeyValueMessage(db, flags, command, key0, key1, value); - } + public static Message Create(int db, CommandFlags flags, RedisCommand command, in RedisKey key0, in RedisKey key1, in RedisValue value) => + new CommandKeyKeyValueMessage(db, flags, command, key0, key1, value); - public static Message Create(int db, CommandFlags flags, RedisCommand command, in RedisKey key0, in RedisKey key1, in RedisKey key2) - { - return new CommandKeyKeyKeyMessage(db, flags, command, key0, key1, key2); - } + public static Message Create(int db, CommandFlags flags, RedisCommand command, in RedisKey key0, in RedisKey key1, in RedisKey key2) => + new CommandKeyKeyKeyMessage(db, flags, command, key0, key1, key2); - public static Message Create(int db, CommandFlags flags, RedisCommand command, in RedisValue value) - { - return new CommandValueMessage(db, flags, command, value); - } + public static Message 
Create(int db, CommandFlags flags, RedisCommand command, in RedisValue value) => + new CommandValueMessage(db, flags, command, value); - public static Message Create(int db, CommandFlags flags, RedisCommand command, in RedisKey key, in RedisValue value) - { - return new CommandKeyValueMessage(db, flags, command, key, value); - } + public static Message Create(int db, CommandFlags flags, RedisCommand command, in RedisKey key, in RedisValue value) => + new CommandKeyValueMessage(db, flags, command, key, value); - public static Message Create(int db, CommandFlags flags, RedisCommand command, in RedisChannel channel) - { - return new CommandChannelMessage(db, flags, command, channel); - } + public static Message Create(int db, CommandFlags flags, RedisCommand command, in RedisChannel channel) => + new CommandChannelMessage(db, flags, command, channel); - public static Message Create(int db, CommandFlags flags, RedisCommand command, in RedisChannel channel, in RedisValue value) - { - return new CommandChannelValueMessage(db, flags, command, channel, value); - } + public static Message Create(int db, CommandFlags flags, RedisCommand command, in RedisChannel channel, in RedisValue value) => + new CommandChannelValueMessage(db, flags, command, channel, value); - public static Message Create(int db, CommandFlags flags, RedisCommand command, in RedisValue value, in RedisChannel channel) - { - return new CommandValueChannelMessage(db, flags, command, value, channel); - } + public static Message Create(int db, CommandFlags flags, RedisCommand command, in RedisValue value, in RedisChannel channel) => + new CommandValueChannelMessage(db, flags, command, value, channel); - public static Message Create(int db, CommandFlags flags, RedisCommand command, in RedisKey key, in RedisValue value0, in RedisValue value1) - { - return new CommandKeyValueValueMessage(db, flags, command, key, value0, value1); - } + public static Message Create(int db, CommandFlags flags, RedisCommand command, 
in RedisKey key, in RedisValue value0, in RedisValue value1) => + new CommandKeyValueValueMessage(db, flags, command, key, value0, value1); - public static Message Create(int db, CommandFlags flags, RedisCommand command, in RedisKey key, in RedisValue value0, in RedisValue value1, in RedisValue value2) - { - return new CommandKeyValueValueValueMessage(db, flags, command, key, value0, value1, value2); - } + public static Message Create(int db, CommandFlags flags, RedisCommand command, in RedisKey key, in RedisValue value0, in RedisValue value1, in RedisValue value2) => + new CommandKeyValueValueValueMessage(db, flags, command, key, value0, value1, value2); public static Message Create(int db, CommandFlags flags, RedisCommand command, in RedisKey key, GeoEntry[] values) { +#if NET + ArgumentNullException.ThrowIfNull(values); +#else if (values == null) throw new ArgumentNullException(nameof(values)); +#endif if (values.Length == 0) { throw new ArgumentOutOfRangeException(nameof(values)); @@ -299,7 +274,7 @@ public static Message Create(int db, CommandFlags flags, RedisCommand command, i if (values.Length == 1) { var value = values[0]; - return Message.Create(db, flags, command, key, value.Longitude, value.Latitude, value.Member); + return Create(db, flags, command, key, value.Longitude, value.Latitude, value.Member); } var arr = new RedisValue[3 * values.Length]; int index = 0; @@ -312,116 +287,121 @@ public static Message Create(int db, CommandFlags flags, RedisCommand command, i return new CommandKeyValuesMessage(db, flags, command, key, arr); } - public static Message Create(int db, CommandFlags flags, RedisCommand command, in RedisKey key, in RedisValue value0, in RedisValue value1, in RedisValue value2, in RedisValue value3) - { - return new CommandKeyValueValueValueValueMessage(db, flags, command, key, value0, value1, value2, value3); - } - - public static Message Create(int db, CommandFlags flags, RedisCommand command, in RedisValue value0, in RedisValue 
value1) - { - return new CommandValueValueMessage(db, flags, command, value0, value1); - } - - public static Message Create(int db, CommandFlags flags, RedisCommand command, in RedisValue value, in RedisKey key) - { - return new CommandValueKeyMessage(db, flags, command, value, key); - } - - public static Message Create(int db, CommandFlags flags, RedisCommand command, in RedisValue value0, in RedisValue value1, in RedisValue value2) - { - return new CommandValueValueValueMessage(db, flags, command, value0, value1, value2); - } - - public static Message Create(int db, CommandFlags flags, RedisCommand command, in RedisValue value0, in RedisValue value1, in RedisValue value2, in RedisValue value3, in RedisValue value4) - { - return new CommandValueValueValueValueValueMessage(db, flags, command, value0, value1, value2, value3, value4); - } - - public static Message CreateInSlot(int db, int slot, CommandFlags flags, RedisCommand command, RedisValue[] values) - { - return new CommandSlotValuesMessage(db, slot, flags, command, values); - } - - public static bool IsMasterOnly(RedisCommand command) - { - switch (command) - { - case RedisCommand.APPEND: - case RedisCommand.BITOP: - case RedisCommand.BLPOP: - case RedisCommand.BRPOP: - case RedisCommand.BRPOPLPUSH: - case RedisCommand.DECR: - case RedisCommand.DECRBY: - case RedisCommand.DEL: - case RedisCommand.EXPIRE: - case RedisCommand.EXPIREAT: - case RedisCommand.FLUSHALL: - case RedisCommand.FLUSHDB: - case RedisCommand.GETSET: - case RedisCommand.HDEL: - case RedisCommand.HINCRBY: - case RedisCommand.HINCRBYFLOAT: - case RedisCommand.HMSET: - case RedisCommand.HSET: - case RedisCommand.HSETNX: - case RedisCommand.INCR: - case RedisCommand.INCRBY: - case RedisCommand.INCRBYFLOAT: - case RedisCommand.LINSERT: - case RedisCommand.LPOP: - case RedisCommand.LPUSH: - case RedisCommand.LPUSHX: - case RedisCommand.LREM: - case RedisCommand.LSET: - case RedisCommand.LTRIM: - case RedisCommand.MIGRATE: - case 
RedisCommand.MOVE: - case RedisCommand.MSET: - case RedisCommand.MSETNX: - case RedisCommand.PERSIST: - case RedisCommand.PEXPIRE: - case RedisCommand.PEXPIREAT: - case RedisCommand.PFADD: - case RedisCommand.PFMERGE: - case RedisCommand.PSETEX: - case RedisCommand.RENAME: - case RedisCommand.RENAMENX: - case RedisCommand.RESTORE: - case RedisCommand.RPOP: - case RedisCommand.RPOPLPUSH: - case RedisCommand.RPUSH: - case RedisCommand.RPUSHX: - case RedisCommand.SADD: - case RedisCommand.SDIFFSTORE: - case RedisCommand.SET: - case RedisCommand.SETBIT: - case RedisCommand.SETEX: - case RedisCommand.SETNX: - case RedisCommand.SETRANGE: - case RedisCommand.SINTERSTORE: - case RedisCommand.SMOVE: - case RedisCommand.SPOP: - case RedisCommand.SREM: - case RedisCommand.SUNIONSTORE: - case RedisCommand.SWAPDB: - case RedisCommand.TOUCH: - case RedisCommand.UNLINK: - case RedisCommand.ZADD: - case RedisCommand.ZINTERSTORE: - case RedisCommand.ZINCRBY: - case RedisCommand.ZPOPMAX: - case RedisCommand.ZPOPMIN: - case RedisCommand.ZREM: - case RedisCommand.ZREMRANGEBYLEX: - case RedisCommand.ZREMRANGEBYRANK: - case RedisCommand.ZREMRANGEBYSCORE: - case RedisCommand.ZUNIONSTORE: - return true; - default: - return false; - } - } + public static Message Create(int db, CommandFlags flags, RedisCommand command, in RedisKey key, in RedisValue value0, in RedisValue value1, in RedisValue value2, in RedisValue value3) => + new CommandKeyValueValueValueValueMessage(db, flags, command, key, value0, value1, value2, value3); + + public static Message Create(int db, CommandFlags flags, RedisCommand command, in RedisKey key, in RedisValue value0, in RedisValue value1, in RedisValue value2, in RedisValue value3, in RedisValue value4) => + new CommandKeyValueValueValueValueValueMessage(db, flags, command, key, value0, value1, value2, value3, value4); + + public static Message Create(int db, CommandFlags flags, RedisCommand command, in RedisKey key, in RedisValue value0, in RedisValue value1, in 
RedisValue value2, in RedisValue value3, in RedisValue value4, in RedisValue value5) => + new CommandKeyValueValueValueValueValueValueMessage(db, flags, command, key, value0, value1, value2, value3, value4, value5); + + public static Message Create(int db, CommandFlags flags, RedisCommand command, in RedisKey key, in RedisValue value0, in RedisValue value1, in RedisValue value2, in RedisValue value3, in RedisValue value4, in RedisValue value5, in RedisValue value6) => + new CommandKeyValueValueValueValueValueValueValueMessage(db, flags, command, key, value0, value1, value2, value3, value4, value5, value6); + + public static Message Create(int db, CommandFlags flags, RedisCommand command, in RedisValue value0, in RedisValue value1) => + new CommandValueValueMessage(db, flags, command, value0, value1); + + public static Message Create(int db, CommandFlags flags, RedisCommand command, in RedisValue value, in RedisKey key) => + new CommandValueKeyMessage(db, flags, command, value, key); + + public static Message Create(int db, CommandFlags flags, RedisCommand command, in RedisValue value0, in RedisValue value1, in RedisValue value2) => + new CommandValueValueValueMessage(db, flags, command, value0, value1, value2); + + public static Message Create(int db, CommandFlags flags, RedisCommand command, in RedisValue value0, in RedisValue value1, in RedisValue value2, in RedisValue value3, in RedisValue value4) => + new CommandValueValueValueValueValueMessage(db, flags, command, value0, value1, value2, value3, value4); + + public static Message Create(int db, CommandFlags flags, RedisCommand command, in RedisKey key, in RedisValue value0, in RedisValue value1, in RedisValue[] values) => + new CommandKeyValueValueValuesMessage(db, flags, command, key, value0, value1, values); + + public static Message Create( + int db, + CommandFlags flags, + RedisCommand command, + in RedisKey key0, + in RedisKey key1, + in RedisValue value0, + in RedisValue value1) => + new 
CommandKeyKeyValueValueMessage(db, flags, command, key0, key1, value0, value1); + + public static Message Create( + int db, + CommandFlags flags, + RedisCommand command, + in RedisKey key0, + in RedisKey key1, + in RedisValue value0, + in RedisValue value1, + in RedisValue value2) => + new CommandKeyKeyValueValueValueMessage(db, flags, command, key0, key1, value0, value1, value2); + + public static Message Create( + int db, + CommandFlags flags, + RedisCommand command, + in RedisKey key0, + in RedisKey key1, + in RedisValue value0, + in RedisValue value1, + in RedisValue value2, + in RedisValue value3) => + new CommandKeyKeyValueValueValueValueMessage(db, flags, command, key0, key1, value0, value1, value2, value3); + + public static Message Create( + int db, + CommandFlags flags, + RedisCommand command, + in RedisKey key0, + in RedisKey key1, + in RedisValue value0, + in RedisValue value1, + in RedisValue value2, + in RedisValue value3, + in RedisValue value4) => + new CommandKeyKeyValueValueValueValueValueMessage(db, flags, command, key0, key1, value0, value1, value2, value3, value4); + + public static Message Create( + int db, + CommandFlags flags, + RedisCommand command, + in RedisKey key0, + in RedisKey key1, + in RedisValue value0, + in RedisValue value1, + in RedisValue value2, + in RedisValue value3, + in RedisValue value4, + in RedisValue value5) => + new CommandKeyKeyValueValueValueValueValueValueMessage(db, flags, command, key0, key1, value0, value1, value2, value3, value4, value5); + + public static Message Create( + int db, + CommandFlags flags, + RedisCommand command, + in RedisKey key0, + in RedisKey key1, + in RedisValue value0, + in RedisValue value1, + in RedisValue value2, + in RedisValue value3, + in RedisValue value4, + in RedisValue value5, + in RedisValue value6) => + new CommandKeyKeyValueValueValueValueValueValueValueMessage(db, flags, command, key0, key1, value0, value1, value2, value3, value4, value5, value6); + + public static Message 
CreateInSlot(int db, int slot, CommandFlags flags, RedisCommand command, RedisValue[] values) => + new CommandSlotValuesMessage(db, slot, flags, command, values); + + public static Message Create(int db, CommandFlags flags, RedisCommand command, KeyValuePair[] values, Expiration expiry, When when) + => new MultiSetMessage(db, flags, command, values, expiry, when); + + /// Gets whether this is primary-only. + /// + /// Note that the constructor runs the switch statement above, so + /// this will already be true for primary-only commands, even if the + /// user specified etc. + /// + public bool IsPrimaryOnly() => GetPrimaryReplicaFlags(Flags) == CommandFlags.DemandMaster; public virtual void AppendStormLog(StringBuilder sb) { @@ -429,14 +409,7 @@ public virtual void AppendStormLog(StringBuilder sb) sb.Append(CommandAndKey); } - public virtual int GetHashSlot(ServerSelectionStrategy serverSelectionStrategy) { return ServerSelectionStrategy.NoSlot; } - public bool IsMasterOnly() - { - // note that the constructor runs the switch statement above, so - // this will alread be true for master-only commands, even if the - // user specified PreferMaster etc - return GetMasterReplicaFlags(Flags) == CommandFlags.DemandMaster; - } + public virtual int GetHashSlot(ServerSelectionStrategy serverSelectionStrategy) => ServerSelectionStrategy.NoSlot; /// /// This does a few important things: @@ -444,26 +417,34 @@ public bool IsMasterOnly() /// (i.e. "why does my standalone server keep saying ERR unknown command 'cluster' ?") /// 2: it allows the initial PING and GET (during connect) to get queued rather /// than be rejected as no-server-available (note that this doesn't apply to - /// handshake messages, as they bypass the queue completely) - /// 3: it disables non-pref logging, as it is usually server-targeted + /// handshake messages, as they bypass the queue completely). + /// 3: it disables non-pref logging, as it is usually server-targeted. 
/// - public void SetInternalCall() - { - Flags |= InternalCallFlag; - } + public void SetInternalCall() => Flags |= InternalCallFlag; - public override string ToString() - { - return $"[{Db}]:{CommandAndKey} ({resultProcessor?.GetType().Name ?? "(n/a)"})"; - } + /// + /// Gets a string representation of this message: "[{DB}]:{CommandAndKey} ({resultProcessor})". + /// + public override string ToString() => + $"[{Db}]:{CommandAndKey} ({resultProcessor?.GetType().Name ?? "(n/a)"})"; + + /// + /// Gets a string representation of this message without the key: "[{DB}]:{Command} ({resultProcessor})". + /// + public string ToStringCommandOnly() => + $"[{Db}]:{Command} ({resultProcessor?.GetType().Name ?? "(n/a)"})"; public void SetResponseReceived() => performance?.SetResponseReceived(); - bool ICompletable.TryComplete(bool isAsync) { Complete(); return true; } + bool ICompletable.TryComplete(bool isAsync) + { + Complete(); + return true; + } public void Complete() { - //Ensure we can never call Complete on the same resultBox from two threads by grabbing it now + // Ensure we can never call Complete on the same resultBox from two threads by grabbing it now var currBox = Interlocked.Exchange(ref resultBox, null); // set the completion/performance data @@ -472,76 +453,89 @@ public void Complete() currBox?.ActivateContinuations(); } - internal bool ResultBoxIsAsync + internal bool ResultBoxIsAsync => Volatile.Read(ref resultBox)?.IsAsync == true; + + internal static Message Create(int db, CommandFlags flags, RedisCommand command, in RedisKey key, RedisKey[] keys) => keys.Length switch { - get - { - var currBox = Volatile.Read(ref resultBox); - return currBox != null && currBox.IsAsync; - } - } + 0 => new CommandKeyMessage(db, flags, command, key), + 1 => new CommandKeyKeyMessage(db, flags, command, key, keys[0]), + 2 => new CommandKeyKeyKeyMessage(db, flags, command, key, keys[0], keys[1]), + _ => new CommandKeyKeysMessage(db, flags, command, key, keys), + }; - internal 
static Message Create(int db, CommandFlags flags, RedisCommand command, in RedisKey key, RedisKey[] keys) + internal static Message Create(int db, CommandFlags flags, RedisCommand command, IList keys) => keys.Count switch { - switch (keys.Length) - { - case 0: return new CommandKeyMessage(db, flags, command, key); - case 1: return new CommandKeyKeyMessage(db, flags, command, key, keys[0]); - case 2: return new CommandKeyKeyKeyMessage(db, flags, command, key, keys[0], keys[1]); - default: return new CommandKeyKeysMessage(db, flags, command, key, keys); - } - } + 0 => new CommandMessage(db, flags, command), + 1 => new CommandKeyMessage(db, flags, command, keys[0]), + 2 => new CommandKeyKeyMessage(db, flags, command, keys[0], keys[1]), + 3 => new CommandKeyKeyKeyMessage(db, flags, command, keys[0], keys[1], keys[2]), + _ => new CommandKeysMessage(db, flags, command, (keys as RedisKey[]) ?? keys.ToArray()), + }; - internal static Message Create(int db, CommandFlags flags, RedisCommand command, IList keys) + internal static Message Create(int db, CommandFlags flags, RedisCommand command, IList values) => values.Count switch { - switch (keys.Count) - { - case 0: return new CommandMessage(db, flags, command); - case 1: return new CommandKeyMessage(db, flags, command, keys[0]); - case 2: return new CommandKeyKeyMessage(db, flags, command, keys[0], keys[1]); - case 3: return new CommandKeyKeyKeyMessage(db, flags, command, keys[0], keys[1], keys[2]); - default: return new CommandKeysMessage(db, flags, command, (keys as RedisKey[]) ?? 
keys.ToArray()); - } - } + 0 => new CommandMessage(db, flags, command), + 1 => new CommandValueMessage(db, flags, command, values[0]), + 2 => new CommandValueValueMessage(db, flags, command, values[0], values[1]), + 3 => new CommandValueValueValueMessage(db, flags, command, values[0], values[1], values[2]), + // no 4; not worth adding + 5 => new CommandValueValueValueValueValueMessage(db, flags, command, values[0], values[1], values[2], values[3], values[4]), + _ => new CommandValuesMessage(db, flags, command, (values as RedisValue[]) ?? values.ToArray()), + }; - internal static Message Create(int db, CommandFlags flags, RedisCommand command, IList values) + internal static Message Create(int db, CommandFlags flags, RedisCommand command, in RedisKey key, RedisValue[] values) { - switch (values.Count) +#if NET + ArgumentNullException.ThrowIfNull(values); +#else + if (values == null) throw new ArgumentNullException(nameof(values)); +#endif + return values.Length switch { - case 0: return new CommandMessage(db, flags, command); - case 1: return new CommandValueMessage(db, flags, command, values[0]); - case 2: return new CommandValueValueMessage(db, flags, command, values[0], values[1]); - case 3: return new CommandValueValueValueMessage(db, flags, command, values[0], values[1], values[2]); - // no 4; not worth adding - case 5: return new CommandValueValueValueValueValueMessage(db, flags, command, values[0], values[1], values[2], values[3], values[4]); - default: return new CommandValuesMessage(db, flags, command, (values as RedisValue[]) ?? 
values.ToArray()); - } + 0 => new CommandKeyMessage(db, flags, command, key), + 1 => new CommandKeyValueMessage(db, flags, command, key, values[0]), + 2 => new CommandKeyValueValueMessage(db, flags, command, key, values[0], values[1]), + 3 => new CommandKeyValueValueValueMessage(db, flags, command, key, values[0], values[1], values[2]), + 4 => new CommandKeyValueValueValueValueMessage(db, flags, command, key, values[0], values[1], values[2], values[3]), + _ => new CommandKeyValuesMessage(db, flags, command, key, values), + }; } - internal static Message Create(int db, CommandFlags flags, RedisCommand command, in RedisKey key, RedisValue[] values) + internal static Message Create(int db, CommandFlags flags, RedisCommand command, in RedisKey key0, in RedisKey key1, RedisValue[] values) { +#if NET + ArgumentNullException.ThrowIfNull(values); +#else if (values == null) throw new ArgumentNullException(nameof(values)); - switch (values.Length) +#endif + return values.Length switch { - case 0: return new CommandKeyMessage(db, flags, command, key); - case 1: return new CommandKeyValueMessage(db, flags, command, key, values[0]); - case 2: return new CommandKeyValueValueMessage(db, flags, command, key, values[0], values[1]); - case 3: return new CommandKeyValueValueValueMessage(db, flags, command, key, values[0], values[1], values[2]); - case 4: return new CommandKeyValueValueValueValueMessage(db, flags, command, key, values[0], values[1], values[2], values[3]); - default: return new CommandKeyValuesMessage(db, flags, command, key, values); - } + 0 => new CommandKeyKeyMessage(db, flags, command, key0, key1), + 1 => new CommandKeyKeyValueMessage(db, flags, command, key0, key1, values[0]), + 2 => new CommandKeyKeyValueValueMessage(db, flags, command, key0, key1, values[0], values[1]), + 3 => new CommandKeyKeyValueValueValueMessage(db, flags, command, key0, key1, values[0], values[1], values[2]), + 4 => new CommandKeyKeyValueValueValueValueMessage(db, flags, command, key0, 
key1, values[0], values[1], values[2], values[3]), + 5 => new CommandKeyKeyValueValueValueValueValueMessage(db, flags, command, key0, key1, values[0], values[1], values[2], values[3], values[4]), + 6 => new CommandKeyKeyValueValueValueValueValueValueMessage(db, flags, command, key0, key1, values[0], values[1], values[2], values[3], values[4], values[5]), + 7 => new CommandKeyKeyValueValueValueValueValueValueValueMessage(db, flags, command, key0, key1, values[0], values[1], values[2], values[3], values[4], values[5], values[6]), + _ => new CommandKeyKeyValuesMessage(db, flags, command, key0, key1, values), + }; } internal static Message Create(int db, CommandFlags flags, RedisCommand command, in RedisKey key0, RedisValue[] values, in RedisKey key1) { +#if NET + ArgumentNullException.ThrowIfNull(values); +#else if (values == null) throw new ArgumentNullException(nameof(values)); +#endif return new CommandKeyValuesKeyMessage(db, flags, command, key0, values, key1); } - internal static CommandFlags GetMasterReplicaFlags(CommandFlags flags) + internal static CommandFlags GetPrimaryReplicaFlags(CommandFlags flags) { // for the purposes of the switch, we only care about two bits - return flags & MaskMasterServerPreference; + return flags & MaskPrimaryServerPreference; } internal static bool RequiresDatabase(RedisCommand command) @@ -554,10 +548,13 @@ internal static bool RequiresDatabase(RedisCommand command) case RedisCommand.BGSAVE: case RedisCommand.CLIENT: case RedisCommand.CLUSTER: + case RedisCommand.COMMAND: case RedisCommand.CONFIG: case RedisCommand.DISCARD: case RedisCommand.ECHO: case RedisCommand.FLUSHALL: + case RedisCommand.HELLO: + case RedisCommand.HOTKEYS: case RedisCommand.INFO: case RedisCommand.LASTSAVE: case RedisCommand.LATENCY: @@ -580,6 +577,9 @@ internal static bool RequiresDatabase(RedisCommand command) case RedisCommand.SLAVEOF: case RedisCommand.SLOWLOG: case RedisCommand.SUBSCRIBE: + case RedisCommand.SPUBLISH: + case RedisCommand.SSUBSCRIBE: 
+ case RedisCommand.SUNSUBSCRIBE: case RedisCommand.SWAPDB: case RedisCommand.SYNC: case RedisCommand.TIME: @@ -591,11 +591,11 @@ internal static bool RequiresDatabase(RedisCommand command) } } - internal static CommandFlags SetMasterReplicaFlags(CommandFlags everything, CommandFlags masterReplica) + internal static CommandFlags SetPrimaryReplicaFlags(CommandFlags everything, CommandFlags primaryReplica) { // take away the two flags we don't want, and add back the ones we care about return (everything & ~(CommandFlags.DemandMaster | CommandFlags.DemandReplica | CommandFlags.PreferMaster | CommandFlags.PreferReplica)) - | masterReplica; + | primaryReplica; } internal void Cancel() => resultBox?.Cancel(); @@ -621,13 +621,13 @@ internal bool ComputeResult(PhysicalConnection connection, in RawResult result) } } - internal void Fail(ConnectionFailureType failure, Exception innerException, string annotation) + internal void Fail(ConnectionFailureType failure, Exception? innerException, string? annotation, ConnectionMultiplexer? muxer) { PhysicalConnection.IdentifyFailureType(innerException, ref failure); - resultProcessor?.ConnectionFail(this, failure, innerException, annotation); + resultProcessor?.ConnectionFail(this, failure, innerException, annotation, muxer); } - internal virtual void SetExceptionAndComplete(Exception exception, PhysicalBridge bridge) + internal virtual void SetExceptionAndComplete(Exception exception, PhysicalBridge? bridge) { resultBox?.SetException(exception); Complete(); @@ -643,14 +643,10 @@ internal bool TrySetResult(T value) return false; } - internal void SetEnqueued(PhysicalConnection connection) + internal void SetEnqueued(PhysicalConnection? 
connection) { -#if DEBUG - QueuePosition = -1; - ConnectionWriteState = PhysicalConnection.WriteStatus.NA; -#endif SetWriteTime(); - performance?.SetEnqueued(); + performance?.SetEnqueued(connection?.BridgeCouldBeNull?.ConnectionType); _enqueuedTo = connection; if (connection == null) { @@ -662,15 +658,17 @@ internal void SetEnqueued(PhysicalConnection connection) } } - internal void TryGetHeadMessages(out Message now, out Message next) + internal void TryGetHeadMessages(out Message? now, out Message? next) { - var connection = _enqueuedTo; now = next = null; - if (connection != null) connection.GetHeadMessages(out now, out next); + _enqueuedTo?.GetHeadMessages(out now, out next); } - internal bool TryGetPhysicalState(out PhysicalConnection.WriteStatus ws, out PhysicalConnection.ReadStatus rs, - out long sentDelta, out long receivedDelta) + internal bool TryGetPhysicalState( + out PhysicalConnection.WriteStatus ws, + out PhysicalConnection.ReadStatus rs, + out long sentDelta, + out long receivedDelta) { var connection = _enqueuedTo; sentDelta = receivedDelta = -1; @@ -691,7 +689,12 @@ internal bool TryGetPhysicalState(out PhysicalConnection.WriteStatus ws, out Phy } } - private PhysicalConnection _enqueuedTo; + internal bool IsBacklogged => Status == CommandStatus.WaitingInBacklog; + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal void SetBacklogged() => Status = CommandStatus.WaitingInBacklog; + + private PhysicalConnection? 
_enqueuedTo; private long _queuedStampReceived, _queuedStampSent; internal void SetRequestSent() @@ -704,31 +707,33 @@ internal void SetRequestSent() [MethodImpl(MethodImplOptions.AggressiveInlining)] internal void SetWriteTime() { - if ((Flags & NeedsAsyncTimeoutCheckFlag) != 0) - { - _writeTickCount = Environment.TickCount; // note this might be reset if we resend a message, cluster-moved etc; I'm OK with that - } + _writeTickCount = Environment.TickCount; // note this might be reset if we resend a message, cluster-moved etc; I'm OK with that } private int _writeTickCount; + public int GetWriteTime() => Volatile.Read(ref _writeTickCount); - private void SetNeedsTimeoutCheck() => Flags |= NeedsAsyncTimeoutCheckFlag; - internal bool HasAsyncTimedOut(int now, int timeoutMilliseconds, out int millisecondsTaken) + /// + /// Gets if this command should be sent over the subscription bridge. + /// + internal bool IsForSubscriptionBridge => (Flags & DemandSubscriptionConnection) != 0; + + public virtual string CommandString => Command.ToString(); + + /// + /// Sends this command to the subscription connection rather than the interactive. + /// + internal void SetForSubscriptionBridge() => Flags |= DemandSubscriptionConnection; + + /// + /// Checks if this message has violated the provided timeout. + /// Whether it's a sync operation in a .Wait() or in the backlog queue or written/pending asynchronously, we need to timeout everything. + /// ...or we get indefinite Task hangs for completions. 
+ /// + internal bool HasTimedOut(int now, int timeoutMilliseconds, out int millisecondsTaken) { - if ((Flags & NeedsAsyncTimeoutCheckFlag) != 0) - { - millisecondsTaken = unchecked(now - _writeTickCount); // note: we can't just check "if sent < cutoff" because of wrap-aro - if (millisecondsTaken >= timeoutMilliseconds) - { - Flags &= ~NeedsAsyncTimeoutCheckFlag; // note: we don't remove it from the queue - still might need to marry it up; but: it is toast - return true; - } - } - else - { - millisecondsTaken = default; - } - return false; + millisecondsTaken = unchecked(now - _writeTickCount); // note: we can't just check "if sent < cutoff" because of wrap-around + return millisecondsTaken >= timeoutMilliseconds; } internal void SetAsking(bool value) @@ -737,31 +742,35 @@ internal void SetAsking(bool value) else Flags &= ~AskingFlag; // and the bits taketh away } - internal void SetNoRedirect() - { - Flags |= CommandFlags.NoRedirect; - } + internal void SetNoRedirect() => Flags |= CommandFlags.NoRedirect; - internal void SetPreferMaster() - { - Flags = (Flags & ~MaskMasterServerPreference) | CommandFlags.PreferMaster; - } + internal void SetPreferPrimary() => + Flags = (Flags & ~MaskPrimaryServerPreference) | CommandFlags.PreferMaster; - internal void SetPreferReplica() - { - Flags = (Flags & ~MaskMasterServerPreference) | CommandFlags.PreferReplica; - } + internal void SetPreferReplica() => + Flags = (Flags & ~MaskPrimaryServerPreference) | CommandFlags.PreferReplica; - internal void SetSource(ResultProcessor resultProcessor, IResultBox resultBox) - { // note order here reversed to prevent overload resolution errors - if (resultBox != null && resultBox.IsAsync) SetNeedsTimeoutCheck(); + /// + /// Sets the processor and box for this message to execute. + /// + /// + /// Note order here is reversed to prevent overload resolution errors. + /// + internal void SetSource(ResultProcessor? resultProcessor, IResultBox? 
resultBox) + { this.resultBox = resultBox; this.resultProcessor = resultProcessor; } - internal void SetSource(IResultBox resultBox, ResultProcessor resultProcessor) + /// + /// Sets the box and processor for this message to execute. + /// + /// + /// Note order here is reversed to prevent overload resolution errors. + /// + /// The type of the result box result. + internal void SetSource(IResultBox resultBox, ResultProcessor? resultProcessor) { - if (resultBox != null && resultBox.IsAsync) SetNeedsTimeoutCheck(); this.resultBox = resultBox; this.resultProcessor = resultProcessor; } @@ -774,16 +783,85 @@ internal void WriteTo(PhysicalConnection physical) { WriteImpl(physical); } - catch (Exception ex) when (!(ex is RedisCommandException)) // these have specific meaning; don't wrap + catch (Exception ex) when (ex is not RedisCommandException) // these have specific meaning; don't wrap { physical?.OnInternalError(ex); - Fail(ConnectionFailureType.InternalFailure, ex, null); + Fail(ConnectionFailureType.InternalFailure, ex, null, physical?.BridgeCouldBeNull?.Multiplexer); + } + } + + private static ReadOnlySpan ChecksumTemplate => "$4\r\nXXXX\r\n"u8; + + internal void WriteHighIntegrityChecksumRequest(PhysicalConnection physical) + { + Debug.Assert(IsHighIntegrity, "should only be used for high-integrity"); + try + { + physical.WriteHeader(RedisCommand.ECHO, 1); // use WriteHeader to allow command-rewrite + + Span chk = stackalloc byte[10]; + Debug.Assert(ChecksumTemplate.Length == chk.Length, "checksum template length error"); + ChecksumTemplate.CopyTo(chk); + BinaryPrimitives.WriteUInt32LittleEndian(chk.Slice(4, 4), _highIntegrityToken); + physical.WriteRaw(chk); + } + catch (Exception ex) + { + physical?.OnInternalError(ex); + Fail(ConnectionFailureType.InternalFailure, ex, null, physical?.BridgeCouldBeNull?.Multiplexer); + } + } + + internal static Message CreateHello(int protocolVersion, string? username, string? password, string? 
clientName, CommandFlags flags) + => new HelloMessage(protocolVersion, username, password, clientName, flags); + + internal sealed class HelloMessage : Message + { + private readonly string? _username, _password, _clientName; + private readonly int _protocolVersion; + + internal HelloMessage(int protocolVersion, string? username, string? password, string? clientName, CommandFlags flags) + : base(-1, flags, RedisCommand.HELLO) + { + _protocolVersion = protocolVersion; + _username = username; + _password = password; + _clientName = clientName; + } + + public override string CommandAndKey => Command + " " + _protocolVersion; + + public override int ArgCount + { + get + { + int count = 1; // HELLO protover + if (!string.IsNullOrWhiteSpace(_password)) count += 3; // [AUTH username password] + if (!string.IsNullOrWhiteSpace(_clientName)) count += 2; // [SETNAME client] + return count; + } + } + protected override void WriteImpl(PhysicalConnection physical) + { + physical.WriteHeader(Command, ArgCount); + physical.WriteBulkString(_protocolVersion); + if (!string.IsNullOrWhiteSpace(_password)) + { + physical.WriteBulkString("AUTH"u8); + physical.WriteBulkString(string.IsNullOrWhiteSpace(_username) ? RedisLiterals.@default : _username); + physical.WriteBulkString(_password); + } + if (!string.IsNullOrWhiteSpace(_clientName)) + { + physical.WriteBulkString("SETNAME"u8); + physical.WriteBulkString(_clientName); + } } } internal abstract class CommandChannelBase : Message { - protected readonly RedisChannel Channel; + internal readonly RedisChannel Channel; protected CommandChannelBase(int db, CommandFlags flags, RedisCommand command, in RedisChannel channel) : base(db, flags, command) { @@ -792,6 +870,9 @@ protected CommandChannelBase(int db, CommandFlags flags, RedisCommand command, i } public override string CommandAndKey => Command + " " + Channel; + + public override int GetHashSlot(ServerSelectionStrategy serverSelectionStrategy) + => Channel.IsKeyRouted ? 
serverSelectionStrategy.HashSlot(Channel) : ServerSelectionStrategy.NoSlot; } internal abstract class CommandKeyBase : Message @@ -804,17 +885,15 @@ protected CommandKeyBase(int db, CommandFlags flags, RedisCommand command, in Re Key = key; } - public override string CommandAndKey => Command + " " + (string)Key; + public override string CommandAndKey => Command + " " + (string?)Key; - public override int GetHashSlot(ServerSelectionStrategy serverSelectionStrategy) - { - return serverSelectionStrategy.HashSlot(Key); - } + public override int GetHashSlot(ServerSelectionStrategy serverSelectionStrategy) => serverSelectionStrategy.HashSlot(Key); } private sealed class CommandChannelMessage : CommandChannelBase { - public CommandChannelMessage(int db, CommandFlags flags, RedisCommand command, in RedisChannel channel) : base(db, flags, command, channel) + public CommandChannelMessage(int db, CommandFlags flags, RedisCommand command, in RedisChannel channel) + : base(db, flags, command, channel) { } protected override void WriteImpl(PhysicalConnection physical) { @@ -827,7 +906,8 @@ protected override void WriteImpl(PhysicalConnection physical) private sealed class CommandChannelValueMessage : CommandChannelBase { private readonly RedisValue value; - public CommandChannelValueMessage(int db, CommandFlags flags, RedisCommand command, in RedisChannel channel, in RedisValue value) : base(db, flags, command, channel) + public CommandChannelValueMessage(int db, CommandFlags flags, RedisCommand command, in RedisChannel channel, in RedisValue value) + : base(db, flags, command, channel) { value.AssertNotNull(); this.value = value; @@ -1086,6 +1166,62 @@ protected override void WriteImpl(PhysicalConnection physical) public override int ArgCount => values.Length + 1; } + private sealed class CommandKeyKeyValuesMessage : CommandKeyBase + { + private readonly RedisKey key1; + private readonly RedisValue[] values; + public CommandKeyKeyValuesMessage(int db, CommandFlags flags, 
RedisCommand command, in RedisKey key, in RedisKey key1, RedisValue[] values) : base(db, flags, command, key) + { + for (int i = 0; i < values.Length; i++) + { + values[i].AssertNotNull(); + } + + key1.AssertNotNull(); + this.key1 = key1; + this.values = values; + } + + protected override void WriteImpl(PhysicalConnection physical) + { + physical.WriteHeader(Command, values.Length + 2); + physical.Write(Key); + physical.Write(key1); + for (int i = 0; i < values.Length; i++) physical.WriteBulkString(values[i]); + } + public override int ArgCount => values.Length + 1; + } + + private sealed class CommandKeyValueValueValuesMessage : CommandKeyBase + { + private readonly RedisValue value0; + private readonly RedisValue value1; + private readonly RedisValue[] values; + public CommandKeyValueValueValuesMessage(int db, CommandFlags flags, RedisCommand command, in RedisKey key, in RedisValue value0, in RedisValue value1, RedisValue[] values) : base(db, flags, command, key) + { + for (int i = 0; i < values.Length; i++) + { + values[i].AssertNotNull(); + } + + value0.AssertNotNull(); + value1.AssertNotNull(); + this.value0 = value0; + this.value1 = value1; + this.values = values; + } + + protected override void WriteImpl(PhysicalConnection physical) + { + physical.WriteHeader(Command, values.Length + 3); + physical.Write(Key); + physical.WriteBulkString(value0); + physical.WriteBulkString(value1); + for (int i = 0; i < values.Length; i++) physical.WriteBulkString(values[i]); + } + public override int ArgCount => values.Length + 3; + } + private sealed class CommandKeyValueValueMessage : CommandKeyBase { private readonly RedisValue value0, value1; @@ -1158,6 +1294,371 @@ protected override void WriteImpl(PhysicalConnection physical) public override int ArgCount => 5; } + private sealed class CommandKeyValueValueValueValueValueMessage : CommandKeyBase + { + private readonly RedisValue value0, value1, value2, value3, value4; + public 
CommandKeyValueValueValueValueValueMessage(int db, CommandFlags flags, RedisCommand command, in RedisKey key, in RedisValue value0, in RedisValue value1, in RedisValue value2, in RedisValue value3, in RedisValue value4) : base(db, flags, command, key) + { + value0.AssertNotNull(); + value1.AssertNotNull(); + value2.AssertNotNull(); + value3.AssertNotNull(); + value4.AssertNotNull(); + this.value0 = value0; + this.value1 = value1; + this.value2 = value2; + this.value3 = value3; + this.value4 = value4; + } + + protected override void WriteImpl(PhysicalConnection physical) + { + physical.WriteHeader(Command, 6); + physical.Write(Key); + physical.WriteBulkString(value0); + physical.WriteBulkString(value1); + physical.WriteBulkString(value2); + physical.WriteBulkString(value3); + physical.WriteBulkString(value4); + } + public override int ArgCount => 6; + } + + private sealed class CommandKeyValueValueValueValueValueValueMessage : CommandKeyBase + { + private readonly RedisValue value0, value1, value2, value3, value4, value5; + + public CommandKeyValueValueValueValueValueValueMessage(int db, CommandFlags flags, RedisCommand command, in RedisKey key, in RedisValue value0, in RedisValue value1, in RedisValue value2, in RedisValue value3, in RedisValue value4, in RedisValue value5) : base(db, flags, command, key) + { + value0.AssertNotNull(); + value1.AssertNotNull(); + value2.AssertNotNull(); + value3.AssertNotNull(); + value4.AssertNotNull(); + value5.AssertNotNull(); + this.value0 = value0; + this.value1 = value1; + this.value2 = value2; + this.value3 = value3; + this.value4 = value4; + this.value5 = value5; + } + + protected override void WriteImpl(PhysicalConnection physical) + { + physical.WriteHeader(Command, ArgCount); + physical.Write(Key); + physical.WriteBulkString(value0); + physical.WriteBulkString(value1); + physical.WriteBulkString(value2); + physical.WriteBulkString(value3); + physical.WriteBulkString(value4); + physical.WriteBulkString(value5); + } + 
public override int ArgCount => 7; + } + + private sealed class CommandKeyValueValueValueValueValueValueValueMessage : CommandKeyBase + { + private readonly RedisValue value0, value1, value2, value3, value4, value5, value6; + + public CommandKeyValueValueValueValueValueValueValueMessage(int db, CommandFlags flags, RedisCommand command, in RedisKey key, in RedisValue value0, in RedisValue value1, in RedisValue value2, in RedisValue value3, in RedisValue value4, in RedisValue value5, in RedisValue value6) : base(db, flags, command, key) + { + value0.AssertNotNull(); + value1.AssertNotNull(); + value2.AssertNotNull(); + value3.AssertNotNull(); + value4.AssertNotNull(); + value5.AssertNotNull(); + value6.AssertNotNull(); + this.value0 = value0; + this.value1 = value1; + this.value2 = value2; + this.value3 = value3; + this.value4 = value4; + this.value5 = value5; + this.value6 = value6; + } + + protected override void WriteImpl(PhysicalConnection physical) + { + physical.WriteHeader(Command, ArgCount); + physical.Write(Key); + physical.WriteBulkString(value0); + physical.WriteBulkString(value1); + physical.WriteBulkString(value2); + physical.WriteBulkString(value3); + physical.WriteBulkString(value4); + physical.WriteBulkString(value5); + physical.WriteBulkString(value6); + } + public override int ArgCount => 8; + } + + private sealed class CommandKeyKeyValueValueMessage : CommandKeyBase + { + private readonly RedisValue value0, value1; + private readonly RedisKey key1; + + public CommandKeyKeyValueValueMessage( + int db, + CommandFlags flags, + RedisCommand command, + in RedisKey key0, + in RedisKey key1, + in RedisValue value0, + in RedisValue value1) : base(db, flags, command, key0) + { + key1.AssertNotNull(); + value0.AssertNotNull(); + value1.AssertNotNull(); + this.key1 = key1; + this.value0 = value0; + this.value1 = value1; + } + + protected override void WriteImpl(PhysicalConnection physical) + { + physical.WriteHeader(Command, ArgCount); + physical.Write(Key); 
+ physical.Write(key1); + physical.WriteBulkString(value0); + physical.WriteBulkString(value1); + } + + public override int ArgCount => 4; + } + + private sealed class CommandKeyKeyValueValueValueMessage : CommandKeyBase + { + private readonly RedisValue value0, value1, value2; + private readonly RedisKey key1; + + public CommandKeyKeyValueValueValueMessage( + int db, + CommandFlags flags, + RedisCommand command, + in RedisKey key0, + in RedisKey key1, + in RedisValue value0, + in RedisValue value1, + in RedisValue value2) : base(db, flags, command, key0) + { + key1.AssertNotNull(); + value0.AssertNotNull(); + value1.AssertNotNull(); + value2.AssertNotNull(); + this.key1 = key1; + this.value0 = value0; + this.value1 = value1; + this.value2 = value2; + } + + protected override void WriteImpl(PhysicalConnection physical) + { + physical.WriteHeader(Command, ArgCount); + physical.Write(Key); + physical.Write(key1); + physical.WriteBulkString(value0); + physical.WriteBulkString(value1); + physical.WriteBulkString(value2); + } + + public override int ArgCount => 5; + } + + private sealed class CommandKeyKeyValueValueValueValueMessage : CommandKeyBase + { + private readonly RedisValue value0, value1, value2, value3; + private readonly RedisKey key1; + + public CommandKeyKeyValueValueValueValueMessage( + int db, + CommandFlags flags, + RedisCommand command, + in RedisKey key0, + in RedisKey key1, + in RedisValue value0, + in RedisValue value1, + in RedisValue value2, + in RedisValue value3) : base(db, flags, command, key0) + { + key1.AssertNotNull(); + value0.AssertNotNull(); + value1.AssertNotNull(); + value2.AssertNotNull(); + value3.AssertNotNull(); + this.key1 = key1; + this.value0 = value0; + this.value1 = value1; + this.value2 = value2; + this.value3 = value3; + } + + protected override void WriteImpl(PhysicalConnection physical) + { + physical.WriteHeader(Command, ArgCount); + physical.Write(Key); + physical.Write(key1); + physical.WriteBulkString(value0); + 
physical.WriteBulkString(value1); + physical.WriteBulkString(value2); + physical.WriteBulkString(value3); + } + + public override int ArgCount => 6; + } + + private sealed class CommandKeyKeyValueValueValueValueValueMessage : CommandKeyBase + { + private readonly RedisValue value0, value1, value2, value3, value4; + private readonly RedisKey key1; + + public CommandKeyKeyValueValueValueValueValueMessage( + int db, + CommandFlags flags, + RedisCommand command, + in RedisKey key0, + in RedisKey key1, + in RedisValue value0, + in RedisValue value1, + in RedisValue value2, + in RedisValue value3, + in RedisValue value4) : base(db, flags, command, key0) + { + key1.AssertNotNull(); + value0.AssertNotNull(); + value1.AssertNotNull(); + value2.AssertNotNull(); + value3.AssertNotNull(); + value4.AssertNotNull(); + this.key1 = key1; + this.value0 = value0; + this.value1 = value1; + this.value2 = value2; + this.value3 = value3; + this.value4 = value4; + } + + protected override void WriteImpl(PhysicalConnection physical) + { + physical.WriteHeader(Command, ArgCount); + physical.Write(Key); + physical.Write(key1); + physical.WriteBulkString(value0); + physical.WriteBulkString(value1); + physical.WriteBulkString(value2); + physical.WriteBulkString(value3); + physical.WriteBulkString(value4); + } + + public override int ArgCount => 7; + } + + private sealed class CommandKeyKeyValueValueValueValueValueValueMessage : CommandKeyBase + { + private readonly RedisValue value0, value1, value2, value3, value4, value5; + private readonly RedisKey key1; + + public CommandKeyKeyValueValueValueValueValueValueMessage( + int db, + CommandFlags flags, + RedisCommand command, + in RedisKey key0, + in RedisKey key1, + in RedisValue value0, + in RedisValue value1, + in RedisValue value2, + in RedisValue value3, + in RedisValue value4, + in RedisValue value5) : base(db, flags, command, key0) + { + key1.AssertNotNull(); + value0.AssertNotNull(); + value1.AssertNotNull(); + value2.AssertNotNull(); + 
value3.AssertNotNull(); + value4.AssertNotNull(); + value5.AssertNotNull(); + this.key1 = key1; + this.value0 = value0; + this.value1 = value1; + this.value2 = value2; + this.value3 = value3; + this.value4 = value4; + this.value5 = value5; + } + + protected override void WriteImpl(PhysicalConnection physical) + { + physical.WriteHeader(Command, ArgCount); + physical.Write(Key); + physical.Write(key1); + physical.WriteBulkString(value0); + physical.WriteBulkString(value1); + physical.WriteBulkString(value2); + physical.WriteBulkString(value3); + physical.WriteBulkString(value4); + physical.WriteBulkString(value5); + } + + public override int ArgCount => 8; + } + + private sealed class CommandKeyKeyValueValueValueValueValueValueValueMessage : CommandKeyBase + { + private readonly RedisValue value0, value1, value2, value3, value4, value5, value6; + private readonly RedisKey key1; + + public CommandKeyKeyValueValueValueValueValueValueValueMessage( + int db, + CommandFlags flags, + RedisCommand command, + in RedisKey key0, + in RedisKey key1, + in RedisValue value0, + in RedisValue value1, + in RedisValue value2, + in RedisValue value3, + in RedisValue value4, + in RedisValue value5, + in RedisValue value6) : base(db, flags, command, key0) + { + key1.AssertNotNull(); + value0.AssertNotNull(); + value1.AssertNotNull(); + value2.AssertNotNull(); + value3.AssertNotNull(); + value4.AssertNotNull(); + value5.AssertNotNull(); + value6.AssertNotNull(); + this.key1 = key1; + this.value0 = value0; + this.value1 = value1; + this.value2 = value2; + this.value3 = value3; + this.value4 = value4; + this.value5 = value5; + this.value6 = value6; + } + + protected override void WriteImpl(PhysicalConnection physical) + { + physical.WriteHeader(Command, ArgCount); + physical.Write(Key); + physical.Write(key1); + physical.WriteBulkString(value0); + physical.WriteBulkString(value1); + physical.WriteBulkString(value2); + physical.WriteBulkString(value3); + physical.WriteBulkString(value4); + 
physical.WriteBulkString(value5); + physical.WriteBulkString(value6); + } + + public override int ArgCount => 9; + } + private sealed class CommandMessage : Message { public CommandMessage(int db, CommandFlags flags, RedisCommand command) : base(db, flags, command) { } @@ -1168,7 +1669,7 @@ protected override void WriteImpl(PhysicalConnection physical) public override int ArgCount => 0; } - private class CommandSlotValuesMessage : Message + private sealed class CommandSlotValuesMessage : Message { private readonly int slot; private readonly RedisValue[] values; @@ -1184,26 +1685,73 @@ public CommandSlotValuesMessage(int db, int slot, CommandFlags flags, RedisComma this.values = values; } + public override int GetHashSlot(ServerSelectionStrategy serverSelectionStrategy) => slot; + + protected override void WriteImpl(PhysicalConnection physical) + { + physical.WriteHeader(command, values.Length); + for (int i = 0; i < values.Length; i++) + { + physical.WriteBulkString(values[i]); + } + } + public override int ArgCount => values.Length; + } + + private sealed class MultiSetMessage(int db, CommandFlags flags, RedisCommand command, KeyValuePair[] values, Expiration expiry, When when) : Message(db, flags, command) + { public override int GetHashSlot(ServerSelectionStrategy serverSelectionStrategy) { + int slot = ServerSelectionStrategy.NoSlot; + for (int i = 0; i < values.Length; i++) + { + slot = serverSelectionStrategy.CombineSlot(slot, values[i].Key); + } return slot; } + // we support: + // - MSET {key1} {value1} [{key2} {value2}...] + // - MSETNX {key1} {value1} [{key2} {value2}...] + // - MSETEX {count} {key1} {value1} [{key2} {value2}...] [standard-expiry-tokens] + public override int ArgCount => Command == RedisCommand.MSETEX + ? (1 + (2 * values.Length) + expiry.TokenCount + (when is When.Exists or When.NotExists ? 
1 : 0)) + : (2 * values.Length); // MSET/MSETNX only support simple syntax + protected override void WriteImpl(PhysicalConnection physical) { - physical.WriteHeader(command, values.Length); + var cmd = Command; + physical.WriteHeader(cmd, ArgCount); + if (cmd == RedisCommand.MSETEX) // need count prefix + { + physical.WriteBulkString(values.Length); + } for (int i = 0; i < values.Length; i++) { - physical.WriteBulkString(values[i]); + physical.Write(values[i].Key); + physical.WriteBulkString(values[i].Value); + } + if (cmd == RedisCommand.MSETEX) // allow expiry/mode tokens + { + expiry.WriteTo(physical); + switch (when) + { + case When.Exists: + physical.WriteBulkString("XX"u8); + break; + case When.NotExists: + physical.WriteBulkString("NX"u8); + break; + } } } - public override int ArgCount => values.Length; } private sealed class CommandValueChannelMessage : CommandChannelBase { private readonly RedisValue value; - public CommandValueChannelMessage(int db, CommandFlags flags, RedisCommand command, in RedisValue value, in RedisChannel channel) : base(db, flags, command, channel) + public CommandValueChannelMessage(int db, CommandFlags flags, RedisCommand command, in RedisValue value, in RedisChannel channel) + : base(db, flags, command, channel) { value.AssertNotNull(); this.value = value; @@ -1231,7 +1779,7 @@ public CommandValueKeyMessage(int db, CommandFlags flags, RedisCommand command, public override void AppendStormLog(StringBuilder sb) { base.AppendStormLog(sb); - sb.Append(" (").Append((string)value).Append(')'); + sb.Append(" (").Append((string?)value).Append(')'); } protected override void WriteImpl(PhysicalConnection physical) @@ -1345,5 +1893,15 @@ protected override void WriteImpl(PhysicalConnection physical) } public override int ArgCount => 1; } + + // this is a placeholder message for use when (for example) unable to queue the + // connection queue due to a lock timeout + internal sealed class UnknownMessage : Message + { + public static 
UnknownMessage Instance { get; } = new(); + private UnknownMessage() : base(0, CommandFlags.None, RedisCommand.UNKNOWN) { } + public override int ArgCount => 0; + protected override void WriteImpl(PhysicalConnection physical) => throw new InvalidOperationException("This message cannot be written"); + } } } diff --git a/src/StackExchange.Redis/MessageCompletable.cs b/src/StackExchange.Redis/MessageCompletable.cs index 481477a0a..8f4737943 100644 --- a/src/StackExchange.Redis/MessageCompletable.cs +++ b/src/StackExchange.Redis/MessageCompletable.cs @@ -18,7 +18,7 @@ public MessageCompletable(RedisChannel channel, RedisValue message, Action (string)channel; + public override string? ToString() => (string?)channel; public bool TryComplete(bool isAsync) { @@ -26,7 +26,7 @@ public bool TryComplete(bool isAsync) { if (handler != null) { - ConnectionMultiplexer.TraceWithoutContext("Invoking (async)...: " + (string)channel, "Subscription"); + ConnectionMultiplexer.TraceWithoutContext("Invoking (async)...: " + (string?)channel, "Subscription"); if (handler.IsSingle()) { try { handler(channel, message); } catch { } @@ -48,9 +48,6 @@ public bool TryComplete(bool isAsync) } } - void ICompletable.AppendStormLog(StringBuilder sb) - { - sb.Append("event, pub/sub: ").Append((string)channel); - } + void ICompletable.AppendStormLog(StringBuilder sb) => sb.Append("event, pub/sub: ").Append((string?)channel); } } diff --git a/src/StackExchange.Redis/NameValueEntry.cs b/src/StackExchange.Redis/NameValueEntry.cs deleted file mode 100644 index 7b2cee98e..000000000 --- a/src/StackExchange.Redis/NameValueEntry.cs +++ /dev/null @@ -1,84 +0,0 @@ -using System; -using System.Collections.Generic; - -namespace StackExchange.Redis -{ - /// - /// Describes a value contained in a stream (a name/value pair). - /// - public readonly struct NameValueEntry : IEquatable - { - internal readonly RedisValue name, value; - - /// - /// Initializes a value. - /// - /// The name for this entry. 
- /// The value for this entry. - public NameValueEntry(RedisValue name, RedisValue value) - { - this.name = name; - this.value = value; - } - - /// - /// The name of the field. - /// - public RedisValue Name => name; - - /// - /// The value of the field. - /// - public RedisValue Value => value; - - /// - /// Converts to a key/value pair - /// - /// The to create a from. - public static implicit operator KeyValuePair(NameValueEntry value) => - new KeyValuePair(value.name, value.value); - - /// - /// Converts from a key/value pair - /// - /// The to get a from. - public static implicit operator NameValueEntry(KeyValuePair value) => - new NameValueEntry(value.Key, value.Value); - - /// - /// See Object.ToString() - /// - public override string ToString() => name + ": " + value; - - /// - /// See Object.GetHashCode() - /// - public override int GetHashCode() => name.GetHashCode() ^ value.GetHashCode(); - - /// - /// Compares two values for equality. - /// - /// The to compare to. - public override bool Equals(object obj) => obj is NameValueEntry heObj && Equals(heObj); - - /// - /// Compares two values for equality. - /// - /// The to compare to. - public bool Equals(NameValueEntry other) => name == other.name && value == other.value; - - /// - /// Compares two values for equality - /// - /// The first to compare. - /// The second to compare. - public static bool operator ==(NameValueEntry x, NameValueEntry y) => x.name == y.name && x.value == y.value; - - /// - /// Compares two values for non-equality - /// - /// The first to compare. - /// The second to compare. 
- public static bool operator !=(NameValueEntry x, NameValueEntry y) => x.name != y.name || x.value != y.value; - } -} diff --git a/src/StackExchange.Redis/NullableHacks.cs b/src/StackExchange.Redis/NullableHacks.cs new file mode 100644 index 000000000..4ebebf73b --- /dev/null +++ b/src/StackExchange.Redis/NullableHacks.cs @@ -0,0 +1,148 @@ +// https://github.com/dotnet/runtime/blob/527f9ae88a0ee216b44d556f9bdc84037fe0ebda/src/libraries/System.Private.CoreLib/src/System/Diagnostics/CodeAnalysis/NullableAttributes.cs + +#pragma warning disable +#define INTERNAL_NULLABLE_ATTRIBUTES + +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +namespace System.Diagnostics.CodeAnalysis +{ +#if !NET + /// Specifies that null is allowed as an input even if the corresponding type disallows it. + [AttributeUsage(AttributeTargets.Field | AttributeTargets.Parameter | AttributeTargets.Property, Inherited = false)] + internal sealed class AllowNullAttribute : Attribute { } + + /// Specifies that null is disallowed as an input even if the corresponding type allows it. + [AttributeUsage(AttributeTargets.Field | AttributeTargets.Parameter | AttributeTargets.Property, Inherited = false)] + internal sealed class DisallowNullAttribute : Attribute { } + + /// Specifies that an output may be null even if the corresponding type disallows it. + [AttributeUsage(AttributeTargets.Field | AttributeTargets.Parameter | AttributeTargets.Property | AttributeTargets.ReturnValue, Inherited = false)] + internal sealed class MaybeNullAttribute : Attribute { } + + /// Specifies that an output will not be null even if the corresponding type allows it. Specifies that an input argument was not null when the call returns. 
+ [AttributeUsage(AttributeTargets.Field | AttributeTargets.Parameter | AttributeTargets.Property | AttributeTargets.ReturnValue, Inherited = false)] + internal sealed class NotNullAttribute : Attribute { } + + /// Specifies that when a method returns , the parameter may be null even if the corresponding type disallows it. + [AttributeUsage(AttributeTargets.Parameter, Inherited = false)] + internal sealed class MaybeNullWhenAttribute : Attribute + { + /// Initializes the attribute with the specified return value condition. + /// + /// The return value condition. If the method returns this value, the associated parameter may be null. + /// + public MaybeNullWhenAttribute(bool returnValue) => ReturnValue = returnValue; + + /// Gets the return value condition. + public bool ReturnValue { get; } + } + + /// Specifies that when a method returns , the parameter will not be null even if the corresponding type allows it. + [AttributeUsage(AttributeTargets.Parameter, Inherited = false)] + internal sealed class NotNullWhenAttribute : Attribute + { + /// Initializes the attribute with the specified return value condition. + /// + /// The return value condition. If the method returns this value, the associated parameter will not be null. + /// + public NotNullWhenAttribute(bool returnValue) => ReturnValue = returnValue; + + /// Gets the return value condition. + public bool ReturnValue { get; } + } + + /// Specifies that the output will be non-null if the named parameter is non-null. + [AttributeUsage(AttributeTargets.Parameter | AttributeTargets.Property | AttributeTargets.ReturnValue, AllowMultiple = true, Inherited = false)] + internal sealed class NotNullIfNotNullAttribute : Attribute + { + /// Initializes the attribute with the associated parameter name. + /// + /// The associated parameter name. The output will be non-null if the argument to the parameter specified is non-null. 
+ /// + public NotNullIfNotNullAttribute(string parameterName) => ParameterName = parameterName; + + /// Gets the associated parameter name. + public string ParameterName { get; } + } + + /// Applied to a method that will never return under any circumstance. + [AttributeUsage(AttributeTargets.Method, Inherited = false)] + internal sealed class DoesNotReturnAttribute : Attribute { } + + /// Specifies that the method will not return if the associated Boolean parameter is passed the specified value. + [AttributeUsage(AttributeTargets.Parameter, Inherited = false)] + internal sealed class DoesNotReturnIfAttribute : Attribute + { + /// Initializes the attribute with the specified parameter value. + /// + /// The condition parameter value. Code after the method will be considered unreachable by diagnostics if the argument to + /// the associated parameter matches this value. + /// + public DoesNotReturnIfAttribute(bool parameterValue) => ParameterValue = parameterValue; + + /// Gets the condition parameter value. + public bool ParameterValue { get; } + } +#endif + +#if !NET + /// Specifies that the method or property will ensure that the listed field and property members have not-null values. + [AttributeUsage(AttributeTargets.Method | AttributeTargets.Property, Inherited = false, AllowMultiple = true)] + internal sealed class MemberNotNullAttribute : Attribute + { + /// Initializes the attribute with a field or property member. + /// + /// The field or property member that is promised to be not-null. + /// + public MemberNotNullAttribute(string member) => Members = new[] { member }; + + /// Initializes the attribute with the list of field and property members. + /// + /// The list of field and property members that are promised to be not-null. + /// + public MemberNotNullAttribute(params string[] members) => Members = members; + + /// Gets field or property member names. 
+ public string[] Members { get; } + } + + /// Specifies that the method or property will ensure that the listed field and property members have not-null values when returning with the specified return value condition. + [AttributeUsage(AttributeTargets.Method | AttributeTargets.Property, Inherited = false, AllowMultiple = true)] + internal sealed class MemberNotNullWhenAttribute : Attribute + { + /// Initializes the attribute with the specified return value condition and a field or property member. + /// + /// The return value condition. If the method returns this value, the associated parameter will not be null. + /// + /// + /// The field or property member that is promised to be not-null. + /// + public MemberNotNullWhenAttribute(bool returnValue, string member) + { + ReturnValue = returnValue; + Members = new[] { member }; + } + + /// Initializes the attribute with the specified return value condition and list of field and property members. + /// + /// The return value condition. If the method returns this value, the associated parameter will not be null. + /// + /// + /// The list of field and property members that are promised to be not-null. + /// + public MemberNotNullWhenAttribute(bool returnValue, params string[] members) + { + ReturnValue = returnValue; + Members = members; + } + + /// Gets the return value condition. + public bool ReturnValue { get; } + + /// Gets field or property member names. + public string[] Members { get; } + } +#endif +} diff --git a/src/StackExchange.Redis/Obsoletions.cs b/src/StackExchange.Redis/Obsoletions.cs new file mode 100644 index 000000000..44ede249b --- /dev/null +++ b/src/StackExchange.Redis/Obsoletions.cs @@ -0,0 +1,7 @@ +namespace StackExchange.Redis; + +internal static class Obsoletions +{ + public const string LegacyFormatterImplMessage = "This API supports obsolete formatter-based serialization. 
It should not be called or extended by application code."; + public const string LegacyFormatterImplDiagId = "SYSLIB0051"; +} diff --git a/src/StackExchange.Redis/PerfCounterHelper.cs b/src/StackExchange.Redis/PerfCounterHelper.cs index e4f608d76..763c86f04 100644 --- a/src/StackExchange.Redis/PerfCounterHelper.cs +++ b/src/StackExchange.Redis/PerfCounterHelper.cs @@ -1,71 +1,16 @@ -using System; -using System.Diagnostics; -using System.Runtime.InteropServices; -using System.Threading; +using System.Threading; namespace StackExchange.Redis { -#pragma warning disable CA1416 // windows only APIs; we've guarded against that internal static class PerfCounterHelper { - private static readonly object staticLock = new object(); - private static volatile PerformanceCounter _cpu; - private static volatile bool _disabled = !RuntimeInformation.IsOSPlatform(OSPlatform.Windows); - - public static bool TryGetSystemCPU(out float value) - { - value = -1; - - try - { - if (!_disabled && _cpu == null) - { - lock (staticLock) - { - if (_cpu == null) - { - _cpu = new PerformanceCounter("Processor", "% Processor Time", "_Total"); - - // First call always returns 0, so get that out of the way. - _cpu.NextValue(); - } - } - } - } - catch (UnauthorizedAccessException) - { - // Some environments don't allow access to Performance Counters, so stop trying. - _disabled = true; - } - catch (Exception e) - { - // this shouldn't happen, but just being safe... - Trace.WriteLine(e); - } - - if (!_disabled && _cpu != null) - { - value = _cpu.NextValue(); - return true; - } - return false; - } - - internal static string GetThreadPoolAndCPUSummary(bool includePerformanceCounters) - { - GetThreadPoolStats(out string iocp, out string worker); - var cpu = includePerformanceCounters ? 
GetSystemCpuPercent() : "n/a"; - return $"IOCP: {iocp}, WORKER: {worker}, Local-CPU: {cpu}"; - } - - internal static string GetSystemCpuPercent() + internal static string GetThreadPoolAndCPUSummary() { - return TryGetSystemCPU(out float systemCPU) - ? Math.Round(systemCPU, 2) + "%" - : "unavailable"; + GetThreadPoolStats(out string iocp, out string worker, out string? workItems); + return $"IOCP: {iocp}, WORKER: {worker}, POOL: {workItems ?? "n/a"}"; } - internal static int GetThreadPoolStats(out string iocp, out string worker) + internal static int GetThreadPoolStats(out string iocp, out string worker, out string? workItems) { ThreadPool.GetMaxThreads(out int maxWorkerThreads, out int maxIoThreads); ThreadPool.GetAvailableThreads(out int freeWorkerThreads, out int freeIoThreads); @@ -76,8 +21,14 @@ internal static int GetThreadPoolStats(out string iocp, out string worker) iocp = $"(Busy={busyIoThreads},Free={freeIoThreads},Min={minIoThreads},Max={maxIoThreads})"; worker = $"(Busy={busyWorkerThreads},Free={freeWorkerThreads},Min={minWorkerThreads},Max={maxWorkerThreads})"; + +#if NET + workItems = $"(Threads={ThreadPool.ThreadCount},QueuedItems={ThreadPool.PendingWorkItemCount},CompletedItems={ThreadPool.CompletedWorkItemCount},Timers={Timer.ActiveCount})"; +#else + workItems = null; +#endif + return busyWorkerThreads; } } -#pragma warning restore CA1416 // windows only APIs; we've guarded against that } diff --git a/src/StackExchange.Redis/PhysicalBridge.cs b/src/StackExchange.Redis/PhysicalBridge.cs index 6036c561a..36d8268bf 100644 --- a/src/StackExchange.Redis/PhysicalBridge.cs +++ b/src/StackExchange.Redis/PhysicalBridge.cs @@ -2,15 +2,16 @@ using System.Collections.Concurrent; using System.Collections.Generic; using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; using System.Runtime.CompilerServices; using System.Text; using System.Threading; -using System.Threading.Channels; using System.Threading.Tasks; +using Microsoft.Extensions.Logging; +#if 
!NETCOREAPP using Pipelines.Sockets.Unofficial.Threading; using static Pipelines.Sockets.Unofficial.Threading.MutexSlim; -using static StackExchange.Redis.ConnectionMultiplexer; -using PendingSubscriptionState = global::StackExchange.Redis.ConnectionMultiplexer.Subscription.PendingSubscriptionState; +#endif namespace StackExchange.Redis { @@ -20,24 +21,39 @@ internal sealed class PhysicalBridge : IDisposable private const int ProfileLogSamples = 10; - private const double ProfileLogSeconds = (ConnectionMultiplexer.MillisecondsPerHeartbeat * ProfileLogSamples) / 1000.0; + private const double ProfileLogSeconds = (1000 /* ms */ * ProfileLogSamples) / 1000.0; private static readonly Message ReusableAskingCommand = Message.Create(-1, CommandFlags.FireAndForget, RedisCommand.ASKING); private readonly long[] profileLog = new long[ProfileLogSamples]; - private readonly ConcurrentQueue _backlog = new ConcurrentQueue(); + /// + /// We have 1 queue in play on this bridge. + /// We're bypassing the queue for handshake events that go straight to the socket. + /// Everything else that's not an internal call goes into the queue if there is a queue. + /// + /// + /// In a later release we want to remove per-server events from this queue completely and shunt queued messages + /// to another capable primary connection if one is available to process them faster (order is already hosed). + /// For now, simplicity in: queue it all, replay or timeout it all. 
+ /// + private readonly ConcurrentQueue _backlog = new(); + private bool BacklogHasItems => !_backlog.IsEmpty; private int _backlogProcessorIsRunning = 0; + private int _backlogCurrentEnqueued = 0; + private long _backlogTotalEnqueued = 0; private int activeWriters = 0; private int beating; private int failConnectCount = 0; private volatile bool isDisposed; + private volatile bool shouldResetConnectionRetryCount; + private bool _needsReconnect; private long nonPreferredEndpointCount; - //private volatile int missedHeartbeats; + // private volatile int missedHeartbeats; private long operationCount, socketCount; - private volatile PhysicalConnection physical; + private volatile PhysicalConnection? physical; private long profileLastLog; private int profileLogIndex; @@ -46,7 +62,20 @@ internal sealed class PhysicalBridge : IDisposable private volatile int state = (int)State.Disconnected; - internal string PhysicalName => physical?.ToString(); + internal long? ConnectionId => physical?.ConnectionId; + +#if NET + private readonly SemaphoreSlim _singleWriterMutex = new(1, 1); +#else + private readonly MutexSlim _singleWriterMutex; +#endif + + internal string? PhysicalName => physical?.ToString(); + + private uint _nextHighIntegrityToken; // zero means not enabled + + public DateTime? 
ConnectedAt { get; private set; } + public PhysicalBridge(ServerEndPoint serverEndPoint, ConnectionType type, int timeoutMilliseconds) { ServerEndPoint = serverEndPoint; @@ -54,7 +83,14 @@ public PhysicalBridge(ServerEndPoint serverEndPoint, ConnectionType type, int ti Multiplexer = serverEndPoint.Multiplexer; Name = Format.ToString(serverEndPoint.EndPoint) + "/" + ConnectionType.ToString(); TimeoutMilliseconds = timeoutMilliseconds; +#if !NETCOREAPP _singleWriterMutex = new MutexSlim(timeoutMilliseconds: timeoutMilliseconds); +#endif + if (type == ConnectionType.Interactive && Multiplexer.RawConfig.HighIntegrity) + { + // we just need this to be non-zero to enable tracking + _nextHighIntegrityToken = 1; + } } private readonly int TimeoutMilliseconds; @@ -64,10 +100,10 @@ public enum State : byte Connecting, ConnectedEstablishing, ConnectedEstablished, - Disconnected + Disconnected, } - public Exception LastException { get; private set; } + public Exception? LastException { get; private set; } public ConnectionType ConnectionType { get; } @@ -79,14 +115,7 @@ public enum State : byte public ServerEndPoint ServerEndPoint { get; } - public long SubscriptionCount - { - get - { - var tmp = physical; - return tmp == null ? 0 : physical.SubscriptionCount; - } - } + public long SubscriptionCount => physical?.SubscriptionCount ?? 0; internal State ConnectionState => (State)state; internal bool IsBeating => Interlocked.CompareExchange(ref beating, 0, 0) == 1; @@ -95,20 +124,49 @@ public long SubscriptionCount public RedisCommand LastCommand { get; private set; } + /// + /// If we have (or had) a connection, report the protocol being used. + /// + /// The value remains after disconnect, so that appropriate follow-up actions (pub/sub etc) can work reliably. + public RedisProtocol? Protocol => _protocol == 0 ? default(RedisProtocol?) 
: _protocol; + private RedisProtocol _protocol; // note starts at zero, not RESP2 + internal void SetProtocol(RedisProtocol protocol) => _protocol = protocol; + + /// + /// Indicates whether the bridge needs to reconnect. + /// + internal bool NeedsReconnect => Volatile.Read(ref _needsReconnect); + + /// + /// Marks that the bridge needs to reconnect. + /// + internal void MarkNeedsReconnect() => Volatile.Write(ref _needsReconnect, true); + public void Dispose() { isDisposed = true; - ShutdownSubscriptionQueue(); + // If there's anything in the backlog and we're being torn down - exfil it immediately (e.g. so all awaitables complete) + AbandonPendingBacklog(new ObjectDisposedException("Connection is being disposed")); + try + { + _backlogAutoReset?.Set(); + _backlogAutoReset?.Dispose(); + } + catch { } using (var tmp = physical) { physical = null; } GC.SuppressFinalize(this); } + ~PhysicalBridge() { isDisposed = true; // make damn sure we don't true to resurrect + // If there's anything in the backlog and we're being torn down - exfil it immediately (e.g. so all awaitables complete) + AbandonPendingBacklog(new ObjectDisposedException("Connection is being finalized")); + // shouldn't *really* touch managed objects // in a finalizer, but we need to kill that socket, // and this is the first place that isn't going to @@ -130,29 +188,31 @@ public void ReportNextFailure() private WriteResult QueueOrFailMessage(Message message) { - if (message.IsInternalCall && message.Command != RedisCommand.QUIT) - { - // you can go in the queue, but we won't be starting - // a worker, because the handshake has not completed - message.SetEnqueued(null); - message.SetBacklogState(_backlog.Count, null); - _backlog.Enqueue(message); - return WriteResult.Success; // we'll take it... 
- } - else + // If it's an internal call that's not a QUIT + // or we're allowed to queue in general, then queue + if (message.IsInternalCall || Multiplexer.RawConfig.BacklogPolicy.QueueWhileDisconnected) { - // sorry, we're just not ready for you yet; - message.Cancel(); - Multiplexer?.OnMessageFaulted(message, null); - message.Complete(); - return WriteResult.NoConnectionAvailable; + // Let's just never ever queue a QUIT message + if (message.Command != RedisCommand.QUIT) + { + message.SetEnqueued(null); + BacklogEnqueue(message); + // Note: we don't start a worker on each message here + return WriteResult.Success; // Successfully queued, so indicate success + } } + + // Anything else goes in the bin - we're just not ready for you yet + message.Cancel(); + Multiplexer.OnMessageFaulted(message, null); + message.Complete(); + return WriteResult.NoConnectionAvailable; } private WriteResult FailDueToNoConnection(Message message) { message.Cancel(); - Multiplexer?.OnMessageFaulted(message, null); + Multiplexer.OnMessageFaulted(message, null); message.Complete(); return WriteResult.NoConnectionAvailable; } @@ -161,27 +221,48 @@ private WriteResult FailDueToNoConnection(Message message) public WriteResult TryWriteSync(Message message, bool isReplica) { if (isDisposed) throw new ObjectDisposedException(Name); - if (!IsConnected) return QueueOrFailMessage(message); + if (!IsConnected || NeedsReconnect) return QueueOrFailMessage(message); var physical = this.physical; - if (physical == null) return FailDueToNoConnection(message); - -#pragma warning disable CS0618 + if (physical == null) + { + // If we're not connected yet and supposed to, queue it up + if (Multiplexer.RawConfig.BacklogPolicy.QueueWhileDisconnected) + { + if (TryPushToBacklog(message, onlyIfExists: false)) + { + message.SetEnqueued(null); + return WriteResult.Success; + } + } + return FailDueToNoConnection(message); + } var result = WriteMessageTakingWriteLockSync(physical, message); -#pragma warning 
restore CS0618 LogNonPreferred(message.Flags, isReplica); return result; } - public ValueTask TryWriteAsync(Message message, bool isReplica) + public ValueTask TryWriteAsync(Message message, bool isReplica, bool bypassBacklog = false) { if (isDisposed) throw new ObjectDisposedException(Name); - if (!IsConnected) return new ValueTask(QueueOrFailMessage(message)); + if ((!IsConnected || NeedsReconnect) && !bypassBacklog) return new ValueTask(QueueOrFailMessage(message)); var physical = this.physical; - if (physical == null) return new ValueTask(FailDueToNoConnection(message)); + if (physical == null) + { + // If we're not connected yet and supposed to, queue it up + if (!bypassBacklog && Multiplexer.RawConfig.BacklogPolicy.QueueWhileDisconnected) + { + if (TryPushToBacklog(message, onlyIfExists: false)) + { + message.SetEnqueued(null); + return new ValueTask(WriteResult.Success); + } + } + return new ValueTask(FailDueToNoConnection(message)); + } - var result = WriteMessageTakingWriteLockAsync(physical, message); + var result = WriteMessageTakingWriteLockAsync(physical, message, bypassBacklog: bypassBacklog); LogNonPreferred(message.Flags, isReplica); return result; } @@ -195,17 +276,17 @@ internal void AppendProfile(StringBuilder sb) } clone[ProfileLogSamples] = Interlocked.Read(ref operationCount); Array.Sort(clone); - sb.Append(" ").Append(clone[0]); + sb.Append(' ').Append(clone[0]); for (int i = 1; i < clone.Length; i++) { if (clone[i] != clone[i - 1]) { - sb.Append("+").Append(clone[i] - clone[i - 1]); + sb.Append('+').Append(clone[i] - clone[i - 1]); } } if (clone[0] != clone[ProfileLogSamples]) { - sb.Append("=").Append(clone[ProfileLogSamples]); + sb.Append('=').Append(clone[ProfileLogSamples]); } double rate = (clone[ProfileLogSamples] - clone[0]) / ProfileLogSeconds; sb.Append(" (").Append(rate.ToString("N2")).Append(" ops/s; spans ").Append(ProfileLogSeconds).Append("s)"); @@ -217,96 +298,76 @@ internal void GetCounters(ConnectionCounters counters) 
counters.SocketCount = Interlocked.Read(ref socketCount); counters.WriterCount = Interlocked.CompareExchange(ref activeWriters, 0, 0); counters.NonPreferredEndpointCount = Interlocked.Read(ref nonPreferredEndpointCount); + counters.PendingUnsentItems = Volatile.Read(ref _backlogCurrentEnqueued); physical?.GetCounters(counters); } - private Channel _subscriptionBackgroundQueue; - private static readonly UnboundedChannelOptions s_subscriptionQueueOptions = new UnboundedChannelOptions - { - AllowSynchronousContinuations = false, // we do *not* want the async work to end up on the caller's thread - SingleReader = true, // only one reader will be started per channel - SingleWriter = true, // writes will be synchronized, because order matters - }; - - private Channel GetSubscriptionQueue() + internal readonly struct BridgeStatus { - var queue = _subscriptionBackgroundQueue; - if (queue == null) - { - queue = Channel.CreateUnbounded(s_subscriptionQueueOptions); - var existing = Interlocked.CompareExchange(ref _subscriptionBackgroundQueue, queue, null); - - if (existing != null) return existing; // we didn't win, but that's fine - - // we won (_subqueue is now queue) - // this means we have a new channel without a reader; let's fix that! - Task.Run(() => ExecuteSubscriptionLoop()); - } - return queue; - } - - private void ShutdownSubscriptionQueue() - { - try - { - Interlocked.CompareExchange(ref _subscriptionBackgroundQueue, null, null)?.Writer.TryComplete(); - } - catch { } + /// + /// Number of messages sent since the last heartbeat was processed. + /// + public int MessagesSinceLastHeartbeat { get; init; } + + /// + /// The time this connection was connected at, if it's connected currently. + /// + public DateTime? ConnectedAt { get; init; } + + /// + /// Whether the pipe writer is currently active. + /// + public bool IsWriterActive { get; init; } + + /// + /// Status of the currently processing backlog, if any. 
+ /// + public BacklogStatus BacklogStatus { get; init; } + + /// + /// The number of messages that are in the backlog queue (waiting to be sent when the connection is healthy again). + /// + public int BacklogMessagesPending { get; init; } + + /// + /// The number of messages that are in the backlog queue (waiting to be sent when the connection is healthy again). + /// + public int BacklogMessagesPendingCounter { get; init; } + + /// + /// The number of messages ever added to the backlog queue in the life of this connection. + /// + public long TotalBacklogMessagesQueued { get; init; } + + /// + /// Status for the underlying . + /// + public PhysicalConnection.ConnectionStatus Connection { get; init; } + + /// + /// The default bridge stats, notable *not* the same as default since initializers don't run. + /// + public static BridgeStatus Zero { get; } = new() { Connection = PhysicalConnection.ConnectionStatus.Zero }; + + public override string ToString() => + $"MessagesSinceLastHeartbeat: {MessagesSinceLastHeartbeat}, ConnectedAt: {ConnectedAt?.ToString("u") ?? "n/a"}, Writer: {(IsWriterActive ? "Active" : "Inactive")}, BacklogStatus: {BacklogStatus}, BacklogMessagesPending: (Queue: {BacklogMessagesPending}, Counter: {BacklogMessagesPendingCounter}), TotalBacklogMessagesQueued: {TotalBacklogMessagesQueued}, Connection: ({Connection})"; } - private async Task ExecuteSubscriptionLoop() // pushes items that have been enqueued over the bridge + internal BridgeStatus GetStatus() => new() { - // note: this will execute on the default pool rather than our dedicated pool; I'm... OK with this - var queue = _subscriptionBackgroundQueue ?? Interlocked.CompareExchange(ref _subscriptionBackgroundQueue, null, null); // just to be sure we can read it! 
- try - { - while (await queue.Reader.WaitToReadAsync().ForAwait() && queue.Reader.TryRead(out var next)) - { - try - { - if ((await TryWriteAsync(next.Message, next.IsReplica).ForAwait()) != WriteResult.Success) - { - next.Abort(); - } - } - catch (Exception ex) - { - next.Fail(ex); - } - } - } - catch (Exception ex) - { - Multiplexer.OnInternalError(ex, ServerEndPoint?.EndPoint, ConnectionType); - } - } - - internal bool TryEnqueueBackgroundSubscriptionWrite(in PendingSubscriptionState state) - => isDisposed ? false : (_subscriptionBackgroundQueue ?? GetSubscriptionQueue()).Writer.TryWrite(state); - - internal void GetOutstandingCount(out int inst, out int qs, out long @in, out int qu, out bool aw, out long toRead, out long toWrite, - out BacklogStatus bs, out PhysicalConnection.ReadStatus rs, out PhysicalConnection.WriteStatus ws) - { - inst = (int)(Interlocked.Read(ref operationCount) - Interlocked.Read(ref profileLastLog)); - qu = _backlog.Count; - aw = !_singleWriterMutex.IsAvailable; - bs = _backlogStatus; - var tmp = physical; - if (tmp == null) - { - qs = 0; - toRead = toWrite = @in = -1; - rs = PhysicalConnection.ReadStatus.NA; - ws = PhysicalConnection.WriteStatus.NA; - } - else - { - qs = tmp.GetSentAwaitingResponseCount(); - @in = tmp.GetSocketBytes(out toRead, out toWrite); - rs = tmp.GetReadStatus(); - ws = tmp.GetWriteStatus(); - } - } + MessagesSinceLastHeartbeat = (int)(Interlocked.Read(ref operationCount) - Interlocked.Read(ref profileLastLog)), + ConnectedAt = ConnectedAt, +#if NET + IsWriterActive = _singleWriterMutex.CurrentCount == 0, +#else + IsWriterActive = !_singleWriterMutex.IsAvailable, +#endif + BacklogMessagesPending = _backlog.Count, + BacklogMessagesPendingCounter = Volatile.Read(ref _backlogCurrentEnqueued), + BacklogStatus = _backlogStatus, + TotalBacklogMessagesQueued = _backlogTotalEnqueued, + Connection = physical?.GetStatus() ?? 
PhysicalConnection.ConnectionStatus.Default, + }; internal string GetStormLog() { @@ -325,12 +386,16 @@ internal void IncrementOpCount() Interlocked.Increment(ref operationCount); } - internal void KeepAlive() + /// + /// Sends a keepalive message (ECHO or PING) to keep connections alive and check validity of response. + /// + /// Whether to run even then the connection isn't idle. + internal void KeepAlive(bool forceRun = false) { - if (!(physical?.IsIdle() ?? false)) return; // don't pile on if already doing something + if (!forceRun && !(physical?.IsIdle() ?? false)) return; // don't pile on if already doing something var commandMap = Multiplexer.CommandMap; - Message msg = null; + Message? msg = null; var features = ServerEndPoint.GetFeatures(); switch (ConnectionType) { @@ -342,12 +407,12 @@ internal void KeepAlive() if (commandMap.IsAvailable(RedisCommand.PING) && features.PingOnSubscriber) { msg = Message.Create(-1, CommandFlags.FireAndForget, RedisCommand.PING); + msg.SetForSubscriptionBridge(); msg.SetSource(ResultProcessor.Tracer, null); } else if (commandMap.IsAvailable(RedisCommand.UNSUBSCRIBE)) { - msg = Message.Create(-1, CommandFlags.FireAndForget, RedisCommand.UNSUBSCRIBE, - (RedisChannel)Multiplexer.UniqueId); + msg = Message.Create(-1, CommandFlags.FireAndForget, RedisCommand.UNSUBSCRIBE, RedisChannel.Literal(Multiplexer.UniqueId)); msg.SetSource(ResultProcessor.TrackSubscriptions, null); } break; @@ -357,9 +422,9 @@ internal void KeepAlive() { msg.SetInternalCall(); Multiplexer.Trace("Enqueue: " + msg); - Multiplexer.OnInfoMessage($"heartbeat ({physical?.LastWriteSecondsAgo}s >= {ServerEndPoint?.WriteEverySeconds}s, {physical?.GetSentAwaitingResponseCount()} waiting) '{msg.CommandAndKey}' on '{PhysicalName}' (v{features.Version})"); - physical?.UpdateLastWriteTime(); // pre-emptively -#pragma warning disable CS0618 + Multiplexer.OnInfoMessage($"heartbeat ({physical?.LastWriteSecondsAgo}s >= {ServerEndPoint.WriteEverySeconds}s, 
{physical?.GetSentAwaitingResponseCount()} waiting) '{msg.CommandAndKey}' on '{PhysicalName}' (v{features.Version})"); + physical?.UpdateLastWriteTime(); // preemptively +#pragma warning disable CS0618 // Type or member is obsolete var result = TryWriteSync(msg, ServerEndPoint.IsReplica); #pragma warning restore CS0618 @@ -371,13 +436,14 @@ internal void KeepAlive() } } - internal async Task OnConnectedAsync(PhysicalConnection connection, LogProxy log) + internal async Task OnConnectedAsync(PhysicalConnection connection, ILogger? log) { Trace("OnConnected"); if (physical == connection && !isDisposed && ChangeState(State.Connecting, State.ConnectedEstablishing)) { + ConnectedAt ??= DateTime.UtcNow; await ServerEndPoint.OnEstablishingAsync(connection, log).ForAwait(); - log?.WriteLine($"{Format.ToString(ServerEndPoint)}: OnEstablishingAsync complete"); + log?.LogInformationOnEstablishingComplete(new(ServerEndPoint.EndPoint)); } else { @@ -399,10 +465,23 @@ internal void ResetNonConnected() TryConnect(null); } - internal void OnConnectionFailed(PhysicalConnection connection, ConnectionFailureType failureType, Exception innerException) + internal void OnConnectionFailed(PhysicalConnection connection, ConnectionFailureType failureType, Exception innerException, bool wasRequested) { + if (wasRequested) + { + Multiplexer.Logger?.LogInformationConnectionFailureRequested(innerException, innerException.Message); + } + else + { + Multiplexer.Logger?.LogErrorConnectionIssue(innerException, innerException.Message); + } Trace($"OnConnectionFailed: {connection}"); - AbandonPendingBacklog(innerException); + // If we're configured to, fail all pending backlogged messages + if (Multiplexer.RawConfig.BacklogPolicy?.AbortPendingOnConnectionFailure == true) + { + AbandonPendingBacklog(innerException); + } + if (reportNextFailure) { LastException = innerException; @@ -412,19 +491,20 @@ internal void OnConnectionFailed(PhysicalConnection connection, ConnectionFailur } } - internal void 
OnDisconnected(ConnectionFailureType failureType, PhysicalConnection connection, out bool isCurrent, out State oldState) + internal void OnDisconnected(ConnectionFailureType failureType, PhysicalConnection? connection, out bool isCurrent, out State oldState) { Trace($"OnDisconnected: {failureType}"); oldState = default(State); // only defined when isCurrent = true - if (isCurrent = (physical == connection)) + ConnectedAt = default; + if (isCurrent = physical == connection) { Trace("Bridge noting disconnect from active connection" + (isDisposed ? " (disposed)" : "")); oldState = ChangeState(State.Disconnected); physical = null; if (oldState == State.ConnectedEstablished && !ServerEndPoint.IsReplica) { - // if the disconnected endpoint was a master endpoint run info replication + // if the disconnected endpoint was a primary endpoint run info replication // more frequently on it's replica with exponential increments foreach (var r in ServerEndPoint.Replicas) { @@ -450,16 +530,17 @@ internal void OnDisconnected(ConnectionFailureType failureType, PhysicalConnecti private void AbandonPendingBacklog(Exception ex) { - while (_backlog.TryDequeue(out Message next)) + while (BacklogTryDequeue(out Message? next)) { - Multiplexer?.OnMessageFaulted(next, ex); + Multiplexer.OnMessageFaulted(next, ex); next.SetExceptionAndComplete(ex, this); } } + internal void OnFullyEstablished(PhysicalConnection connection, string source) { Trace("OnFullyEstablished"); - connection?.SetIdle(); + connection.SetIdle(); if (physical == connection && !isDisposed && ChangeState(State.ConnectedEstablishing, State.ConnectedEstablished)) { reportNextFailure = reconfigureNextFailure = true; @@ -468,8 +549,10 @@ internal void OnFullyEstablished(PhysicalConnection connection, string source) ServerEndPoint.OnFullyEstablished(connection, source); // do we have pending system things to do? 
- bool createWorker = !_backlog.IsEmpty; - if (createWorker) StartBacklogProcessor(); + if (BacklogHasItems) + { + StartBacklogProcessor(); + } if (ConnectionType == ConnectionType.Interactive) ServerEndPoint.CheckInfoReplication(); } @@ -482,12 +565,25 @@ internal void OnFullyEstablished(PhysicalConnection connection, string source) private int connectStartTicks; private long connectTimeoutRetryCount = 0; + private bool DueForConnectRetry() + { + int connectTimeMilliseconds = unchecked(Environment.TickCount - Volatile.Read(ref connectStartTicks)); + return Multiplexer.RawConfig.ReconnectRetryPolicy.ShouldRetry(Interlocked.Read(ref connectTimeoutRetryCount), connectTimeMilliseconds); + } + internal void OnHeartbeat(bool ifConnectedOnly) { bool runThisTime = false; try { - CheckBacklogForTimeouts(); + if (BacklogHasItems) + { + // If we have a backlog, kickoff the processing + // This will first timeout any messages that have sat too long and either: + // A: Abort if we're still not connected yet (we should be in this path) + // or B: Process the backlog and send those messages through the pipe + StartBacklogProcessor(); + } runThisTime = !isDisposed && Interlocked.CompareExchange(ref beating, 1, 0) == 0; if (!runThisTime) return; @@ -500,22 +596,29 @@ internal void OnHeartbeat(bool ifConnectedOnly) switch (state) { case (int)State.Connecting: - int connectTimeMilliseconds = unchecked(Environment.TickCount - Thread.VolatileRead(ref connectStartTicks)); - bool shouldRetry = Multiplexer.RawConfig.ReconnectRetryPolicy.ShouldRetry(Interlocked.Read(ref connectTimeoutRetryCount), connectTimeMilliseconds); - if (shouldRetry) + if (DueForConnectRetry()) { Interlocked.Increment(ref connectTimeoutRetryCount); - LastException = ExceptionFactory.UnableToConnect(Multiplexer, "ConnectTimeout"); + var ex = ExceptionFactory.UnableToConnect(Multiplexer, "ConnectTimeout", Name); + LastException = ex; + Multiplexer.Logger?.LogErrorConnectionIssue(ex, ex.Message); Trace("Aborting 
connect"); // abort and reconnect var snapshot = physical; OnDisconnected(ConnectionFailureType.UnableToConnect, snapshot, out bool isCurrent, out State oldState); - using (snapshot) { } // dispose etc + snapshot?.Dispose(); // Cleanup the existing connection/socket if any, otherwise it will wait reading indefinitely TryConnect(null); } break; case (int)State.ConnectedEstablishing: + // (Fall through) Happens when we successfully connected via TCP, but no Redis handshake completion yet. + // This can happen brief (usual) or when the server never answers (rare). When we're in this state, + // a socket is open and reader likely listening indefinitely for incoming data on an async background task. + // We need to time that out and cleanup the PhysicalConnection if needed, otherwise that reader and socket will remain open + // for the lifetime of the application due to being orphaned, yet still referenced by the active task doing the pipe read. case (int)State.ConnectedEstablished: + // Track that we should reset the count on the next disconnect, but not do so in a loop + shouldResetConnectionRetryCount = true; var tmp = physical; if (tmp != null) { @@ -524,12 +627,27 @@ internal void OnHeartbeat(bool ifConnectedOnly) Interlocked.Exchange(ref connectTimeoutRetryCount, 0); tmp.BridgeCouldBeNull?.ServerEndPoint?.ClearUnselectable(UnselectableFlags.DidNotRespond); } - tmp.OnBridgeHeartbeat(); - int writeEverySeconds = ServerEndPoint.WriteEverySeconds, - checkConfigSeconds = ServerEndPoint.ConfigCheckSeconds; + int timedOutThisHeartbeat = tmp.OnBridgeHeartbeat(); + int writeEverySeconds = ServerEndPoint.WriteEverySeconds; + bool configCheckDue = ServerEndPoint.ConfigCheckSeconds > 0 && ServerEndPoint.LastInfoReplicationCheckSecondsAgo >= ServerEndPoint.ConfigCheckSeconds; if (state == (int)State.ConnectedEstablished && ConnectionType == ConnectionType.Interactive - && checkConfigSeconds > 0 && ServerEndPoint.LastInfoReplicationCheckSecondsAgo >= checkConfigSeconds + && 
tmp.BridgeCouldBeNull?.Multiplexer.RawConfig.HeartbeatConsistencyChecks == true) + { + // If HeartbeatConsistencyChecks are enabled, we're sending a PING (expecting PONG) or ECHO (expecting UniqueID back) every single + // heartbeat as an opt-in measure to react to any network stream drop ASAP to terminate the connection as faulted. + // If we don't get the expected response to that command, then the connection is terminated. + // This is to prevent the case of things like 100% string command usage where a protocol error isn't otherwise encountered. + KeepAlive(forceRun: true); + + // If we're configured to check info replication, perform that too + if (configCheckDue) + { + ServerEndPoint.CheckInfoReplication(); + } + } + else if (state == (int)State.ConnectedEstablished && ConnectionType == ConnectionType.Interactive + && configCheckDue && ServerEndPoint.CheckInfoReplication()) { // that serves as a keep-alive, if it is accepted @@ -544,25 +662,49 @@ internal void OnHeartbeat(bool ifConnectedOnly) else { OnDisconnected(ConnectionFailureType.SocketFailure, tmp, out bool ignore, out State oldState); + tmp.Dispose(); // Cleanup the existing connection/socket if any, otherwise it will wait reading indefinitely } } - else if (writeEverySeconds <= 0 && tmp.IsIdle() + else if (writeEverySeconds <= 0 + && tmp.IsIdle() && tmp.LastWriteSecondsAgo > 2 && tmp.GetSentAwaitingResponseCount() != 0) { - // there's a chance this is a dead socket; sending data will shake that - // up a bit, so if we have an empty unsent queue and a non-empty sent - // queue, test the socket + // There's a chance this is a dead socket; sending data will shake that up a bit, + // so if we have an empty unsent queue and a non-empty sent queue, test the socket. 
KeepAlive(); } + + // This is an "always" check - we always want to evaluate a dead connection from a non-responsive sever regardless of the need to heartbeat above + if (timedOutThisHeartbeat > 0 + && tmp.LastReadSecondsAgo * 1_000 > (tmp.BridgeCouldBeNull?.Multiplexer.AsyncTimeoutMilliseconds * 4)) + { + // If we've received *NOTHING* on the pipe in 4 timeouts worth of time and we're timing out commands, issue a connection failure so that we reconnect + // This is meant to address the scenario we see often in Linux configs where TCP retries will happen for 15 minutes. + // To us as a client, we'll see the socket as green/open/fine when writing but we'll bet getting nothing back. + // Since we can't depend on the pipe to fail in that case, we want to error here based on the criteria above so we reconnect broken clients much faster. + tmp.BridgeCouldBeNull?.Multiplexer.Logger?.LogWarningDeadSocketDetected(tmp.LastReadSecondsAgo, timedOutThisHeartbeat); + OnDisconnected(ConnectionFailureType.SocketFailure, tmp, out _, out State oldState); + tmp.Dispose(); // Cleanup the existing connection/socket if any, otherwise it will wait reading indefinitely + } } break; case (int)State.Disconnected: - Interlocked.Exchange(ref connectTimeoutRetryCount, 0); - if (!ifConnectedOnly) + // Only if we should reset the connection count + // This should only happen after a successful reconnection, and not every time we bounce from BeginConnectAsync -> Disconnected + // in a failure loop case that happens when a node goes missing forever. + if (shouldResetConnectionRetryCount) + { + shouldResetConnectionRetryCount = false; + Interlocked.Exchange(ref connectTimeoutRetryCount, 0); + } + if (!ifConnectedOnly && DueForConnectRetry()) { - Multiplexer.Trace("Resurrecting " + ToString()); - Multiplexer.OnResurrecting(ServerEndPoint?.EndPoint, ConnectionType); + // Increment count here, so that we don't re-enter in Connecting case up top - we don't want to re-enter and log there. 
+ Interlocked.Increment(ref connectTimeoutRetryCount); + + Multiplexer.Logger?.LogInformationResurrecting(this, connectTimeoutRetryCount); + Multiplexer.OnResurrecting(ServerEndPoint.EndPoint, ConnectionType); TryConnect(null); } break; @@ -582,18 +724,8 @@ internal void OnHeartbeat(bool ifConnectedOnly) } } - internal void RemovePhysical(PhysicalConnection connection) - { -#pragma warning disable 0420 - Interlocked.CompareExchange(ref physical, null, connection); -#pragma warning restore 0420 - } - [Conditional("VERBOSE")] - internal void Trace(string message) - { - Multiplexer.Trace(message, ToString()); - } + internal void Trace(string message) => Multiplexer.Trace(message, ToString()); [Conditional("VERBOSE")] internal void Trace(bool condition, string message) @@ -615,10 +747,11 @@ internal bool TryEnqueue(List messages, bool isReplica) var physical = this.physical; if (physical == null) return false; foreach (var message in messages) - { // deliberately not taking a single lock here; we don't care if + { + // deliberately not taking a single lock here; we don't care if // other threads manage to interleave - in fact, it would be desirable // (to avoid a batch monopolising the connection) -#pragma warning disable CS0618 +#pragma warning disable CS0618 // Type or member is obsolete WriteMessageTakingWriteLockSync(physical, message); #pragma warning restore CS0618 LogNonPreferred(message.Flags, isReplica); @@ -626,9 +759,7 @@ internal bool TryEnqueue(List messages, bool isReplica) return true; } - private readonly MutexSlim _singleWriterMutex; - - private Message _activeMessage; + private Message? 
_activeMessage; private WriteResult WriteMessageInsideLock(PhysicalConnection physical, Message message) { @@ -636,64 +767,43 @@ private WriteResult WriteMessageInsideLock(PhysicalConnection physical, Message var existingMessage = Interlocked.CompareExchange(ref _activeMessage, message, null); if (existingMessage != null) { - Multiplexer?.OnInfoMessage($"reentrant call to WriteMessageTakingWriteLock for {message.CommandAndKey}, {existingMessage.CommandAndKey} is still active"); + Multiplexer.OnInfoMessage($"Reentrant call to WriteMessageTakingWriteLock for {message.CommandAndKey}, {existingMessage.CommandAndKey} is still active"); return WriteResult.NoConnectionAvailable; } -#if DEBUG - int startWriteTime = Environment.TickCount; - try -#endif + + physical.SetWriting(); + if (message is IMultiMessage multiMessage) { - physical.SetWriting(); var messageIsSent = false; - if (message is IMultiMessage) + SelectDatabaseInsideWriteLock(physical, message); // need to switch database *before* the transaction + foreach (var subCommand in multiMessage.GetMessages(physical)) { - SelectDatabaseInsideWriteLock(physical, message); // need to switch database *before* the transaction - foreach (var subCommand in ((IMultiMessage)message).GetMessages(physical)) + result = WriteMessageToServerInsideWriteLock(physical, subCommand); + if (result != WriteResult.Success) { - result = WriteMessageToServerInsideWriteLock(physical, subCommand); - if (result != WriteResult.Success) - { - // we screwed up; abort; note that WriteMessageToServer already - // killed the underlying connection - Trace("Unable to write to server"); - message.Fail(ConnectionFailureType.ProtocolFailure, null, "failure before write: " + result.ToString()); - message.Complete(); - return result; - } - //The parent message (next) may be returned from GetMessages - //and should not be marked as sent again below - messageIsSent = messageIsSent || subCommand == message; + // we screwed up; abort; note that 
WriteMessageToServer already + // killed the underlying connection + Trace("Unable to write to server"); + message.Fail(ConnectionFailureType.ProtocolFailure, null, "failure before write: " + result.ToString(), Multiplexer); + message.Complete(); + return result; } - if (!messageIsSent) - { - message.SetRequestSent(); // well, it was attempted, at least... - } - - return WriteResult.Success; + // The parent message (next) may be returned from GetMessages + // and should not be marked as sent again below. + messageIsSent = messageIsSent || subCommand == message; } - else + if (!messageIsSent) { - return WriteMessageToServerInsideWriteLock(physical, message); + message.SetRequestSent(); // well, it was attempted, at least... } + + return WriteResult.Success; } -#if DEBUG - finally + else { - int endWriteTime = Environment.TickCount; - int writeDuration = unchecked(endWriteTime - startWriteTime); - if (writeDuration > _maxWriteTime) - { - _maxWriteTime = writeDuration; - _maxWriteCommand = message?.Command ?? default; - } + return WriteMessageToServerInsideWriteLock(physical, message); } -#endif } -#if DEBUG - private volatile int _maxWriteTime = -1; - private RedisCommand _maxWriteCommand; -#endif [Obsolete("prefer async")] internal WriteResult WriteMessageTakingWriteLockSync(PhysicalConnection physical, Message message) @@ -701,30 +811,52 @@ internal WriteResult WriteMessageTakingWriteLockSync(PhysicalConnection physical Trace("Writing: " + message); message.SetEnqueued(physical); // this also records the read/write stats at this point + // AVOID REORDERING MESSAGES + // Prefer to add it to the backlog if this thread can see that there might already be a message backlog. 
+ // We do this before attempting to take the write lock, because we won't actually write, we'll just let the backlog get processed in due course + if (TryPushToBacklog(message, onlyIfExists: true)) + { + return WriteResult.Success; // queued counts as success + } + +#if NET + bool gotLock = false; +#else LockToken token = default; +#endif try { +#if NET + gotLock = _singleWriterMutex.Wait(0); + if (!gotLock) +#else token = _singleWriterMutex.TryWait(WaitOptions.NoDelay); if (!token.Success) +#endif { - // we can't get it *instantaneously*; is there - // perhaps a backlog and active backlog processor? - if (PushToBacklog(message, onlyIfExists: true)) return WriteResult.Success; // queued counts as success + // If we can't get it *instantaneously*, pass it to the backlog for throughput + if (TryPushToBacklog(message, onlyIfExists: false)) + { + return WriteResult.Success; // queued counts as success + } // no backlog... try to wait with the timeout; // if we *still* can't get it: that counts as // an actual timeout +#if NET + gotLock = _singleWriterMutex.Wait(TimeoutMilliseconds); + if (!gotLock) return TimedOutBeforeWrite(message); +#else token = _singleWriterMutex.TryWait(); if (!token.Success) return TimedOutBeforeWrite(message); +#endif } var result = WriteMessageInsideLock(physical, message); if (result == WriteResult.Success) { -#pragma warning disable CS0618 result = physical.FlushSync(false, TimeoutMilliseconds); -#pragma warning restore CS0618 } physical.SetIdle(); @@ -734,24 +866,40 @@ internal WriteResult WriteMessageTakingWriteLockSync(PhysicalConnection physical finally { UnmarkActiveMessage(message); +#if NET + if (gotLock) + { + _singleWriterMutex.Release(); + } +#else token.Dispose(); +#endif } - } [MethodImpl(MethodImplOptions.AggressiveInlining)] - private bool PushToBacklog(Message message, bool onlyIfExists) + private bool TryPushToBacklog(Message message, bool onlyIfExists, bool bypassBacklog = false) { - // Note, for deciding emptyness for 
whether to push onlyIfExists, and start worker, - // we only need care if WE are able to - // see the queue when its empty. Not whether anyone else sees it as empty. + // In the handshake case: send the command directly through. + // If we're disconnected *in the middle of a handshake*, we've bombed a brand new socket and failing, + // backing off, and retrying next heartbeat is best anyway. + // Internal calls also shouldn't queue - try immediately. If these aren't errors (most aren't), we + // won't alert the user. + if (bypassBacklog || message.IsInternalCall) + { + return false; + } + + // Note, for deciding emptiness for whether to push onlyIfExists, and start worker, + // we only need care if WE are able to see the queue when its empty. + // Not whether anyone else sees it as empty. // So strong synchronization is not required. - if (_backlog.IsEmpty & onlyIfExists) return false; + if (onlyIfExists && Volatile.Read(ref _backlogCurrentEnqueued) == 0) + { + return false; + } - - int count = _backlog.Count; - message.SetBacklogState(count, physical); - _backlog.Enqueue(message); + BacklogEnqueue(message); // The correct way to decide to start backlog process is not based on previously empty // but based on a) not empty now (we enqueued!) and b) no backlog processor already running. 
@@ -759,62 +907,137 @@ private bool PushToBacklog(Message message, bool onlyIfExists) StartBacklogProcessor(); return true; } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private void BacklogEnqueue(Message message) + { + bool wasEmpty = _backlog.IsEmpty; + // important that this *precedes* enqueue, to play well with HasPendingCallerFacingItems + Interlocked.Increment(ref _backlogCurrentEnqueued); + Interlocked.Increment(ref _backlogTotalEnqueued); + _backlog.Enqueue(message); + message.SetBacklogged(); + + if (wasEmpty) + { + // it is important to do this *after* adding, so that we can't + // get into a thread-race where the heartbeat checks too fast; + // the fact that we're accessing Multiplexer down here means that + // we're rooting it ourselves via the stack, so we don't need + // to worry about it being collected until at least after this + Multiplexer.Root(); + } + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private bool BacklogTryDequeue([NotNullWhen(true)] out Message? message) + { + if (_backlog.TryDequeue(out message)) + { + Interlocked.Decrement(ref _backlogCurrentEnqueued); + return true; + } + return false; + } + [MethodImpl(MethodImplOptions.AggressiveInlining)] private void StartBacklogProcessor() { if (Interlocked.CompareExchange(ref _backlogProcessorIsRunning, 1, 0) == 0) { - -#if DEBUG - _backlogProcessorRequestedTime = Environment.TickCount; -#endif - Task.Run(ProcessBacklogAsync); + var successfullyStarted = false; + try + { + _backlogStatus = BacklogStatus.Activating; + + // Start the backlog processor; this is a bit unorthodox, as you would *expect* this to just + // be Task.Run; that would work fine when healthy, but when we're falling on our face, it is + // easy to get into a thread-pool-starvation "spiral of death" if we rely on the thread-pool + // to unblock the thread-pool when there could be sync-over-async callers. 
Note that in reality, + // the initial "enough" of the back-log processor is typically sync, which means that the thread + // we start is actually useful, despite thinking "but that will just go async and back to the pool" + var thread = new Thread(s => ((PhysicalBridge)s!).ProcessBacklog()) + { + IsBackground = true, // don't keep process alive (also: act like the thread-pool used to) + Name = "StackExchange.Redis Backlog", // help anyone looking at thread-dumps + }; + + thread.Start(this); + successfullyStarted = true; + } + catch (Exception ex) + { + OnInternalError(ex); + Trace("StartBacklogProcessor failed to start backlog processor thread: " + ex.Message); + } + finally + { + // If thread failed to start - reset flag to ensure next call doesn't erroneously think backlog process is running + if (!successfullyStarted) + { + _backlogStatus = BacklogStatus.Inactive; + Interlocked.Exchange(ref _backlogProcessorIsRunning, 0); + } + } + } + else + { + _backlogAutoReset.Set(); } } -#if DEBUG - private volatile int _backlogProcessorRequestedTime; -#endif - private void CheckBacklogForTimeouts() // check the head of the backlog queue, consuming anything that looks dead + /// + /// Crawls from the head of the backlog queue, consuming anything that should have timed out + /// and pruning it accordingly (these messages will get timeout exceptions). + /// + private void CheckBacklogForTimeouts() { var now = Environment.TickCount; var timeout = TimeoutMilliseconds; - // Because peeking at the backlog, checking message and then dequeueing, is not thread-safe, we do have to use + // Because peeking at the backlog, checking message and then dequeuing, is not thread-safe, we do have to use // a lock here, for mutual exclusion of backlog DEQUEUERS. Unfortunately. // But we reduce contention by only locking if we see something that looks timed out. - Message message; - while (_backlog.TryPeek(out message)) + while (_backlog.TryPeek(out Message? 
message)) { - if (message.IsInternalCall) break; // don't stomp these (not that they should have the async timeout flag, but...) - if (!message.HasAsyncTimedOut(now, timeout, out var _)) break; // not a timeout - we can stop looking + // See if the message has pass our async timeout threshold + // Note: All timed out messages must be dequeued, even when no completion is needed, to be able to dequeue and complete other timed out messages. + if (!message.HasTimedOut(now, timeout, out var _)) break; // not a timeout - we can stop looking lock (_backlog) { - // peek again since we didn't have lock before... + // Peek again since we didn't have lock before... // and rerun the exact same checks as above, note that it may be a different message now if (!_backlog.TryPeek(out message)) break; - if (message.IsInternalCall) break; - if (!message.HasAsyncTimedOut(now, timeout, out var _)) break; + if (!message.HasTimedOut(now, timeout, out var _)) break; - if (!_backlog.TryDequeue(out var message2) || (message != message2)) // consume it for real + if (!BacklogTryDequeue(out var message2) || (message != message2)) // consume it for real { throw new RedisException("Thread safety bug detected! A queue message disappeared while we had the backlog lock"); } } - // Tell the message it has failed - // Note: Attempting to *avoid* reentrancy/deadlock issues by not holding the lock while completing messages. - var ex = Multiplexer.GetException(WriteResult.TimeoutBeforeWrite, message, ServerEndPoint); - message.SetExceptionAndComplete(ex, this); + // We only handle async timeouts here, synchronous timeouts are handled upstream. + // Those sync timeouts happen in ConnectionMultiplexer.ExecuteSyncImpl() via Monitor.Wait. + if (message.ResultBoxIsAsync) + { + // Tell the message it has failed + // Note: Attempting to *avoid* reentrancy/deadlock issues by not holding the lock while completing messages. 
+ var ex = Multiplexer.GetException(WriteResult.TimeoutBeforeWrite, message, ServerEndPoint, this); + message.SetExceptionAndComplete(ex, this); + } } } + internal enum BacklogStatus : byte { Inactive, + Activating, Starting, Started, CheckingForWork, + SpinningDown, CheckingForTimeout, + CheckingForTimeoutComplete, RecordingTimeout, WritingMessage, Flushing, @@ -823,87 +1046,173 @@ internal enum BacklogStatus : byte RecordingFault, SettingIdle, Faulted, + NotifyingDisposed, } + private volatile BacklogStatus _backlogStatus; - private async Task ProcessBacklogAsync() + + /// + /// Process the backlog(s) in play if any. + /// This means flushing commands to an available/active connection (if any) or spinning until timeout if not. + /// + private void ProcessBacklog() { - LockToken token = default; + _backlogStatus = BacklogStatus.Starting; try { -#if DEBUG - int tryToAcquireTime = Environment.TickCount; - var msToStartWorker = unchecked(tryToAcquireTime - _backlogProcessorRequestedTime); - int failureCount = 0; + while (!isDisposed) + { + if (!_backlog.IsEmpty) + { + // TODO: vNext handoff this backlog to another primary ("can handle everything") connection + // and remove any per-server commands. This means we need to track a bit of whether something + // was server-endpoint-specific in PrepareToPushMessageToBridge (was the server ref null or not) + ProcessBridgeBacklog(); + } + + // The cost of starting a new thread is high, and we can bounce in and out of the backlog a lot. + // So instead of just exiting, keep this thread waiting for 5 seconds to see if we got another backlog item. + _backlogStatus = BacklogStatus.SpinningDown; + // Note this is happening *outside* the lock + var gotMore = _backlogAutoReset.WaitOne(5000); + if (!gotMore) + { + break; + } + } + // If we're being disposed but have items in the backlog, we need to complete them or async messages can linger forever. 
+ if (isDisposed && BacklogHasItems) + { + _backlogStatus = BacklogStatus.NotifyingDisposed; + // Because peeking at the backlog, checking message and then dequeuing, is not thread-safe, we do have to use + // a lock here, for mutual exclusion of backlog DEQUEUERS. Unfortunately. + // But we reduce contention by only locking if we see something that looks timed out. + while (BacklogHasItems) + { + Message? message = null; + lock (_backlog) + { + if (!BacklogTryDequeue(out message)) + { + break; + } + } + + var ex = ExceptionFactory.Timeout(Multiplexer, "The message was in the backlog when connection was disposed", message, ServerEndPoint, WriteResult.TimeoutBeforeWrite, this); + message.SetExceptionAndComplete(ex, this); + } + } + } + catch (ObjectDisposedException) when (!BacklogHasItems) + { + // We're being torn down and we have no backlog to process - all good. + } + catch (Exception) + { + _backlogStatus = BacklogStatus.Faulted; + } + finally + { + // Do this in finally block, so that thread aborts can't convince us the backlog processor is running forever + if (Interlocked.CompareExchange(ref _backlogProcessorIsRunning, 0, 1) != 1) + { + throw new RedisException("Bug detection, couldn't indicate shutdown of backlog processor"); + } + + // Now that nobody is processing the backlog, we should consider starting a new backlog processor + // in case a new message came in after we ended this loop. + if (BacklogHasItems) + { + // Check for faults mainly to prevent unlimited tasks spawning in a fault scenario + // This won't cause a StackOverflowException due to the Task.Run() handoff + if (_backlogStatus != BacklogStatus.Faulted) + { + StartBacklogProcessor(); + } + } + } + } + + /// + /// Reset event for monitoring backlog additions mid-run. + /// This allows us to keep the thread around for a full flush and prevent "feathering the throttle" trying + /// to flush it. In short, we don't start and stop so many threads with a bit of linger. 
+ /// + private readonly AutoResetEvent _backlogAutoReset = new AutoResetEvent(false); + + private void ProcessBridgeBacklog() + { + // Importantly: don't assume we have a physical connection here + // We are very likely to hit a state where it's not re-established or even referenced here +#if NET + bool gotLock = false; +#else + LockToken token = default; #endif + _backlogAutoReset.Reset(); + try + { _backlogStatus = BacklogStatus.Starting; - while (true) + + // First eliminate any messages that have timed out already. + _backlogStatus = BacklogStatus.CheckingForTimeout; + CheckBacklogForTimeouts(); + _backlogStatus = BacklogStatus.CheckingForTimeoutComplete; + + // For the rest of the backlog, if we're not connected there's no point - abort out + while (IsConnected) { // check whether the backlog is empty *before* even trying to get the lock if (_backlog.IsEmpty) return; // nothing to do // try and get the lock; if unsuccessful, retry - token = await _singleWriterMutex.TryWaitAsync().ConfigureAwait(false); +#if NET + gotLock = _singleWriterMutex.Wait(TimeoutMilliseconds); + if (gotLock) break; // got the lock; now go do something with it +#else + token = _singleWriterMutex.TryWait(); if (token.Success) break; // got the lock; now go do something with it - -#if DEBUG - failureCount++; #endif } _backlogStatus = BacklogStatus.Started; -#if DEBUG - int acquiredTime = Environment.TickCount; - var msToGetLock = unchecked(acquiredTime - tryToAcquireTime); -#endif - // so now we are the writer; write some things! - Message message; - var timeout = TimeoutMilliseconds; - while(true) + // Only execute if we're connected. + // Timeouts are handled above, so we're exclusively into backlog items eligible to write at this point. + // If we can't write them, abort and wait for the next heartbeat or activation to try this again. + while (IsConnected && physical?.HasOutputPipe == true) { + Message? 
message; _backlogStatus = BacklogStatus.CheckingForWork; - // We need to lock _backlog when dequeueing because of - // races with timeout processing logic + lock (_backlog) { - if (!_backlog.TryDequeue(out message)) break; // all done + // Note that we're actively taking it off the queue here, not peeking + // If there's nothing left in queue, we're done. + if (!BacklogTryDequeue(out message)) + { + break; + } } try { - _backlogStatus = BacklogStatus.CheckingForTimeout; - if (message.HasAsyncTimedOut(Environment.TickCount, timeout, out var _)) + _backlogStatus = BacklogStatus.WritingMessage; + var result = WriteMessageInsideLock(physical, message); + + if (result == WriteResult.Success) { - _backlogStatus = BacklogStatus.RecordingTimeout; - var ex = Multiplexer.GetException(WriteResult.TimeoutBeforeWrite, message, ServerEndPoint); -#if DEBUG // additional tracking - ex.Data["Redis-BacklogStartDelay"] = msToStartWorker; - ex.Data["Redis-BacklogGetLockDelay"] = msToGetLock; - if (failureCount != 0) ex.Data["Redis-BacklogFailCount"] = failureCount; - if (_maxWriteTime >= 0) ex.Data["Redis-MaxWrite"] = _maxWriteTime.ToString() + "ms, " + _maxWriteCommand.ToString(); - var maxFlush = physical?.MaxFlushTime ?? -1; - if (maxFlush >= 0) ex.Data["Redis-MaxFlush"] = maxFlush.ToString() + "ms, " + (physical?.MaxFlushBytes ?? 
-1).ToString(); - if (_maxLockDuration >= 0) ex.Data["Redis-MaxLockDuration"] = _maxLockDuration; -#endif - message.SetExceptionAndComplete(ex, this); + _backlogStatus = BacklogStatus.Flushing; +#pragma warning disable CS0618 // Type or member is obsolete + result = physical.FlushSync(false, TimeoutMilliseconds); +#pragma warning restore CS0618 // Type or member is obsolete } - else - { - _backlogStatus = BacklogStatus.WritingMessage; - var result = WriteMessageInsideLock(physical, message); - - if (result == WriteResult.Success) - { - _backlogStatus = BacklogStatus.Flushing; - result = await physical.FlushAsync(false).ConfigureAwait(false); - } - _backlogStatus = BacklogStatus.MarkingInactive; - if (result != WriteResult.Success) - { - _backlogStatus = BacklogStatus.RecordingWriteFailure; - var ex = Multiplexer.GetException(result, message, ServerEndPoint); - HandleWriteException(message, ex); - } + _backlogStatus = BacklogStatus.MarkingInactive; + if (result != WriteResult.Success) + { + _backlogStatus = BacklogStatus.RecordingWriteFailure; + var ex = Multiplexer.GetException(result, message, ServerEndPoint); + HandleWriteException(message, ex); } } catch (Exception ex) @@ -917,162 +1226,180 @@ private async Task ProcessBacklogAsync() } } _backlogStatus = BacklogStatus.SettingIdle; - physical.SetIdle(); + physical?.SetIdle(); _backlogStatus = BacklogStatus.Inactive; } - catch - { - _backlogStatus = BacklogStatus.Faulted; - } finally - { - token.Dispose(); - - // Do this in finally block, so that thread aborts can't convince us the backlog processor is running forever - if (Interlocked.CompareExchange(ref _backlogProcessorIsRunning, 0, 1) != 1) + { +#if NET + if (gotLock) { - throw new RedisException("Bug detection, couldn't indicate shutdown of backlog processor"); + _singleWriterMutex.Release(); } +#else + token.Dispose(); +#endif + } + } - // Now that nobody is processing the backlog, we should consider starting a new backlog processor - // in case a new 
message came in after we ended this loop. - if (!_backlog.IsEmpty) + public bool HasPendingCallerFacingItems() + { + if (BacklogHasItems) + { + foreach (var item in _backlog) // non-consuming, thread-safe, etc { - // Check for faults mainly to prevent unlimited tasks spawning in a fault scenario - // - it isn't StackOverflowException due to the Task.Run() - if (_backlogStatus != BacklogStatus.Faulted) - { - StartBacklogProcessor(); - } + if (!item.IsInternalCall) return true; } } + return physical?.HasPendingCallerFacingItems() ?? false; } private WriteResult TimedOutBeforeWrite(Message message) { message.Cancel(); - Multiplexer?.OnMessageFaulted(message, null); + Multiplexer.OnMessageFaulted(message, null); message.Complete(); return WriteResult.TimeoutBeforeWrite; } /// - /// This writes a message to the output stream + /// This writes a message to the output stream. /// - /// The phsyical connection to write to. + /// The physical connection to write to. /// The message to be written. - internal ValueTask WriteMessageTakingWriteLockAsync(PhysicalConnection physical, Message message) + /// Whether this message should bypass the backlog, going straight to the pipe or failing. 
+ internal ValueTask WriteMessageTakingWriteLockAsync(PhysicalConnection physical, Message message, bool bypassBacklog = false) { - /* design decision/choice; the code works fine either way, but if this is - * set to *true*, then when we can't take the writer-lock *right away*, - * we push the message to the backlog (starting a worker if needed) - * - * otherwise, we go for a TryWaitAsync and rely on the await machinery - * - * "true" seems to give faster times *when under heavy contention*, based on profiling - * but it involves the backlog concept; "false" works well under low contention, and - * makes more use of async - */ - const bool ALWAYS_USE_BACKLOG_IF_CANNOT_GET_SYNC_LOCK = true; - Trace("Writing: " + message); message.SetEnqueued(physical); // this also records the read/write stats at this point + // AVOID REORDERING MESSAGES + // Prefer to add it to the backlog if this thread can see that there might already be a message backlog. + // We do this before attempting to take the write lock, because we won't actually write, we'll just let the backlog get processed in due course + if (TryPushToBacklog(message, onlyIfExists: true, bypassBacklog: bypassBacklog)) + { + return new ValueTask(WriteResult.Success); // queued counts as success + } + bool releaseLock = true; // fine to default to true, as it doesn't matter until token is a "success" - int lockTaken = 0; +#if NET + bool gotLock = false; +#else LockToken token = default; +#endif try { // try to acquire it synchronously +#if NET + gotLock = _singleWriterMutex.Wait(0); + if (!gotLock) +#else // note: timeout is specified in mutex-constructor token = _singleWriterMutex.TryWait(options: WaitOptions.NoDelay); if (!token.Success) +#endif { - // we can't get it *instantaneously*; is there - // perhaps a backlog and active backlog processor? 
- if (PushToBacklog(message, onlyIfExists: !ALWAYS_USE_BACKLOG_IF_CANNOT_GET_SYNC_LOCK)) + // If we can't get it *instantaneously*, pass it to the backlog for throughput + if (TryPushToBacklog(message, onlyIfExists: false, bypassBacklog: bypassBacklog)) + { return new ValueTask(WriteResult.Success); // queued counts as success + } // no backlog... try to wait with the timeout; // if we *still* can't get it: that counts as // an actual timeout +#if NET + var pending = _singleWriterMutex.WaitAsync(TimeoutMilliseconds); + if (pending.Status != TaskStatus.RanToCompletion) return WriteMessageTakingWriteLockAsync_Awaited(pending, physical, message); + + gotLock = pending.Result; // fine since we know we got a result + if (!gotLock) return new ValueTask(TimedOutBeforeWrite(message)); +#else var pending = _singleWriterMutex.TryWaitAsync(options: WaitOptions.DisableAsyncContext); if (!pending.IsCompletedSuccessfully) return WriteMessageTakingWriteLockAsync_Awaited(pending, physical, message); token = pending.Result; // fine since we know we got a result if (!token.Success) return new ValueTask(TimedOutBeforeWrite(message)); +#endif } - lockTaken = Environment.TickCount; - var result = WriteMessageInsideLock(physical, message); - if (result == WriteResult.Success) { var flush = physical.FlushAsync(false); if (!flush.IsCompletedSuccessfully) { releaseLock = false; // so we don't release prematurely - return CompleteWriteAndReleaseLockAsync(token, flush, message, lockTaken); +#if NET + return CompleteWriteAndReleaseLockAsync(flush, message); +#else + return CompleteWriteAndReleaseLockAsync(token, flush, message); +#endif } - result = flush.Result; // we know it was completed, this is fine + result = flush.Result; // .Result: we know it was completed, so this is fine } - + physical.SetIdle(); return new ValueTask(result); } - catch (Exception ex) { return new ValueTask(HandleWriteException(message, ex)); } + catch (Exception ex) + { + return new 
ValueTask(HandleWriteException(message, ex)); + } finally { +#if NET + if (gotLock) +#else if (token.Success) +#endif { UnmarkActiveMessage(message); if (releaseLock) { -#if DEBUG - RecordLockDuration(lockTaken); -#endif +#if NET + _singleWriterMutex.Release(); +#else token.Dispose(); +#endif } } } } -#if DEBUG - private void RecordLockDuration(int lockTaken) - { - var lockDuration = unchecked(Environment.TickCount - lockTaken); - if (lockDuration > _maxLockDuration) _maxLockDuration = lockDuration; - } - volatile int _maxLockDuration = -1; + private async ValueTask WriteMessageTakingWriteLockAsync_Awaited( +#if NET + Task pending, +#else + ValueTask pending, #endif - - private async ValueTask WriteMessageTakingWriteLockAsync_Awaited(ValueTask pending, PhysicalConnection physical, Message message) + PhysicalConnection physical, + Message message) { +#if NET + bool gotLock = false; +#endif + try { - using (var token = await pending.ForAwait()) - { - if (!token.Success) return TimedOutBeforeWrite(message); -#if DEBUG - int lockTaken = Environment.TickCount; +#if NET + gotLock = await pending.ForAwait(); + if (!gotLock) return TimedOutBeforeWrite(message); +#else + using var token = await pending.ForAwait(); #endif - var result = WriteMessageInsideLock(physical, message); - - if (result == WriteResult.Success) - { - result = await physical.FlushAsync(false).ForAwait(); - } - - physical.SetIdle(); + var result = WriteMessageInsideLock(physical, message); -#if DEBUG - RecordLockDuration(lockTaken); -#endif - return result; + if (result == WriteResult.Success) + { + result = await physical.FlushAsync(false).ForAwait(); } + + physical.SetIdle(); + + return result; } catch (Exception ex) { @@ -1081,22 +1408,40 @@ private async ValueTask WriteMessageTakingWriteLockAsync_Awaited(Va finally { UnmarkActiveMessage(message); +#if NET + if (gotLock) + { + _singleWriterMutex.Release(); + } +#endif } } - private async ValueTask CompleteWriteAndReleaseLockAsync(LockToken lockToken, 
ValueTask flush, Message message, int lockTaken) + [SuppressMessage("StyleCop.CSharp.LayoutRules", "SA1519:Braces should not be omitted from multi-line child statement", Justification = "Detector is confused with the #ifdefs here")] + private async ValueTask CompleteWriteAndReleaseLockAsync( +#if !NETCOREAPP + LockToken lockToken, +#endif + ValueTask flush, + Message message) { +#if !NETCOREAPP using (lockToken) +#endif + try { - try - { - var result = await flush.ForAwait(); - physical.SetIdle(); - return result; - } - catch (Exception ex) { return HandleWriteException(message, ex); } -#if DEBUG - finally { RecordLockDuration(lockTaken); } + var result = await flush.ForAwait(); + physical?.SetIdle(); + return result; + } + catch (Exception ex) + { + return HandleWriteException(message, ex); + } + finally + { +#if NET + _singleWriterMutex.Release(); #endif } } @@ -1114,9 +1459,7 @@ private void UnmarkActiveMessage(Message message) private State ChangeState(State newState) { -#pragma warning disable 0420 var oldState = (State)Interlocked.Exchange(ref state, (int)newState); -#pragma warning restore 0420 if (oldState != newState) { Multiplexer.Trace(ConnectionType + " state changed from " + oldState + " to " + newState); @@ -1126,9 +1469,7 @@ private State ChangeState(State newState) private bool ChangeState(State oldState, State newState) { -#pragma warning disable 0420 bool result = Interlocked.CompareExchange(ref state, (int)newState, (int)oldState) == (int)oldState; -#pragma warning restore 0420 if (result) { Multiplexer.Trace(ConnectionType + " state changed from " + oldState + " to " + newState); @@ -1136,7 +1477,7 @@ private bool ChangeState(State oldState, State newState) return result; } - public PhysicalConnection TryConnect(LogProxy log) + public PhysicalConnection? TryConnect(ILogger? 
log) { if (state == (int)State.Disconnected) { @@ -1144,10 +1485,12 @@ public PhysicalConnection TryConnect(LogProxy log) { if (!Multiplexer.IsDisposed) { - log?.WriteLine($"{Name}: Connecting..."); + log?.LogInformationConnecting(Name); Multiplexer.Trace("Connecting...", Name); if (ChangeState(State.Disconnected, State.Connecting)) { + // Clear the reconnect flag as we're starting a new connection + Volatile.Write(ref _needsReconnect, false); Interlocked.Increment(ref socketCount); Interlocked.Exchange(ref connectStartTicks, Environment.TickCount); // separate creation and connection for case when connection completes synchronously @@ -1161,7 +1504,7 @@ public PhysicalConnection TryConnect(LogProxy log) } catch (Exception ex) { - log?.WriteLine($"{Name}: Connect failed: {ex.Message}"); + log?.LogErrorConnectFailed(ex, Name, ex.Message); Multiplexer.Trace("Connect failed: " + ex.Message, Name); ChangeState(State.Disconnected); OnInternalError(ex); @@ -1177,18 +1520,18 @@ private void LogNonPreferred(CommandFlags flags, bool isReplica) { if (isReplica) { - if (Message.GetMasterReplicaFlags(flags) == CommandFlags.PreferMaster) + if (Message.GetPrimaryReplicaFlags(flags) == CommandFlags.PreferMaster) Interlocked.Increment(ref nonPreferredEndpointCount); } else { - if (Message.GetMasterReplicaFlags(flags) == CommandFlags.PreferReplica) + if (Message.GetPrimaryReplicaFlags(flags) == CommandFlags.PreferReplica) Interlocked.Increment(ref nonPreferredEndpointCount); } } } - private void OnInternalError(Exception exception, [CallerMemberName] string origin = null) + private void OnInternalError(Exception exception, [CallerMemberName] string? 
origin = null) { Multiplexer.OnInternalError(exception, ServerEndPoint.EndPoint, ConnectionType, origin); } @@ -1211,20 +1554,23 @@ private void SelectDatabaseInsideWriteLock(PhysicalConnection connection, Messag private WriteResult WriteMessageToServerInsideWriteLock(PhysicalConnection connection, Message message) { - if (message == null) return WriteResult.Success; // for some definition of success + if (message == null) + { + return WriteResult.Success; // for some definition of success + } bool isQueued = false; try { var cmd = message.Command; LastCommand = cmd; - bool isMasterOnly = message.IsMasterOnly(); + bool isPrimaryOnly = message.IsPrimaryOnly(); - if (isMasterOnly && ServerEndPoint.IsReplica && (ServerEndPoint.ReplicaReadOnly || !ServerEndPoint.AllowReplicaWrites)) + if (isPrimaryOnly && !ServerEndPoint.SupportsPrimaryWrites) { - throw ExceptionFactory.MasterOnly(Multiplexer.IncludeDetailInExceptions, message.Command, message, ServerEndPoint); + throw ExceptionFactory.PrimaryOnly(Multiplexer.RawConfig.IncludeDetailInExceptions, message.Command, message, ServerEndPoint); } - switch(cmd) + switch (cmd) { case RedisCommand.QUIT: connection.RecordQuit(); @@ -1240,10 +1586,10 @@ private WriteResult WriteMessageToServerInsideWriteLock(PhysicalConnection conne { // If we are executing AUTH, it means we are still unauthenticated // Setting READONLY before AUTH always fails but we think it succeeded since - // we run it as Fire and Forget. - if (cmd != RedisCommand.AUTH) + // we run it as Fire and Forget. 
+ if (cmd != RedisCommand.AUTH && cmd != RedisCommand.HELLO) { - var readmode = connection.GetReadModeCommand(isMasterOnly); + var readmode = connection.GetReadModeCommand(isPrimaryOnly); if (readmode != null) { connection.EnqueueInsideWriteLock(readmode); @@ -1274,15 +1620,39 @@ private WriteResult WriteMessageToServerInsideWriteLock(PhysicalConnection conne break; } + if (_nextHighIntegrityToken is not 0 + && !connection.TransactionActive // validated in the UNWATCH/EXEC/DISCARD + && message.Command is not RedisCommand.AUTH // if auth fails, later commands may also fail; avoid confusion + && message.Command is not RedisCommand.HELLO) + { + // note on the Command match above: curiously, .NET 10 and .NET 11 SDKs emit *opposite* errors here + // re "CS9336: The pattern is redundant." ("fixing" one "breaks" the other); possibly a fixed bool inversion + // in the analyzer? to avoid pain, we'll just use the most obviously correct form + + // make sure this value exists early to avoid a race condition + // if the response comes back super quickly + message.WithHighIntegrity(NextHighIntegrityTokenInsideLock()); + Debug.Assert(message.IsHighIntegrity, "message should be high integrity"); + } + else + { + Debug.Assert(!message.IsHighIntegrity, "prior high integrity message found during transaction?"); + } connection.EnqueueInsideWriteLock(message); isQueued = true; message.WriteTo(connection); + if (message.IsHighIntegrity) + { + message.WriteHighIntegrityChecksumRequest(connection); + IncrementOpCount(); + } + message.SetRequestSent(); IncrementOpCount(); - // some commands smash our ability to trust the database; some commands - // demand an immediate flush + // Some commands smash our ability to trust the database + // and some commands demand an immediate flush switch (cmd) { case RedisCommand.EVAL: @@ -1295,7 +1665,10 @@ private WriteResult WriteMessageToServerInsideWriteLock(PhysicalConnection conne case RedisCommand.UNKNOWN: case RedisCommand.DISCARD: case 
RedisCommand.EXEC: - connection.SetUnknownDatabase(); + if (ServerEndPoint.SupportsDatabases) + { + connection.SetUnknownDatabase(); + } break; } return WriteResult.Success; @@ -1303,13 +1676,13 @@ private WriteResult WriteMessageToServerInsideWriteLock(PhysicalConnection conne catch (RedisCommandException ex) when (!isQueued) { Trace("Write failed: " + ex.Message); - message.Fail(ConnectionFailureType.InternalFailure, ex, null); + message.Fail(ConnectionFailureType.InternalFailure, ex, null, Multiplexer); message.Complete(); - // this failed without actually writing; we're OK with that... unless there's a transaction + // This failed without actually writing; we're OK with that... unless there's a transaction if (connection?.TransactionActive == true) { - // we left it in a broken state; need to kill the connection + // We left it in a broken state - need to kill the connection connection.RecordConnectionFailed(ConnectionFailureType.ProtocolFailure, ex); return WriteResult.WriteFailure; } @@ -1318,25 +1691,40 @@ private WriteResult WriteMessageToServerInsideWriteLock(PhysicalConnection conne catch (Exception ex) { Trace("Write failed: " + ex.Message); - message.Fail(ConnectionFailureType.InternalFailure, ex, null); + message.Fail(ConnectionFailureType.InternalFailure, ex, null, Multiplexer); message.Complete(); - // we're not sure *what* happened here; probably an IOException; kill the connection + // We're not sure *what* happened here - probably an IOException; kill the connection connection?.RecordConnectionFailed(ConnectionFailureType.InternalFailure, ex); return WriteResult.WriteFailure; } } + private uint NextHighIntegrityTokenInsideLock() + { + // inside lock: no concurrency concerns here + switch (_nextHighIntegrityToken) + { + case 0: return 0; // disabled + case uint.MaxValue: + // avoid leaving the value at zero due to wrap-around + _nextHighIntegrityToken = 1; + return ushort.MaxValue; + default: + return _nextHighIntegrityToken++; + } + } + /// - /// 
For testing only + /// For testing only. /// - internal void SimulateConnectionFailure() + internal void SimulateConnectionFailure(SimulatedFailureType failureType) { if (!Multiplexer.RawConfig.AllowAdmin) { - throw ExceptionFactory.AdminModeNotEnabled(Multiplexer.IncludeDetailInExceptions, RedisCommand.DEBUG, null, ServerEndPoint); // close enough + throw ExceptionFactory.AdminModeNotEnabled(Multiplexer.RawConfig.IncludeDetailInExceptions, RedisCommand.DEBUG, null, ServerEndPoint); // close enough } - physical?.RecordConnectionFailed(ConnectionFailureType.SocketFailure); + physical?.SimulateConnectionFailure(failureType); } internal RedisCommand? GetActiveMessage() => Volatile.Read(ref _activeMessage)?.Command; diff --git a/src/StackExchange.Redis/PhysicalConnection.cs b/src/StackExchange.Redis/PhysicalConnection.cs index ebb80e977..6ba8b4cde 100644 --- a/src/StackExchange.Redis/PhysicalConnection.cs +++ b/src/StackExchange.Redis/PhysicalConnection.cs @@ -1,8 +1,9 @@ using System; using System.Buffers; -using System.Buffers.Text; +using System.Buffers.Binary; using System.Collections.Generic; using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; using System.IO; using System.IO.Pipelines; using System.Linq; @@ -10,26 +11,25 @@ using System.Net.Security; using System.Net.Sockets; using System.Runtime.CompilerServices; -using System.Runtime.InteropServices; using System.Security.Authentication; using System.Security.Cryptography.X509Certificates; using System.Text; using System.Threading; using System.Threading.Tasks; +using Microsoft.Extensions.Logging; using Pipelines.Sockets.Unofficial; using Pipelines.Sockets.Unofficial.Arenas; -using static StackExchange.Redis.ConnectionMultiplexer; +using RESPite; +using static StackExchange.Redis.Message; namespace StackExchange.Redis { internal sealed partial class PhysicalConnection : IDisposable { - internal readonly byte[] ChannelPrefix; + internal readonly byte[]? 
ChannelPrefix; private const int DefaultRedisDatabaseCount = 16; - private static readonly CommandBytes message = "message", pmessage = "pmessage"; - private static readonly Message[] ReusableChangeDatabaseCommands = Enumerable.Range(0, DefaultRedisDatabaseCount).Select( i => Message.Create(i, CommandFlags.FireAndForget, RedisCommand.SELECT)).ToArray(); @@ -44,6 +44,8 @@ private static readonly Message // things sent to this physical, but not yet received private readonly Queue _writtenAwaitingResponse = new Queue(); + private Message? _awaitingToken; + private readonly string _physicalName; private volatile int currentDatabase = 0; @@ -52,8 +54,14 @@ private static readonly Message private int failureReported; + private int clientSentQuit; + private int lastWriteTickCount, lastReadTickCount, lastBeatTickCount; + private long bytesLastResult; + private long bytesInBuffer; + internal long? ConnectionId { get; set; } + internal void GetBytes(out long sent, out long received) { if (_ioPipe is IMeasuredDuplexPipe sc) @@ -67,10 +75,15 @@ internal void GetBytes(out long sent, out long received) } } - private IDuplexPipe _ioPipe; + /// + /// Nullable because during simulation of failure, we'll null out. + /// ...but in those cases, we'll accept any null ref in a race - it's fine. + /// + private IDuplexPipe? _ioPipe; + internal bool HasOutputPipe => _ioPipe?.Output != null; - private Socket _socket; - private Socket VolatileSocket => Volatile.Read(ref _socket); + private Socket? _socket; + internal Socket? 
VolatileSocket => Volatile.Read(ref _socket); public PhysicalConnection(PhysicalBridge bridge) { @@ -78,7 +91,7 @@ public PhysicalConnection(PhysicalBridge bridge) lastBeatTickCount = 0; connectionType = bridge.ConnectionType; _bridge = new WeakReference(bridge); - ChannelPrefix = bridge.Multiplexer.RawConfig.ChannelPrefix; + ChannelPrefix = bridge.Multiplexer.ChannelPrefix; if (ChannelPrefix?.Length == 0) ChannelPrefix = null; // null tests are easier than null+empty var endpoint = bridge.ServerEndPoint.EndPoint; _physicalName = connectionType + "#" + Interlocked.Increment(ref totalCount) + "@" + Format.ToString(endpoint); @@ -86,40 +99,65 @@ public PhysicalConnection(PhysicalBridge bridge) OnCreateEcho(); } - internal async Task BeginConnectAsync(LogProxy log) + // *definitely* multi-database; this can help identify some unusual config scenarios + internal bool MultiDatabasesOverride { get; set; } // switch to flags-enum if more needed later + + internal async Task BeginConnectAsync(ILogger? 
log) { var bridge = BridgeCouldBeNull; var endpoint = bridge?.ServerEndPoint?.EndPoint; - if (endpoint == null) + if (bridge == null || endpoint == null) { - log?.WriteLine("No endpoint"); + log?.LogErrorNoEndpoint(new ArgumentNullException(nameof(endpoint))); + return; } Trace("Connecting..."); - _socket = SocketManager.CreateSocket(endpoint); + var tunnel = bridge.Multiplexer.RawConfig.Tunnel; + var connectTo = endpoint; + if (tunnel is not null) + { + connectTo = await tunnel.GetSocketConnectEndpointAsync(endpoint, CancellationToken.None).ForAwait(); + } + if (connectTo is not null) + { + _socket = SocketManager.CreateSocket(connectTo); + } + + if (_socket is not null) + { + bridge.Multiplexer.RawConfig.BeforeSocketConnect?.Invoke(endpoint, bridge.ConnectionType, _socket); + if (tunnel is not null) + { + // same functionality as part of a tunnel + await tunnel.BeforeSocketConnectAsync(endpoint, bridge.ConnectionType, _socket, CancellationToken.None).ForAwait(); + } + } bridge.Multiplexer.OnConnecting(endpoint, bridge.ConnectionType); - log?.WriteLine($"{Format.ToString(endpoint)}: BeginConnectAsync"); + log?.LogInformationBeginConnectAsync(new(endpoint)); - CancellationTokenSource timeoutSource = null; + CancellationTokenSource? timeoutSource = null; try { - using (var args = new SocketAwaitableEventArgs + using (var args = connectTo is null ? 
null : new SocketAwaitableEventArgs { - RemoteEndPoint = endpoint, + RemoteEndPoint = connectTo, }) { var x = VolatileSocket; if (x == null) { - args.Abort(); + args?.Abort(); } - else if (x.ConnectAsync(args)) - { // asynchronous operation is pending + else if (args is not null && x.ConnectAsync(args)) + { + // asynchronous operation is pending timeoutSource = ConfigureTimeout(args, bridge.Multiplexer.RawConfig.ConnectTimeout); } else - { // completed synchronously - args.Complete(); + { + // completed synchronously + args?.Complete(); } // Complete connection @@ -128,7 +166,10 @@ internal async Task BeginConnectAsync(LogProxy log) // If we're told to ignore connect, abort here if (BridgeCouldBeNull?.Multiplexer?.IgnoreConnect ?? false) return; - await args; // wait for the connect to complete or fail (will throw) + if (args is not null) + { + await args; // wait for the connect to complete or fail (will throw) + } if (timeoutSource != null) { timeoutSource.Cancel(); @@ -136,13 +177,13 @@ internal async Task BeginConnectAsync(LogProxy log) } x = VolatileSocket; - if (x == null) + if (x == null && args is not null) { ConnectionMultiplexer.TraceWithoutContext("Socket was already aborted"); } - else if (await ConnectedAsync(x, log, bridge.Multiplexer.SocketManager).ForAwait()) + else if (await ConnectedAsync(x, log, bridge.Multiplexer.SocketManager!).ForAwait()) { - log?.WriteLine($"{Format.ToString(endpoint)}: Starting read"); + log?.LogInformationStartingRead(new(endpoint)); try { StartReading(); @@ -160,9 +201,9 @@ internal async Task BeginConnectAsync(LogProxy log) Shutdown(); } } - catch (ObjectDisposedException) + catch (ObjectDisposedException ex) { - log?.WriteLine($"{Format.ToString(endpoint)}: (socket shutdown)"); + log?.LogErrorSocketShutdown(ex, new(endpoint)); try { RecordConnectionFailed(ConnectionFailureType.UnableToConnect, isInitialConnect: true); } catch (Exception inner) { @@ -180,7 +221,7 @@ internal async Task BeginConnectAsync(LogProxy log) } } 
} - catch (NotImplementedException ex) when (!(endpoint is IPEndPoint)) + catch (NotImplementedException ex) when (endpoint is not IPEndPoint) { throw new InvalidOperationException("BeginConnect failed with NotImplementedException; consider using IP endpoints, or enable ResolveDns in the configuration", ex); } @@ -194,16 +235,18 @@ private static CancellationTokenSource ConfigureTimeout(SocketAwaitableEventArgs { var cts = new CancellationTokenSource(); var timeout = Task.Delay(timeoutMilliseconds, cts.Token); - timeout.ContinueWith((_, state) => - { - try + timeout.ContinueWith( + (_, state) => { - var a = (SocketAwaitableEventArgs)state; - a.Abort(SocketError.TimedOut); - Socket.CancelConnectAsync(a); - } - catch { } - }, args); + try + { + var a = (SocketAwaitableEventArgs)state!; + a.Abort(SocketError.TimedOut); + Socket.CancelConnectAsync(a); + } + catch { } + }, + args); return cts; } @@ -211,15 +254,16 @@ private enum ReadMode : byte { NotSpecified, ReadOnly, - ReadWrite + ReadWrite, } private readonly WeakReference _bridge; - public PhysicalBridge BridgeCouldBeNull => (PhysicalBridge)_bridge.Target; + public PhysicalBridge? BridgeCouldBeNull => (PhysicalBridge?)_bridge.Target; - public long LastWriteSecondsAgo => unchecked(Environment.TickCount - Thread.VolatileRead(ref lastWriteTickCount)) / 1000; + public long LastReadSecondsAgo => unchecked(Environment.TickCount - Volatile.Read(ref lastReadTickCount)) / 1000; + public long LastWriteSecondsAgo => unchecked(Environment.TickCount - Volatile.Read(ref lastWriteTickCount)) / 1000; - private bool IncludeDetailInExceptions => BridgeCouldBeNull?.Multiplexer.IncludeDetailInExceptions ?? false; + private bool IncludeDetailInExceptions => BridgeCouldBeNull?.Multiplexer.RawConfig.IncludeDetailInExceptions ?? 
false; [Conditional("VERBOSE")] internal void Trace(string message) => BridgeCouldBeNull?.Multiplexer?.Trace(message, ToString()); @@ -228,7 +272,16 @@ private enum ReadMode : byte public bool TransactionActive { get; internal set; } - [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Usage", "CA2202:Do not dispose objects multiple times")] + private RedisProtocol _protocol; // note starts at **zero**, not RESP2 + public RedisProtocol? Protocol => _protocol == 0 ? null : _protocol; + + internal void SetProtocol(RedisProtocol value) + { + _protocol = value; + BridgeCouldBeNull?.SetProtocol(value); + } + + [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Usage", "CA2202:Do not dispose objects multiple times", Justification = "Trust me yo")] internal void Shutdown() { var ioPipe = Interlocked.Exchange(ref _ioPipe, null); // compare to the critical read @@ -242,7 +295,6 @@ internal void Shutdown() try { ioPipe.Input?.Complete(); } catch { } try { ioPipe.Output?.CancelPendingFlush(); } catch { } try { ioPipe.Output?.Complete(); } catch { } - try { using (ioPipe as IDisposable) { } } catch { } } @@ -283,27 +335,74 @@ public Task FlushAsync() { _writeStatus = WriteStatus.Flushing; var flush = tmp.FlushAsync(); - if (!flush.IsCompletedSuccessfully) return AwaitedFlush(flush); + if (!flush.IsCompletedSuccessfully) + { + return AwaitedFlush(flush); + } _writeStatus = WriteStatus.Flushed; UpdateLastWriteTime(); } return Task.CompletedTask; } - public void RecordConnectionFailed(ConnectionFailureType failureType, Exception innerException = null, [CallerMemberName] string origin = null, - bool isInitialConnect = false, IDuplexPipe connectingPipe = null - ) + internal void SimulateConnectionFailure(SimulatedFailureType failureType) + { + var raiseFailed = false; + if (connectionType == ConnectionType.Interactive) + { + if (failureType.HasFlag(SimulatedFailureType.InteractiveInbound)) + { + _ioPipe?.Input.Complete(new Exception("Simulating interactive input 
failure")); + raiseFailed = true; + } + if (failureType.HasFlag(SimulatedFailureType.InteractiveOutbound)) + { + _ioPipe?.Output.Complete(new Exception("Simulating interactive output failure")); + raiseFailed = true; + } + } + else if (connectionType == ConnectionType.Subscription) + { + if (failureType.HasFlag(SimulatedFailureType.SubscriptionInbound)) + { + _ioPipe?.Input.Complete(new Exception("Simulating subscription input failure")); + raiseFailed = true; + } + if (failureType.HasFlag(SimulatedFailureType.SubscriptionOutbound)) + { + _ioPipe?.Output.Complete(new Exception("Simulating subscription output failure")); + raiseFailed = true; + } + } + if (raiseFailed) + { + RecordConnectionFailed(ConnectionFailureType.SocketFailure); + } + } + + public void RecordConnectionFailed( + ConnectionFailureType failureType, + Exception? innerException = null, + [CallerMemberName] string? origin = null, + bool isInitialConnect = false, + IDuplexPipe? connectingPipe = null) { - Exception outerException = innerException; + bool weAskedForThis; + Exception? outerException = innerException; IdentifyFailureType(innerException, ref failureType); var bridge = BridgeCouldBeNull; + Message? nextMessage; + if (_ioPipe != null || isInitialConnect) // if *we* didn't burn the pipe: flag it { - if (failureType == ConnectionFailureType.InternalFailure) OnInternalError(innerException, origin); + if (failureType == ConnectionFailureType.InternalFailure && innerException is not null) + { + OnInternalError(innerException, origin); + } // stop anything new coming in... 
bridge?.Trace("Failed: " + failureType); - long @in = -1, @toRead = -1, @toWrite = -1; + ConnectionStatus connStatus = ConnectionStatus.Default; PhysicalBridge.State oldState = PhysicalBridge.State.Disconnected; bool isCurrent = false; bridge?.OnDisconnected(failureType, this, out isCurrent, out oldState); @@ -311,40 +410,42 @@ public void RecordConnectionFailed(ConnectionFailureType failureType, Exception { try { - @in = GetSocketBytes(out toRead, out toWrite); + connStatus = GetStatus(); } catch { /* best effort only */ } } if (isCurrent && Interlocked.CompareExchange(ref failureReported, 1, 0) == 0) { - int now = Environment.TickCount, lastRead = Thread.VolatileRead(ref lastReadTickCount), lastWrite = Thread.VolatileRead(ref lastWriteTickCount), - lastBeat = Thread.VolatileRead(ref lastBeatTickCount); + int now = Environment.TickCount, lastRead = Volatile.Read(ref lastReadTickCount), lastWrite = Volatile.Read(ref lastWriteTickCount), + lastBeat = Volatile.Read(ref lastBeatTickCount); int unansweredWriteTime = 0; lock (_writtenAwaitingResponse) { // find oldest message awaiting a response - if (_writtenAwaitingResponse.Count != 0) + if (_writtenAwaitingResponse.TryPeek(out nextMessage)) { - var next = _writtenAwaitingResponse.Peek(); - unansweredWriteTime = next.GetWriteTime(); + unansweredWriteTime = nextMessage.GetWriteTime(); } } var exMessage = new StringBuilder(failureType.ToString()); + // If the reason for the shutdown was we asked for the socket to die, don't log it as an error (only informational) + weAskedForThis = Volatile.Read(ref clientSentQuit) != 0; + var pipe = connectingPipe ?? 
_ioPipe; if (pipe is SocketConnection sc) { exMessage.Append(" (").Append(sc.ShutdownKind); if (sc.SocketError != SocketError.Success) { - exMessage.Append("/").Append(sc.SocketError); + exMessage.Append('/').Append(sc.SocketError); } if (sc.BytesRead == 0) exMessage.Append(", 0-read"); if (sc.BytesSent == 0) exMessage.Append(", 0-sent"); - exMessage.Append(", last-recv: ").Append(sc.LastReceived).Append(")"); + exMessage.Append(", last-recv: ").Append(sc.LastReceived).Append(')'); } else if (pipe is IMeasuredDuplexPipe mdp) { @@ -354,8 +455,8 @@ public void RecordConnectionFailed(ConnectionFailureType failureType, Exception else if (recd == 0) { exMessage.Append(" (0-read)"); } } - var data = new List>(); - void add(string lk, string sk, string v) + var data = new List>(); + void AddData(string lk, string sk, string? v) { if (lk != null) data.Add(Tuple.Create(lk, v)); if (sk != null) exMessage.Append(", ").Append(sk).Append(": ").Append(v); @@ -365,78 +466,96 @@ void add(string lk, string sk, string v) { if (bridge != null) { - exMessage.Append(" on ").Append(Format.ToString(bridge.ServerEndPoint?.EndPoint)).Append("/").Append(connectionType) - .Append(", ").Append(_writeStatus).Append("/").Append(_readStatus) + exMessage.Append(" on ").Append(Format.ToString(bridge.ServerEndPoint?.EndPoint)).Append('/').Append(connectionType) + .Append(", ").Append(_writeStatus).Append('/').Append(_readStatus) .Append(", last: ").Append(bridge.LastCommand); - data.Add(Tuple.Create("FailureType", failureType.ToString())); - data.Add(Tuple.Create("EndPoint", Format.ToString(bridge.ServerEndPoint?.EndPoint))); + data.Add(Tuple.Create("FailureType", failureType.ToString())); + data.Add(Tuple.Create("EndPoint", Format.ToString(bridge.ServerEndPoint?.EndPoint))); - add("Origin", "origin", origin); + AddData("Origin", "origin", origin); // add("Input-Buffer", "input-buffer", _ioPipe.Input); - add("Outstanding-Responses", "outstanding", GetSentAwaitingResponseCount().ToString()); - 
add("Last-Read", "last-read", (unchecked(now - lastRead) / 1000) + "s ago"); - add("Last-Write", "last-write", (unchecked(now - lastWrite) / 1000) + "s ago"); - if(unansweredWriteTime != 0) add("Unanswered-Write", "unanswered-write", (unchecked(now - unansweredWriteTime) / 1000) + "s ago"); - add("Keep-Alive", "keep-alive", bridge.ServerEndPoint?.WriteEverySeconds + "s"); - add("Previous-Physical-State", "state", oldState.ToString()); - add("Manager", "mgr", bridge.Multiplexer.SocketManager?.GetState()); - if (@in >= 0) add("Inbound-Bytes", "in", @in.ToString()); - if (toRead >= 0) add("Inbound-Pipe-Bytes", "in-pipe", toRead.ToString()); - if (toWrite >= 0) add("Outbound-Pipe-Bytes", "out-pipe", toWrite.ToString()); - - add("Last-Heartbeat", "last-heartbeat", (lastBeat == 0 ? "never" : ((unchecked(now - lastBeat) / 1000) + "s ago")) + (BridgeCouldBeNull.IsBeating ? " (mid-beat)" : "")); + AddData("Outstanding-Responses", "outstanding", GetSentAwaitingResponseCount().ToString()); + AddData("Last-Read", "last-read", (unchecked(now - lastRead) / 1000) + "s ago"); + AddData("Last-Write", "last-write", (unchecked(now - lastWrite) / 1000) + "s ago"); + if (unansweredWriteTime != 0) AddData("Unanswered-Write", "unanswered-write", (unchecked(now - unansweredWriteTime) / 1000) + "s ago"); + AddData("Keep-Alive", "keep-alive", bridge.ServerEndPoint?.WriteEverySeconds + "s"); + AddData("Previous-Physical-State", "state", oldState.ToString()); + AddData("Manager", "mgr", bridge.Multiplexer.SocketManager?.GetState()); + if (connStatus.BytesAvailableOnSocket >= 0) AddData("Inbound-Bytes", "in", connStatus.BytesAvailableOnSocket.ToString()); + if (connStatus.BytesInReadPipe >= 0) AddData("Inbound-Pipe-Bytes", "in-pipe", connStatus.BytesInReadPipe.ToString()); + if (connStatus.BytesInWritePipe >= 0) AddData("Outbound-Pipe-Bytes", "out-pipe", connStatus.BytesInWritePipe.ToString()); + + AddData("Last-Heartbeat", "last-heartbeat", (lastBeat == 0 ? 
"never" : ((unchecked(now - lastBeat) / 1000) + "s ago")) + (bridge.IsBeating ? " (mid-beat)" : "")); var mbeat = bridge.Multiplexer.LastHeartbeatSecondsAgo; if (mbeat >= 0) { - add("Last-Multiplexer-Heartbeat", "last-mbeat", mbeat + "s ago"); + AddData("Last-Multiplexer-Heartbeat", "last-mbeat", mbeat + "s ago"); } - add("Last-Global-Heartbeat", "global", ConnectionMultiplexer.LastGlobalHeartbeatSecondsAgo + "s ago"); + AddData("Last-Global-Heartbeat", "global", ConnectionMultiplexer.LastGlobalHeartbeatSecondsAgo + "s ago"); } } - add("Version", "v", ExceptionFactory.GetLibVersion()); + AddData("Version", "v", Utils.GetLibVersion()); - outerException = innerException == null - ? new RedisConnectionException(failureType, exMessage.ToString()) - : new RedisConnectionException(failureType, exMessage.ToString(), innerException); + outerException = new RedisConnectionException(failureType, exMessage.ToString(), innerException); foreach (var kv in data) { outerException.Data["Redis-" + kv.Item1] = kv.Item2; } - bridge?.OnConnectionFailed(this, failureType, outerException); + bridge?.OnConnectionFailed(this, failureType, outerException, wasRequested: weAskedForThis); } } - // cleanup + // clean up (note: avoid holding the lock when we complete things, even if this means taking + // the lock multiple times; this is fine here - we shouldn't be fighting anyone, and we're already toast) lock (_writtenAwaitingResponse) { bridge?.Trace(_writtenAwaitingResponse.Count != 0, "Failing outstanding messages: " + _writtenAwaitingResponse.Count); - while (_writtenAwaitingResponse.Count != 0) - { - var next = _writtenAwaitingResponse.Dequeue(); + } - if (next.Command == RedisCommand.QUIT && next.TrySetResult(true)) - { - // fine, death of a socket is close enough - next.Complete(); - } - else - { - var ex = innerException is RedisException ? 
innerException : outerException; - if (bridge != null) - { - bridge.Trace("Failing: " + next); - bridge.Multiplexer?.OnMessageFaulted(next, ex, origin); - } - next.SetExceptionAndComplete(ex, bridge); - } - } + var ex = innerException is RedisException ? innerException : outerException; + + nextMessage = Interlocked.Exchange(ref _awaitingToken, null); + if (nextMessage is not null) + { + RecordMessageFailed(nextMessage, ex, origin, bridge); + } + + while (TryDequeueLocked(_writtenAwaitingResponse, out nextMessage)) + { + RecordMessageFailed(nextMessage, ex, origin, bridge); } // burn the socket Shutdown(); + + static bool TryDequeueLocked(Queue queue, [NotNullWhen(true)] out Message? message) + { + lock (queue) + { + return queue.TryDequeue(out message); + } + } + } + + private void RecordMessageFailed(Message next, Exception? ex, string? origin, PhysicalBridge? bridge) + { + if (next.Command == RedisCommand.QUIT && next.TrySetResult(true)) + { + // fine, death of a socket is close enough + next.Complete(); + } + else + { + if (bridge != null) + { + bridge.Trace("Failing: " + next); + bridge.Multiplexer?.OnMessageFaulted(next, ex, origin); + } + next.SetExceptionAndComplete(ex!, bridge); + } } internal bool IsIdle() => _writeStatus == WriteStatus.Idle; @@ -462,24 +581,52 @@ internal enum WriteStatus /// A string that represents the current object. public override string ToString() => $"{_physicalName} ({_writeStatus})"; - internal static void IdentifyFailureType(Exception exception, ref ConnectionFailureType failureType) + internal static void IdentifyFailureType(Exception? exception, ref ConnectionFailureType failureType) { if (exception != null && failureType == ConnectionFailureType.InternalFailure) { - if (exception is AggregateException) exception = exception.InnerException ?? 
exception; - if (exception is AuthenticationException) failureType = ConnectionFailureType.AuthenticationFailure; - else if (exception is EndOfStreamException) failureType = ConnectionFailureType.SocketClosed; - else if (exception is SocketException || exception is IOException) failureType = ConnectionFailureType.SocketFailure; - else if (exception is ObjectDisposedException) failureType = ConnectionFailureType.SocketClosed; + if (exception is AggregateException) + { + exception = exception.InnerException ?? exception; + } + + failureType = exception switch + { + AuthenticationException => ConnectionFailureType.AuthenticationFailure, + EndOfStreamException or ObjectDisposedException => ConnectionFailureType.SocketClosed, + SocketException or IOException => ConnectionFailureType.SocketFailure, + _ => failureType, + }; } } internal void EnqueueInsideWriteLock(Message next) { + var multiplexer = BridgeCouldBeNull?.Multiplexer; + if (multiplexer is null) + { + // multiplexer already collected? then we're almost certainly doomed; + // we can still process it to avoid making things worse/more complex, + // but: we can't reliably assume this works, so: shout now! + next.Cancel(); + next.Complete(); + } + + bool wasEmpty; lock (_writtenAwaitingResponse) { + wasEmpty = _writtenAwaitingResponse.Count == 0; _writtenAwaitingResponse.Enqueue(next); } + if (wasEmpty) + { + // it is important to do this *after* adding, so that we can't + // get into a thread-race where the heartbeat checks too fast; + // the fact that we're accessing Multiplexer down here means that + // we're rooting it ourselves via the stack, so we don't need + // to worry about it being collected until at least after this + multiplexer?.Root(); + } } internal void GetCounters(ConnectionCounters counters) @@ -491,12 +638,11 @@ internal void GetCounters(ConnectionCounters counters) counters.Subscriptions = SubscriptionCount; } - internal Message GetReadModeCommand(bool isMasterOnly) + internal Message? 
GetReadModeCommand(bool isPrimaryOnly) { - var serverEndpoint = BridgeCouldBeNull?.ServerEndPoint; - if (serverEndpoint != null && serverEndpoint.RequiresReadMode) + if (BridgeCouldBeNull?.ServerEndPoint?.RequiresReadMode == true) { - ReadMode requiredReadMode = isMasterOnly ? ReadMode.ReadWrite : ReadMode.ReadOnly; + ReadMode requiredReadMode = isPrimaryOnly ? ReadMode.ReadWrite : ReadMode.ReadOnly; if (requiredReadMode != currentReadMode) { currentReadMode = requiredReadMode; @@ -508,7 +654,8 @@ internal Message GetReadModeCommand(bool isMasterOnly) } } else if (currentReadMode == ReadMode.ReadOnly) - { // we don't need it (because we're not a cluster, or not a replica), + { + // we don't need it (because we're not a cluster, or not a replica), // but we are in read-only mode; switch to read-write currentReadMode = ReadMode.ReadWrite; return ReusableReadWriteCommand; @@ -516,53 +663,59 @@ internal Message GetReadModeCommand(bool isMasterOnly) return null; } - internal Message GetSelectDatabaseCommand(int targetDatabase, Message message) + internal Message? 
GetSelectDatabaseCommand(int targetDatabase, Message message) { - if (targetDatabase < 0) return null; - if (targetDatabase != currentDatabase) + if (targetDatabase < 0 || targetDatabase == currentDatabase) { - var serverEndpoint = BridgeCouldBeNull?.ServerEndPoint; - if (serverEndpoint == null) return null; - int available = serverEndpoint.Databases; + return null; + } - if (!serverEndpoint.HasDatabases) // only db0 is available on cluster/twemproxy - { - if (targetDatabase != 0) - { // should never see this, since the API doesn't allow it; thus not too worried about ExceptionFactory - throw new RedisCommandException("Multiple databases are not supported on this server; cannot switch to database: " + targetDatabase); - } - return null; - } + if (BridgeCouldBeNull?.ServerEndPoint is not ServerEndPoint serverEndpoint) + { + return null; + } + int available = serverEndpoint.Databases; - if (message.Command == RedisCommand.SELECT) + // Only db0 is available on cluster/twemproxy/envoyproxy + if (!serverEndpoint.SupportsDatabases) + { + if (targetDatabase != 0) { - // this could come from an EVAL/EVALSHA inside a transaction, for example; we'll accept it - BridgeCouldBeNull?.Trace("Switching database: " + targetDatabase); - currentDatabase = targetDatabase; - return null; - } - - if (TransactionActive) - {// should never see this, since the API doesn't allow it; thus not too worried about ExceptionFactory - throw new RedisCommandException("Multiple databases inside a transaction are not currently supported: " + targetDatabase); + // We should never see this, since the API doesn't allow it; thus not too worried about ExceptionFactory + throw new RedisCommandException("Multiple databases are not supported on this server; cannot switch to database: " + targetDatabase); } + return null; + } - if (available != 0 && targetDatabase >= available) // we positively know it is out of range - { - throw ExceptionFactory.DatabaseOutfRange(IncludeDetailInExceptions, targetDatabase, 
message, serverEndpoint); - } + if (message.Command == RedisCommand.SELECT) + { + // This could come from an EVAL/EVALSHA inside a transaction, for example; we'll accept it BridgeCouldBeNull?.Trace("Switching database: " + targetDatabase); currentDatabase = targetDatabase; - return GetSelectDatabaseCommand(targetDatabase); + return null; } - return null; + + if (TransactionActive) + { + // Should never see this, since the API doesn't allow it, thus not too worried about ExceptionFactory + throw new RedisCommandException("Multiple databases inside a transaction are not currently supported: " + targetDatabase); + } + + // We positively know it is out of range + if (available != 0 && targetDatabase >= available) + { + throw ExceptionFactory.DatabaseOutfRange(IncludeDetailInExceptions, targetDatabase, message, serverEndpoint); + } + BridgeCouldBeNull?.Trace("Switching database: " + targetDatabase); + currentDatabase = targetDatabase; + return GetSelectDatabaseCommand(targetDatabase); } internal static Message GetSelectDatabaseCommand(int targetDatabase) { return targetDatabase < DefaultRedisDatabaseCount - ? ReusableChangeDatabaseCommands[targetDatabase] // 0-15 by default - : Message.Create(targetDatabase, CommandFlags.FireAndForget, RedisCommand.SELECT); + ? ReusableChangeDatabaseCommands[targetDatabase] // 0-15 by default + : Message.Create(targetDatabase, CommandFlags.FireAndForget, RedisCommand.SELECT); } internal int GetSentAwaitingResponseCount() @@ -589,51 +742,68 @@ internal void GetStormLog(StringBuilder sb) } } - internal void OnBridgeHeartbeat() + /// + /// Runs on every heartbeat for a bridge, timing out any commands that are overdue and returning an integer of how many we timed out. + /// + /// How many commands were overdue and threw timeout exceptions. 
+ internal int OnBridgeHeartbeat() { + var result = 0; var now = Environment.TickCount; Interlocked.Exchange(ref lastBeatTickCount, now); lock (_writtenAwaitingResponse) { - if (_writtenAwaitingResponse.Count != 0) + if (_writtenAwaitingResponse.Count != 0 && BridgeCouldBeNull is PhysicalBridge bridge) { - var bridge = BridgeCouldBeNull; - if (bridge == null) return; - - var server = bridge?.ServerEndPoint; - var timeout = bridge.Multiplexer.AsyncTimeoutMilliseconds; + var server = bridge.ServerEndPoint; + var multiplexer = bridge.Multiplexer; + var timeout = multiplexer.AsyncTimeoutMilliseconds; foreach (var msg in _writtenAwaitingResponse) { - if (msg.HasAsyncTimedOut(now, timeout, out var elapsed)) + // We only handle async timeouts here, synchronous timeouts are handled upstream. + // Those sync timeouts happen in ConnectionMultiplexer.ExecuteSyncImpl() via Monitor.Wait. + if (msg.HasTimedOut(now, timeout, out var elapsed)) + { + if (msg.ResultBoxIsAsync) + { + bool haveDeltas = msg.TryGetPhysicalState(out _, out _, out long sentDelta, out var receivedDelta) && sentDelta >= 0 && receivedDelta >= 0; + var baseErrorMessage = haveDeltas + ? $"Timeout awaiting response (outbound={sentDelta >> 10}KiB, inbound={receivedDelta >> 10}KiB, {elapsed}ms elapsed, timeout is {timeout}ms)" + : $"Timeout awaiting response ({elapsed}ms elapsed, timeout is {timeout}ms)"; + var timeoutEx = ExceptionFactory.Timeout(multiplexer, baseErrorMessage, msg, server); + multiplexer.OnMessageFaulted(msg, timeoutEx); + msg.SetExceptionAndComplete(timeoutEx, bridge); // tell the message that it is doomed + multiplexer.OnAsyncTimeout(); + result++; + } + } + else { - bool haveDeltas = msg.TryGetPhysicalState(out _, out _, out long sentDelta, out var receivedDelta) && sentDelta >= 0 && receivedDelta >= 0; - var timeoutEx = ExceptionFactory.Timeout(bridge.Multiplexer, haveDeltas - ? 
$"Timeout awaiting response (outbound={sentDelta >> 10}KiB, inbound={receivedDelta >> 10}KiB, {elapsed}ms elapsed, timeout is {timeout}ms)" - : $"Timeout awaiting response ({elapsed}ms elapsed, timeout is {timeout}ms)", msg, server); - bridge.Multiplexer?.OnMessageFaulted(msg, timeoutEx); - msg.SetExceptionAndComplete(timeoutEx, bridge); // tell the message that it is doomed - bridge.Multiplexer.OnAsyncTimeout(); + // This is a head-of-line queue, which means the first thing we hit that *hasn't* timed out means no more will timeout + // and we can stop looping and release the lock early. + break; } - // note: it is important that we **do not** remove the message unless we're tearing down the socket; that - // would disrupt the chain for MatchResult; we just pre-emptively abort the message from the caller's + // Note: it is important that we **do not** remove the message unless we're tearing down the socket; that + // would disrupt the chain for MatchResult; we just preemptively abort the message from the caller's // perspective, and set a flag on the message so we don't keep doing it } } } + return result; } - internal void OnInternalError(Exception exception, [CallerMemberName] string origin = null) + internal void OnInternalError(Exception exception, [CallerMemberName] string? 
origin = null) { - var bridge = BridgeCouldBeNull; - if (bridge != null) + if (BridgeCouldBeNull is PhysicalBridge bridge) { bridge.Multiplexer.OnInternalError(exception, bridge.ServerEndPoint.EndPoint, connectionType, origin); } } internal void SetUnknownDatabase() - { // forces next db-specific command to issue a select + { + // forces next db-specific command to issue a select currentDatabase = -1; } @@ -642,51 +812,70 @@ internal void Write(in RedisKey key) var val = key.KeyValue; if (val is string s) { - WriteUnifiedPrefixedString(_ioPipe.Output, key.KeyPrefix, s); + WriteUnifiedPrefixedString(_ioPipe?.Output, key.KeyPrefix, s); } else { - WriteUnifiedPrefixedBlob(_ioPipe.Output, key.KeyPrefix, (byte[])val); + WriteUnifiedPrefixedBlob(_ioPipe?.Output, key.KeyPrefix, (byte[]?)val); } } internal void Write(in RedisChannel channel) - => WriteUnifiedPrefixedBlob(_ioPipe.Output, ChannelPrefix, channel.Value); + => WriteUnifiedPrefixedBlob(_ioPipe?.Output, channel.IgnoreChannelPrefix ? null : ChannelPrefix, channel.Value); [MethodImpl(MethodImplOptions.AggressiveInlining)] internal void WriteBulkString(in RedisValue value) - => WriteBulkString(value, _ioPipe.Output); - internal static void WriteBulkString(in RedisValue value, PipeWriter output) + => WriteBulkString(value, _ioPipe?.Output); + internal static void WriteBulkString(in RedisValue value, IBufferWriter? 
maybeNullWriter) { + if (maybeNullWriter is not { } writer) + { + return; // Prevent null refs during disposal + } + switch (value.Type) { case RedisValue.StorageType.Null: - WriteUnifiedBlob(output, (byte[])null); + WriteUnifiedBlob(writer, (byte[]?)null); break; case RedisValue.StorageType.Int64: - WriteUnifiedInt64(output, value.OverlappedValueInt64); + WriteUnifiedInt64(writer, value.OverlappedValueInt64); break; case RedisValue.StorageType.UInt64: - WriteUnifiedUInt64(output, value.OverlappedValueUInt64); + WriteUnifiedUInt64(writer, value.OverlappedValueUInt64); + break; + case RedisValue.StorageType.Double: + WriteUnifiedDouble(writer, value.OverlappedValueDouble); break; - case RedisValue.StorageType.Double: // use string case RedisValue.StorageType.String: - WriteUnifiedPrefixedString(output, null, (string)value); + WriteUnifiedPrefixedString(writer, null, (string?)value); break; case RedisValue.StorageType.Raw: - WriteUnifiedSpan(output, ((ReadOnlyMemory)value).Span); + WriteUnifiedSpan(writer, ((ReadOnlyMemory)value).Span); break; default: throw new InvalidOperationException($"Unexpected {value.Type} value: '{value}'"); } } + internal void WriteBulkString(ReadOnlySpan value) + { + if (_ioPipe?.Output is { } writer) + { + WriteUnifiedSpan(writer, value); + } + } + internal const int REDIS_MAX_ARGS = 1024 * 1024; // there is a <= 1024*1024 max constraint inside redis itself: https://github.com/antirez/redis/blob/6c60526db91e23fb2d666fc52facc9a11780a2a3/src/networking.c#L1024 internal void WriteHeader(RedisCommand command, int arguments, CommandBytes commandBytes = default) { - var bridge = BridgeCouldBeNull; - if (bridge == null) throw new ObjectDisposedException(ToString()); + if (_ioPipe?.Output is not PipeWriter writer) + { + return; // Prevent null refs during disposal + } + + var bridge = BridgeCouldBeNull ?? 
throw new ObjectDisposedException(ToString()); if (command == RedisCommand.UNKNOWN) { @@ -709,31 +898,57 @@ internal void WriteHeader(RedisCommand command, int arguments, CommandBytes comm // *{argCount}\r\n = 3 + MaxInt32TextLen // ${cmd-len}\r\n = 3 + MaxInt32TextLen // {cmd}\r\n = 2 + commandBytes.Length - var span = _ioPipe.Output.GetSpan(commandBytes.Length + 8 + MaxInt32TextLen + MaxInt32TextLen); + var span = writer.GetSpan(commandBytes.Length + 8 + Format.MaxInt32TextLen + Format.MaxInt32TextLen); span[0] = (byte)'*'; int offset = WriteRaw(span, arguments + 1, offset: 1); offset = AppendToSpanCommand(span, commandBytes, offset: offset); - _ioPipe.Output.Advance(offset); + writer.Advance(offset); } - internal void RecordQuit() // don't blame redis if we fired the first shot - => (_ioPipe as SocketConnection)?.TrySetProtocolShutdown(PipeShutdownKind.ProtocolExitClient); + internal void WriteRaw(ReadOnlySpan bytes) => _ioPipe?.Output?.Write(bytes); + + internal void RecordQuit() + { + // don't blame redis if we fired the first shot + Volatile.Write(ref clientSentQuit, 1); + (_ioPipe as SocketConnection)?.TrySetProtocolShutdown(PipeShutdownKind.ProtocolExitClient); + } - internal static void WriteMultiBulkHeader(PipeWriter output, long count) + internal static void WriteMultiBulkHeader(IBufferWriter output, long count) { // *{count}\r\n = 3 + MaxInt32TextLen - var span = output.GetSpan(3 + MaxInt32TextLen); + var span = output.GetSpan(3 + Format.MaxInt32TextLen); span[0] = (byte)'*'; int offset = WriteRaw(span, count, offset: 1); output.Advance(offset); } - internal const int - MaxInt32TextLen = 11, // -2,147,483,648 (not including the commas) - MaxInt64TextLen = 20; // -9,223,372,036,854,775,808 (not including the commas) + internal static void WriteMultiBulkHeader(IBufferWriter output, long count, ResultType type) + { + // *{count}\r\n = 3 + MaxInt32TextLen + var span = output.GetSpan(3 + Format.MaxInt32TextLen); + span[0] = type switch + { + ResultType.Push 
=> (byte)'>', + ResultType.Attribute => (byte)'|', + ResultType.Map => (byte)'%', + ResultType.Set => (byte)'~', + _ => (byte)'*', + }; + if ((type is ResultType.Map or ResultType.Attribute) & count > 0) + { + if ((count & 1) != 0) Throw(type, count); + count >>= 1; + static void Throw(ResultType type, long count) => throw new ArgumentOutOfRangeException( + paramName: nameof(count), + message: $"{type} data must be in pairs; got {count}"); + } + int offset = WriteRaw(span, count, offset: 1); + output.Advance(offset); + } [MethodImpl(MethodImplOptions.AggressiveInlining)] internal static int WriteCrlf(Span span, int offset) @@ -744,7 +959,7 @@ internal static int WriteCrlf(Span span, int offset) } [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal static void WriteCrlf(PipeWriter writer) + internal static void WriteCrlf(IBufferWriter writer) { var span = writer.GetSpan(2); span[0] = (byte)'\r'; @@ -752,7 +967,6 @@ internal static void WriteCrlf(PipeWriter writer) writer.Advance(2); } - internal static int WriteRaw(Span span, long value, bool withLengthPrefix = false, int offset = 0) { if (value >= 0 && value <= 9) @@ -815,25 +1029,16 @@ internal static int WriteRaw(Span span, long value, bool withLengthPrefix { // we're going to write it, but *to the wrong place* var availableChunk = span.Slice(offset); - if (!Utf8Formatter.TryFormat(value, availableChunk, out int formattedLength)) - { - throw new InvalidOperationException("TryFormat failed"); - } + var formattedLength = Format.FormatInt64(value, availableChunk); if (withLengthPrefix) { // now we know how large the prefix is: write the prefix, then write the value - if (!Utf8Formatter.TryFormat(formattedLength, availableChunk, out int prefixLength)) - { - throw new InvalidOperationException("TryFormat failed"); - } + var prefixLength = Format.FormatInt32(formattedLength, availableChunk); offset += prefixLength; offset = WriteCrlf(span, offset); availableChunk = span.Slice(offset); - if 
(!Utf8Formatter.TryFormat(value, availableChunk, out int finalLength)) - { - throw new InvalidOperationException("TryFormat failed"); - } + var finalLength = Format.FormatInt64(value, availableChunk); offset += finalLength; Debug.Assert(finalLength == formattedLength); } @@ -846,18 +1051,12 @@ internal static int WriteRaw(Span span, long value, bool withLengthPrefix return WriteCrlf(span, offset); } - private async ValueTask FlushAsync_Awaited(PhysicalConnection connection, ValueTask flush, bool throwOnFailure -#if DEBUG - , int startFlush, long flushBytes -#endif - ) + [System.Diagnostics.CodeAnalysis.SuppressMessage("Performance", "CA1822:Mark members as static", Justification = "DEBUG uses instance data")] + private async ValueTask FlushAsync_Awaited(PhysicalConnection connection, ValueTask flush, bool throwOnFailure) { try { await flush.ForAwait(); -#if DEBUG - RecordEndFlush(startFlush, flushBytes); -#endif connection._writeStatus = WriteStatus.Flushed; connection.UpdateLastWriteTime(); return WriteResult.Success; @@ -869,8 +1068,9 @@ private async ValueTask FlushAsync_Awaited(PhysicalConnection conne } } - CancellationTokenSource _reusableFlushSyncTokenSource; + private CancellationTokenSource? 
_reusableFlushSyncTokenSource; [Obsolete("this is an anti-pattern; work to reduce reliance on this is in progress")] + [System.Diagnostics.CodeAnalysis.SuppressMessage("Style", "IDE0062:Make local function 'static'", Justification = "DEBUG uses instance data")] internal WriteResult FlushSync(bool throwOnFailure, int millisecondsTimeout) { var cts = _reusableFlushSyncTokenSource ??= new CancellationTokenSource(); @@ -898,9 +1098,6 @@ internal WriteResult FlushSync(bool throwOnFailure, int millisecondsTimeout) void ThrowTimeout() { -#if DEBUG - if (millisecondsTimeout > _maxFlushTime) _maxFlushTime = millisecondsTimeout; // a fair bet even if we didn't measure -#endif throw new TimeoutException("timeout while synchronously flushing"); } } @@ -911,20 +1108,8 @@ internal ValueTask FlushAsync(bool throwOnFailure, CancellationToke try { _writeStatus = WriteStatus.Flushing; -#if DEBUG - int startFlush = Environment.TickCount; - long flushBytes = -1; - if (_ioPipe is SocketConnection sc) flushBytes = sc.GetCounters().BytesWaitingToBeSent; -#endif var flush = tmp.FlushAsync(cancellationToken); - if (!flush.IsCompletedSuccessfully) return FlushAsync_Awaited(this, flush, throwOnFailure -#if DEBUG - , startFlush, flushBytes -#endif - ); -#if DEBUG - RecordEndFlush(startFlush, flushBytes); -#endif + if (!flush.IsCompletedSuccessfully) return FlushAsync_Awaited(this, flush, throwOnFailure); _writeStatus = WriteStatus.Flushed; UpdateLastWriteTime(); return new ValueTask(WriteResult.Success); @@ -935,27 +1120,10 @@ internal ValueTask FlushAsync(bool throwOnFailure, CancellationToke return new ValueTask(WriteResult.WriteFailure); } } -#if DEBUG - private void RecordEndFlush(int start, long bytes) - { - - var end = Environment.TickCount; - int taken = unchecked(end - start); - if (taken > _maxFlushTime) - { - _maxFlushTime = taken; - if (bytes >= 0) _maxFlushBytes = bytes; - } - } - private volatile int _maxFlushTime = -1; - private long _maxFlushBytes = -1; - internal int 
MaxFlushTime => _maxFlushTime; - internal long MaxFlushBytes => _maxFlushBytes; -#endif - private static readonly ReadOnlyMemory NullBulkString = Encoding.ASCII.GetBytes("$-1\r\n"), EmptyBulkString = Encoding.ASCII.GetBytes("$0\r\n\r\n"); + private static readonly ReadOnlyMemory NullBulkString = Encoding.ASCII.GetBytes("$-1\r\n"), EmptyBulkString = Encoding.ASCII.GetBytes("$0\r\n\r\n"); - private static void WriteUnifiedBlob(PipeWriter writer, byte[] value) + private static void WriteUnifiedBlob(IBufferWriter writer, byte[]? value) { if (value == null) { @@ -968,13 +1136,10 @@ private static void WriteUnifiedBlob(PipeWriter writer, byte[] value) } } -#pragma warning disable RCS1231 // Make parameter ref read-only. - private static void WriteUnifiedSpan(PipeWriter writer, ReadOnlySpan value) -#pragma warning restore RCS1231 // Make parameter ref read-only. + private static void WriteUnifiedSpan(IBufferWriter writer, ReadOnlySpan value) { // ${len}\r\n = 3 + MaxInt32TextLen // {value}\r\n = 2 + value.Length - const int MaxQuickSpanSize = 512; if (value.Length == 0) { @@ -983,7 +1148,7 @@ private static void WriteUnifiedSpan(PipeWriter writer, ReadOnlySpan value } else if (value.Length <= MaxQuickSpanSize) { - var span = writer.GetSpan(5 + MaxInt32TextLen + value.Length); + var span = writer.GetSpan(5 + Format.MaxInt32TextLen + value.Length); span[0] = (byte)'$'; int bytes = AppendToSpan(span, value, 1); writer.Advance(bytes); @@ -991,7 +1156,7 @@ private static void WriteUnifiedSpan(PipeWriter writer, ReadOnlySpan value else { // too big to guarantee can do in a single span - var span = writer.GetSpan(3 + MaxInt32TextLen); + var span = writer.GetSpan(3 + Format.MaxInt32TextLen); span[0] = (byte)'$'; int bytes = WriteRaw(span, value.Length, offset: 1); writer.Advance(bytes); @@ -1012,9 +1177,7 @@ private static int AppendToSpanCommand(Span span, in CommandBytes value, i return WriteCrlf(span, offset); } -#pragma warning disable RCS1231 // Make parameter ref read-only. 
- spans are tiny private static int AppendToSpan(Span span, ReadOnlySpan value, int offset = 0) -#pragma warning restore RCS1231 // Make parameter ref read-only. { offset = WriteRaw(span, value.Length, offset: offset); value.CopyTo(span.Slice(offset, value.Length)); @@ -1024,7 +1187,11 @@ private static int AppendToSpan(Span span, ReadOnlySpan value, int o internal void WriteSha1AsHex(byte[] value) { - var writer = _ioPipe.Output; + if (_ioPipe?.Output is not PipeWriter writer) + { + return; // Prevent null refs during disposal + } + if (value == null) { writer.Write(NullBulkString.Span); @@ -1033,7 +1200,6 @@ internal void WriteSha1AsHex(byte[] value) { // $40\r\n = 5 // {40 bytes}\r\n = 42 - var span = writer.GetSpan(47); span[0] = (byte)'$'; span[1] = (byte)'4'; @@ -1064,8 +1230,13 @@ internal static byte ToHexNibble(int value) return value < 10 ? (byte)('0' + value) : (byte)('a' - 10 + value); } - internal static void WriteUnifiedPrefixedString(PipeWriter writer, byte[] prefix, string value) + internal static void WriteUnifiedPrefixedString(IBufferWriter? maybeNullWriter, byte[]? prefix, string? value) { + if (maybeNullWriter is not { } writer) + { + return; // Prevent null refs during disposal + } + if (value == null) { // special case @@ -1086,7 +1257,7 @@ internal static void WriteUnifiedPrefixedString(PipeWriter writer, byte[] prefix } else { - var span = writer.GetSpan(3 + MaxInt32TextLen); + var span = writer.GetSpan(3 + Format.MaxInt32TextLen); span[0] = (byte)'$'; int bytes = WriteRaw(span, totalLength, offset: 1); writer.Advance(bytes); @@ -1099,7 +1270,7 @@ internal static void WriteUnifiedPrefixedString(PipeWriter writer, byte[] prefix } [ThreadStatic] - private static Encoder s_PerThreadEncoder; + private static Encoder? 
s_PerThreadEncoder; internal static Encoder GetPerThreadEncoder() { var encoder = s_PerThreadEncoder; @@ -1114,7 +1285,7 @@ internal static Encoder GetPerThreadEncoder() return encoder; } - unsafe static internal void WriteRaw(PipeWriter writer, string value, int expectedLength) + internal static unsafe void WriteRaw(IBufferWriter writer, string value, int expectedLength) { const int MaxQuickEncodeSize = 512; @@ -1167,18 +1338,24 @@ unsafe static internal void WriteRaw(PipeWriter writer, string value, int expect } } - private static void WriteUnifiedPrefixedBlob(PipeWriter writer, byte[] prefix, byte[] value) + private static void WriteUnifiedPrefixedBlob(PipeWriter? maybeNullWriter, byte[]? prefix, byte[]? value) { - // ${total-len}\r\n + if (maybeNullWriter is not PipeWriter writer) + { + return; // Prevent null refs during disposal + } + + // ${total-len}\r\n // {prefix}{value}\r\n if (prefix == null || prefix.Length == 0 || value == null) - { // if no prefix, just use the non-prefixed version; + { + // if no prefix, just use the non-prefixed version; // even if prefixed, a null value writes as null, so can use the non-prefixed version WriteUnifiedBlob(writer, value); } else { - var span = writer.GetSpan(3 + MaxInt32TextLen); // note even with 2 max-len, we're still in same text range + var span = writer.GetSpan(3 + Format.MaxInt32TextLen); // note even with 2 max-len, we're still in same text range span[0] = (byte)'$'; int bytes = WriteRaw(span, prefix.LongLength + value.LongLength, offset: 1); writer.Advance(bytes); @@ -1192,32 +1369,30 @@ private static void WriteUnifiedPrefixedBlob(PipeWriter writer, byte[] prefix, b } } - private static void WriteUnifiedInt64(PipeWriter writer, long value) + private static void WriteUnifiedInt64(IBufferWriter writer, long value) { // note from specification: A client sends to the Redis server a RESP Array consisting of just Bulk Strings. // (i.e. 
we can't just send ":123\r\n", we need to send "$3\r\n123\r\n" - // ${asc-len}\r\n = 3 + MaxInt32TextLen + // ${asc-len}\r\n = 4/5 (asc-len at most 2 digits) // {asc}\r\n = MaxInt64TextLen + 2 - var span = writer.GetSpan(5 + MaxInt32TextLen + MaxInt64TextLen); + var span = writer.GetSpan(7 + Format.MaxInt64TextLen); span[0] = (byte)'$'; var bytes = WriteRaw(span, value, withLengthPrefix: true, offset: 1); writer.Advance(bytes); } - private static void WriteUnifiedUInt64(PipeWriter writer, ulong value) + private static void WriteUnifiedUInt64(IBufferWriter writer, ulong value) { // note from specification: A client sends to the Redis server a RESP Array consisting of just Bulk Strings. // (i.e. we can't just send ":123\r\n", we need to send "$3\r\n123\r\n" + Span valueSpan = stackalloc byte[Format.MaxInt64TextLen]; - // ${asc-len}\r\n = 3 + MaxInt32TextLen - // {asc}\r\n = MaxInt64TextLen + 2 - var span = writer.GetSpan(5 + MaxInt32TextLen + MaxInt64TextLen); - - Span valueSpan = stackalloc byte[MaxInt64TextLen]; - if (!Utf8Formatter.TryFormat(value, valueSpan, out var len)) - throw new InvalidOperationException("TryFormat failed"); + var len = Format.FormatUInt64(value, valueSpan); + // ${asc-len}\r\n = 4/5 (asc-len at most 2 digits) + // {asc}\r\n = {len} + 2 + var span = writer.GetSpan(7 + len); span[0] = (byte)'$'; int offset = WriteRaw(span, len, withLengthPrefix: false, offset: 1); valueSpan.Slice(0, len).CopyTo(span.Slice(offset)); @@ -1225,32 +1400,152 @@ private static void WriteUnifiedUInt64(PipeWriter writer, ulong value) offset = WriteCrlf(span, offset); writer.Advance(offset); } - internal static void WriteInteger(PipeWriter writer, long value) + + private static void WriteUnifiedDouble(IBufferWriter writer, double value) { - //note: client should never write integer; only server does this +#if NET8_0_OR_GREATER + Span valueSpan = stackalloc byte[Format.MaxDoubleTextLen]; + var len = Format.FormatDouble(value, valueSpan); + // ${asc-len}\r\n = 4/5 
(asc-len at most 2 digits) + // {asc}\r\n = {len} + 2 + var span = writer.GetSpan(7 + len); + span[0] = (byte)'$'; + int offset = WriteRaw(span, len, withLengthPrefix: false, offset: 1); + valueSpan.Slice(0, len).CopyTo(span.Slice(offset)); + offset += len; + offset = WriteCrlf(span, offset); + writer.Advance(offset); +#else + // fallback: drop to string + WriteUnifiedPrefixedString(writer, null, Format.ToString(value)); +#endif + } + + internal static void WriteInteger(IBufferWriter writer, long value) + { + // note: client should never write integer; only server does this // :{asc}\r\n = MaxInt64TextLen + 3 - var span = writer.GetSpan(3 + MaxInt64TextLen); + var span = writer.GetSpan(3 + Format.MaxInt64TextLen); span[0] = (byte)':'; var bytes = WriteRaw(span, value, withLengthPrefix: false, offset: 1); writer.Advance(bytes); } - internal long GetSocketBytes(out long readCount, out long writeCount) + internal readonly struct ConnectionStatus + { + /// + /// Number of messages sent outbound, but we don't yet have a response for. + /// + public int MessagesSentAwaitingResponse { get; init; } + + /// + /// Bytes available on the socket, not yet read into the pipe. + /// + public long BytesAvailableOnSocket { get; init; } + + /// + /// Bytes read from the socket, pending in the reader pipe. + /// + public long BytesInReadPipe { get; init; } + + /// + /// Bytes in the writer pipe, waiting to be written to the socket. + /// + public long BytesInWritePipe { get; init; } + + /// + /// Byte size of the last result we processed. + /// + public long BytesLastResult { get; init; } + + /// + /// Byte size on the buffer that isn't processed yet. + /// + public long BytesInBuffer { get; init; } + + /// + /// The inbound pipe reader status. + /// + public ReadStatus ReadStatus { get; init; } + + /// + /// The outbound pipe writer status. 
+ /// + public WriteStatus WriteStatus { get; init; } + + public override string ToString() => + $"SentAwaitingResponse: {MessagesSentAwaitingResponse}, AvailableOnSocket: {BytesAvailableOnSocket} byte(s), InReadPipe: {BytesInReadPipe} byte(s), InWritePipe: {BytesInWritePipe} byte(s), ReadStatus: {ReadStatus}, WriteStatus: {WriteStatus}"; + + /// + /// The default connection stats, notable *not* the same as default since initializers don't run. + /// + public static ConnectionStatus Default { get; } = new() + { + BytesAvailableOnSocket = -1, + BytesInReadPipe = -1, + BytesInWritePipe = -1, + ReadStatus = ReadStatus.NA, + WriteStatus = WriteStatus.NA, + }; + + /// + /// The zeroed connection stats, which we want to display as zero for default exception cases. + /// + public static ConnectionStatus Zero { get; } = new() + { + BytesAvailableOnSocket = 0, + BytesInReadPipe = 0, + BytesInWritePipe = 0, + ReadStatus = ReadStatus.NA, + WriteStatus = WriteStatus.NA, + }; + } + + public ConnectionStatus GetStatus() { if (_ioPipe is SocketConnection conn) { var counters = conn.GetCounters(); - readCount = counters.BytesWaitingToBeRead; - writeCount = counters.BytesWaitingToBeSent; - return counters.BytesAvailableOnSocket; + return new ConnectionStatus() + { + MessagesSentAwaitingResponse = GetSentAwaitingResponseCount(), + BytesAvailableOnSocket = counters.BytesAvailableOnSocket, + BytesInReadPipe = counters.BytesWaitingToBeRead, + BytesInWritePipe = counters.BytesWaitingToBeSent, + ReadStatus = _readStatus, + WriteStatus = _writeStatus, + BytesLastResult = bytesLastResult, + BytesInBuffer = bytesInBuffer, + }; + } + + // Fall back to bytes waiting on the socket if we can + int fallbackBytesAvailable; + try + { + fallbackBytesAvailable = VolatileSocket?.Available ?? -1; + } + catch + { + // If this fails, we're likely in a race disposal situation and do not want to blow sky high here. 
+ fallbackBytesAvailable = -1; } - readCount = writeCount = -1; - return VolatileSocket?.Available ?? -1; + + return new ConnectionStatus() + { + BytesAvailableOnSocket = fallbackBytesAvailable, + BytesInReadPipe = -1, + BytesInWritePipe = -1, + ReadStatus = _readStatus, + WriteStatus = _writeStatus, + BytesLastResult = bytesLastResult, + BytesInBuffer = bytesInBuffer, + }; } - private RemoteCertificateValidationCallback GetAmbientIssuerCertificateCallback() + internal static RemoteCertificateValidationCallback? GetAmbientIssuerCertificateCallback() { try { @@ -1263,24 +1558,32 @@ private RemoteCertificateValidationCallback GetAmbientIssuerCertificateCallback( } return null; } - private static LocalCertificateSelectionCallback GetAmbientClientCertificateCallback() + internal static LocalCertificateSelectionCallback? GetAmbientClientCertificateCallback() { try { - var pfxPath = Environment.GetEnvironmentVariable("SERedis_ClientCertPfxPath"); - var pfxPassword = Environment.GetEnvironmentVariable("SERedis_ClientCertPassword"); - var pfxStorageFlags = Environment.GetEnvironmentVariable("SERedis_ClientCertStorageFlags"); - - X509KeyStorageFlags? 
flags = null; - if (!string.IsNullOrEmpty(pfxStorageFlags)) + var certificatePath = Environment.GetEnvironmentVariable("SERedis_ClientCertPfxPath"); + if (!string.IsNullOrEmpty(certificatePath) && File.Exists(certificatePath)) { - flags = Enum.Parse(typeof(X509KeyStorageFlags), pfxStorageFlags) as X509KeyStorageFlags?; + var password = Environment.GetEnvironmentVariable("SERedis_ClientCertPassword"); + var pfxStorageFlags = Environment.GetEnvironmentVariable("SERedis_ClientCertStorageFlags"); + X509KeyStorageFlags storageFlags = X509KeyStorageFlags.DefaultKeySet; + if (!string.IsNullOrEmpty(pfxStorageFlags) && Enum.TryParse(pfxStorageFlags, true, out var typedFlags)) + { + storageFlags = typedFlags; + } + + return ConfigurationOptions.CreatePfxUserCertificateCallback(certificatePath, password, storageFlags); } - if (!string.IsNullOrEmpty(pfxPath) && File.Exists(pfxPath)) +#if NET + certificatePath = Environment.GetEnvironmentVariable("SERedis_ClientCertPemPath"); + if (!string.IsNullOrEmpty(certificatePath) && File.Exists(certificatePath)) { - return delegate { return new X509Certificate2(pfxPath, pfxPassword ?? "", flags ?? X509KeyStorageFlags.DefaultKeySet); }; + var passwordPath = Environment.GetEnvironmentVariable("SERedis_ClientCertPasswordPath"); + return ConfigurationOptions.CreatePemUserCertificateCallback(certificatePath, passwordPath); } +#endif } catch (Exception ex) { @@ -1289,12 +1592,12 @@ private static LocalCertificateSelectionCallback GetAmbientClientCertificateCall return null; } - internal async ValueTask ConnectedAsync(Socket socket, LogProxy log, SocketManager manager) + internal async ValueTask ConnectedAsync(Socket? socket, ILogger? log, SocketManager manager) { var bridge = BridgeCouldBeNull; if (bridge == null) return false; - IDuplexPipe pipe = null; + IDuplexPipe? 
pipe = null; try { // disallow connection in some cases @@ -1303,32 +1606,57 @@ internal async ValueTask ConnectedAsync(Socket socket, LogProxy log, Socke // the order is important here: // non-TLS: [Socket]<==[SocketConnection:IDuplexPipe] // TLS: [Socket]<==[NetworkStream]<==[SslStream]<==[StreamConnection:IDuplexPipe] - var config = bridge.Multiplexer.RawConfig; + var tunnel = config.Tunnel; + Stream? stream = null; + if (tunnel is not null) + { + stream = await tunnel.BeforeAuthenticateAsync(bridge.ServerEndPoint.EndPoint, bridge.ConnectionType, socket, CancellationToken.None).ForAwait(); + } + if (config.Ssl) { - log?.WriteLine("Configuring TLS"); + log?.LogInformationConfiguringTLS(); var host = config.SslHost; - if (string.IsNullOrWhiteSpace(host)) host = Format.ToStringHostOnly(bridge.ServerEndPoint.EndPoint); + if (host.IsNullOrWhiteSpace()) + { + host = Format.ToStringHostOnly(bridge.ServerEndPoint.EndPoint); + } - var ssl = new SslStream(new NetworkStream(socket), false, - config.CertificateValidationCallback ?? GetAmbientIssuerCertificateCallback(), - config.CertificateSelectionCallback ?? GetAmbientClientCertificateCallback(), - EncryptionPolicy.RequireEncryption); + stream ??= new NetworkStream(socket ?? throw new InvalidOperationException("No socket or stream available - possibly a tunnel error")); + var ssl = new SslStream( + innerStream: stream, + leaveInnerStreamOpen: false, + userCertificateValidationCallback: config.CertificateValidationCallback ?? GetAmbientIssuerCertificateCallback(), + userCertificateSelectionCallback: config.CertificateSelectionCallback ?? 
GetAmbientClientCertificateCallback(), + encryptionPolicy: EncryptionPolicy.RequireEncryption); try { try { - ssl.AuthenticateAsClient(host, config.SslProtocols, config.CheckCertificateRevocation); +#if NET + var configOptions = config.SslClientAuthenticationOptions?.Invoke(host); + if (configOptions is not null) + { + await ssl.AuthenticateAsClientAsync(configOptions).ForAwait(); + } + else + { + await ssl.AuthenticateAsClientAsync(host, config.SslProtocols, config.CheckCertificateRevocation).ForAwait(); + } +#else + await ssl.AuthenticateAsClientAsync(host, config.SslProtocols, config.CheckCertificateRevocation).ForAwait(); +#endif } catch (Exception ex) { Debug.WriteLine(ex.Message); - bridge.Multiplexer?.SetAuthSuspect(); + bridge.Multiplexer.SetAuthSuspect(ex); + bridge.Multiplexer.Logger?.LogErrorConnectionIssue(ex, ex.Message); throw; } - log?.WriteLine($"TLS connection established successfully using protocol: {ssl.SslProtocol}"); + log?.LogInformationTLSConnectionEstablished(ssl.SslProtocol); } catch (AuthenticationException authexception) { @@ -1336,7 +1664,12 @@ internal async ValueTask ConnectedAsync(Socket socket, LogProxy log, Socke bridge.Multiplexer.Trace("Encryption failure"); return false; } - pipe = StreamConnection.GetDuplex(ssl, manager.SendPipeOptions, manager.ReceivePipeOptions, name: bridge.Name); + stream = ssl; + } + + if (stream is not null) + { + pipe = StreamConnection.GetDuplex(stream, manager.SendPipeOptions, manager.ReceivePipeOptions, name: bridge.Name); } else { @@ -1346,7 +1679,7 @@ internal async ValueTask ConnectedAsync(Socket socket, LogProxy log, Socke _ioPipe = pipe; - log?.WriteLine($"{bridge?.Name}: Connected "); + log?.LogInformationConnected(bridge.Name); await bridge.OnConnectedAsync(this, log).ForAwait(); return true; @@ -1359,73 +1692,209 @@ internal async ValueTask ConnectedAsync(Socket socket, LogProxy log, Socke } } + internal enum PushKind + { + [AsciiHash("")] + None, + [AsciiHash("message")] + Message, + 
[AsciiHash("pmessage")] + PMessage, + [AsciiHash("smessage")] + SMessage, + [AsciiHash("subscribe")] + Subscribe, + [AsciiHash("psubscribe")] + PSubscribe, + [AsciiHash("ssubscribe")] + SSubscribe, + [AsciiHash("unsubscribe")] + Unsubscribe, + [AsciiHash("punsubscribe")] + PUnsubscribe, + [AsciiHash("sunsubscribe")] + SUnsubscribe, + } + + internal static partial class PushKindMetadata + { + [AsciiHash] + internal static partial bool TryParse(ReadOnlySpan value, out PushKind result); + } + + private PushKind GetPushKind(in Sequence result, out RedisChannel channel) + { + var len = result.Length; + if (len < 2) + { + // for supported cases, we demand at least the kind and the subscription channel + channel = default; + return PushKind.None; + } + + if (result[0].TryParse(PushKindMetadata.TryParse, out PushKind kind) && kind is not PushKind.None) + { + RedisChannel.RedisChannelOptions channelOptions = RedisChannel.RedisChannelOptions.None; + switch (kind) + { + case PushKind.Message when len >= 3: + break; + case PushKind.PMessage when len >= 4: + channelOptions = RedisChannel.RedisChannelOptions.Pattern; + break; + case PushKind.SMessage when len >= 3: + channelOptions = RedisChannel.RedisChannelOptions.Sharded; + break; + case PushKind.Subscribe: + break; + case PushKind.PSubscribe: + channelOptions = RedisChannel.RedisChannelOptions.Pattern; + break; + case PushKind.SSubscribe: + channelOptions = RedisChannel.RedisChannelOptions.Sharded; + break; + case PushKind.Unsubscribe: + break; + case PushKind.PUnsubscribe: + channelOptions = RedisChannel.RedisChannelOptions.Pattern; + break; + case PushKind.SUnsubscribe: + channelOptions = RedisChannel.RedisChannelOptions.Sharded; + break; + default: + kind = PushKind.None; + break; + } + + if (kind != PushKind.None) + { + // the channel is always the second element + channel = result[1].AsRedisChannel(ChannelPrefix, channelOptions); + return kind; + } + } + channel = default; + return PushKind.None; + } + private void 
MatchResult(in RawResult result) { // check to see if it could be an out-of-band pubsub message - if (connectionType == ConnectionType.Subscription && result.Type == ResultType.MultiBulk) + if ((connectionType == ConnectionType.Subscription && result.Resp2TypeArray == ResultType.Array) || result.Resp3Type == ResultType.Push) { var muxer = BridgeCouldBeNull?.Multiplexer; if (muxer == null) return; // out of band message does not match to a queued message var items = result.GetItems(); - if (items.Length >= 3 && items[0].IsEqual(message)) + var kind = GetPushKind(items, out var subscriptionChannel); + switch (kind) { - _readStatus = ReadStatus.PubSubMessage; + case PushKind.Message: + case PushKind.SMessage: + _readStatus = kind is PushKind.Message ? ReadStatus.PubSubMessage : ReadStatus.PubSubSMessage; - // special-case the configuration change broadcasts (we don't keep that in the usual pub/sub registry) - var configChanged = muxer.ConfigurationChangedChannel; - if (configChanged != null && items[1].IsEqual(configChanged)) - { - EndPoint blame = null; - try + // special-case the configuration change broadcasts (we don't keep that in the usual pub/sub registry) + var configChanged = muxer.ConfigurationChangedChannel; + if (configChanged != null && items[1].IsEqual(configChanged)) { - if (!items[2].IsEqual(CommonReplies.wildcard)) + EndPoint? 
blame = null; + try { - blame = Format.TryParseEndPoint(items[2].GetString()); + if (!items[2].IsEqual(CommonReplies.wildcard)) + { + // We don't want to fail here, just trying to identify + _ = Format.TryParseEndPoint(items[2].GetString(), out blame); + } } + catch + { + /* no biggie */ + } + + Trace("Configuration changed: " + Format.ToString(blame)); + _readStatus = ReadStatus.Reconfigure; + muxer.ReconfigureIfNeeded(blame, true, "broadcast"); } - catch { /* no biggie */ } - Trace("Configuration changed: " + Format.ToString(blame)); - _readStatus = ReadStatus.Reconfigure; - muxer.ReconfigureIfNeeded(blame, true, "broadcast"); - } - // invoke the handlers - var channel = items[1].AsRedisChannel(ChannelPrefix, RedisChannel.PatternMode.Literal); - Trace("MESSAGE: " + channel); - if (!channel.IsNull) - { - _readStatus = ReadStatus.InvokePubSub; - muxer.OnMessage(channel, channel, items[2].AsRedisValue()); - } - return; // AND STOP PROCESSING! - } - else if (items.Length >= 4 && items[0].IsEqual(pmessage)) - { - _readStatus = ReadStatus.PubSubPMessage; + // invoke the handlers + if (!subscriptionChannel.IsNull) + { + Trace($"{kind}: {subscriptionChannel}"); + if (TryGetPubSubPayload(items[2], out var payload)) + { + _readStatus = ReadStatus.InvokePubSub; + muxer.OnMessage(subscriptionChannel, subscriptionChannel, payload); + } + // could be multi-message: https://github.com/StackExchange/StackExchange.Redis/issues/2507 + else if (TryGetMultiPubSubPayload(items[2], out var payloads)) + { + _readStatus = ReadStatus.InvokePubSub; + muxer.OnMessage(subscriptionChannel, subscriptionChannel, payloads); + } + } + return; // and stop processing + case PushKind.PMessage: + _readStatus = ReadStatus.PubSubPMessage; - var channel = items[2].AsRedisChannel(ChannelPrefix, RedisChannel.PatternMode.Literal); - Trace("PMESSAGE: " + channel); - if (!channel.IsNull) - { - var sub = items[1].AsRedisChannel(ChannelPrefix, RedisChannel.PatternMode.Pattern); - _readStatus = 
ReadStatus.InvokePubSub; - muxer.OnMessage(sub, channel, items[3].AsRedisValue()); - } - return; // AND STOP PROCESSING! + var messageChannel = items[2].AsRedisChannel(ChannelPrefix, RedisChannel.RedisChannelOptions.None); + if (!messageChannel.IsNull) + { + Trace($"{kind}: {messageChannel} via {subscriptionChannel}"); + if (TryGetPubSubPayload(items[3], out var payload)) + { + _readStatus = ReadStatus.InvokePubSub; + muxer.OnMessage(subscriptionChannel, messageChannel, payload); + } + else if (TryGetMultiPubSubPayload(items[3], out var payloads)) + { + _readStatus = ReadStatus.InvokePubSub; + muxer.OnMessage(subscriptionChannel, messageChannel, payloads); + } + } + return; // and stop processing + case PushKind.SUnsubscribe when !PeekChannelMessage(RedisCommand.SUNSUBSCRIBE, subscriptionChannel): + // then it was *unsolicited* - this probably means the slot was migrated + // (otherwise, we'll let the command-processor deal with it) + _readStatus = ReadStatus.PubSubUnsubscribe; + var server = BridgeCouldBeNull?.ServerEndPoint; + if (server is not null && muxer.TryGetSubscription(subscriptionChannel, out var subscription)) + { + // wipe and reconnect; but: to where? + // counter-intuitively, the only server we *know* already knows the new route is: + // the outgoing server, since it had to change to MIGRATING etc; the new INCOMING server + // knows, but *we don't know who that is*, and other nodes: aren't guaranteed to know (yet) + muxer.DefaultSubscriber.ResubscribeToServer(subscription, subscriptionChannel, server, cause: "sunsubscribe"); + } + return; // and STOP PROCESSING; unsolicited } - - // if it didn't look like "[p]message", then we still need to process the pending queue } Trace("Matching result..."); - Message msg; + + Message? 
msg = null; + // check whether we're waiting for a high-integrity mode post-response checksum (using cheap null-check first) + if (_awaitingToken is not null && (msg = Interlocked.Exchange(ref _awaitingToken, null)) is not null) + { + _readStatus = ReadStatus.ResponseSequenceCheck; + if (!ProcessHighIntegrityResponseToken(msg, in result, BridgeCouldBeNull)) + { + RecordConnectionFailed(ConnectionFailureType.ResponseIntegrityFailure, origin: nameof(ReadStatus.ResponseSequenceCheck)); + } + return; + } + _readStatus = ReadStatus.DequeueResult; lock (_writtenAwaitingResponse) { - if (_writtenAwaitingResponse.Count == 0) + if (msg is not null) + { + _awaitingToken = null; + } + + if (!_writtenAwaitingResponse.TryDequeue(out msg)) + { throw new InvalidOperationException("Received response with no message waiting: " + result.ToString()); - msg = _writtenAwaitingResponse.Dequeue(); + } } _activeMessage = msg; @@ -1434,20 +1903,123 @@ private void MatchResult(in RawResult result) if (msg.ComputeResult(this, result)) { _readStatus = msg.ResultBoxIsAsync ? ReadStatus.CompletePendingMessageAsync : ReadStatus.CompletePendingMessageSync; - msg.Complete(); + if (!msg.IsHighIntegrity) + { + // can't complete yet if needs checksum + msg.Complete(); + } + } + if (msg.IsHighIntegrity) + { + // stash this for the next non-OOB response + Volatile.Write(ref _awaitingToken, msg); } + _readStatus = ReadStatus.MatchResultComplete; _activeMessage = null; + + static bool ProcessHighIntegrityResponseToken(Message message, in RawResult result, PhysicalBridge? 
bridge) + { + bool isValid = false; + if (result.Resp2TypeBulkString == ResultType.BulkString) + { + var payload = result.Payload; + if (payload.Length == 4) + { + uint interpreted; + if (payload.IsSingleSegment) + { + interpreted = BinaryPrimitives.ReadUInt32LittleEndian(payload.First.Span); + } + else + { + Span span = stackalloc byte[4]; + payload.CopyTo(span); + interpreted = BinaryPrimitives.ReadUInt32LittleEndian(span); + } + isValid = interpreted == message.HighIntegrityToken; + } + } + if (isValid) + { + message.Complete(); + return true; + } + else + { + message.SetExceptionAndComplete(new InvalidOperationException("High-integrity mode detected possible protocol de-sync"), bridge); + return false; + } + } + + static bool TryGetPubSubPayload(in RawResult value, out RedisValue parsed, bool allowArraySingleton = true) + { + if (value.IsNull) + { + parsed = RedisValue.Null; + return true; + } + switch (value.Resp2TypeBulkString) + { + case ResultType.Integer: + case ResultType.SimpleString: + case ResultType.BulkString: + parsed = value.AsRedisValue(); + return true; + case ResultType.Array when allowArraySingleton && value.ItemsCount == 1: + return TryGetPubSubPayload(in value[0], out parsed, allowArraySingleton: false); + } + parsed = default; + return false; + } + + static bool TryGetMultiPubSubPayload(in RawResult value, out Sequence parsed) + { + if (value.Resp2TypeArray == ResultType.Array && value.ItemsCount != 0) + { + parsed = value.GetItems(); + return true; + } + parsed = default; + return false; + } } - private volatile Message _activeMessage; + private bool PeekChannelMessage(RedisCommand command, in RedisChannel channel) + { + Message? 
msg; + bool haveMsg; + lock (_writtenAwaitingResponse) + { + haveMsg = _writtenAwaitingResponse.TryPeek(out msg); + } + + return haveMsg && msg is CommandChannelBase typed + && typed.Command == command && typed.Channel == channel; + } - internal void GetHeadMessages(out Message now, out Message next) + private volatile Message? _activeMessage; + + internal void GetHeadMessages(out Message? now, out Message? next) { now = _activeMessage; - lock(_writtenAwaitingResponse) + bool haveLock = false; + try { - next = _writtenAwaitingResponse.Count == 0 ? null : _writtenAwaitingResponse.Peek(); + // careful locking here; a: don't try too hard (this is error info only), b: avoid deadlock (see #2376) + Monitor.TryEnter(_writtenAwaitingResponse, 10, ref haveLock); + if (haveLock) + { + _writtenAwaitingResponse.TryPeek(out next); + } + else + { + next = UnknownMessage.Instance; + } + } + finally + { + if (haveLock) Monitor.Exit(_writtenAwaitingResponse); } } @@ -1460,7 +2032,7 @@ private void OnDebugAbort() var bridge = BridgeCouldBeNull; if (bridge == null || !bridge.Multiplexer.AllowConnect) { - throw new RedisConnectionException(ConnectionFailureType.InternalFailure, "debugging"); + throw new RedisConnectionException(ConnectionFailureType.InternalFailure, "Aborting (AllowConnect: False)"); } } @@ -1505,9 +2077,9 @@ private async Task ReadFromPipe() Trace($"Processed {handled} messages"); input.AdvanceTo(buffer.Start, buffer.End); - if (handled == 0 && readResult.IsCompleted) + if ((handled == 0 && readResult.IsCompleted) || BridgeCouldBeNull?.NeedsReconnect == true) { - break; // no more data, or trailing incomplete messages + break; // no more data, trailing incomplete messages, or reconnection required } } Trace("EOF"); @@ -1541,21 +2113,19 @@ private async Task ReadFromPipe() } } - private static readonly ArenaOptions s_arenaOptions = new ArenaOptions( -#if DEBUG - blockSizeBytes: Unsafe.SizeOf() * 8 // force an absurdly small page size to trigger bugs -#endif - ); + 
private static readonly ArenaOptions s_arenaOptions = new ArenaOptions(); private readonly Arena _arena = new Arena(s_arenaOptions); + private int ProcessBuffer(ref ReadOnlySequence buffer) { int messageCount = 0; + bytesInBuffer = buffer.Length; while (!buffer.IsEmpty) { _readStatus = ReadStatus.TryParseResult; var reader = new BufferReader(buffer); - var result = TryParseResult(_arena, in buffer, ref reader, IncludeDetailInExceptions, BridgeCouldBeNull?.ServerEndPoint); + var result = TryParseResult(_protocol >= RedisProtocol.Resp3, _arena, in buffer, ref reader, IncludeDetailInExceptions, this); try { if (result.HasValue) @@ -1566,6 +2136,10 @@ private int ProcessBuffer(ref ReadOnlySequence buffer) Trace(result.ToString()); _readStatus = ReadStatus.MatchResult; MatchResult(result); + + // Track the last result size *after* processing for the *next* error message + bytesInBuffer = buffer.Length; + bytesLastResult = result.Payload.Length; } else { @@ -1581,59 +2155,47 @@ private int ProcessBuffer(ref ReadOnlySequence buffer) _readStatus = ReadStatus.ProcessBufferComplete; return messageCount; } - //void ISocketCallback.Read() - //{ - // Interlocked.Increment(ref haveReader); - // try - // { - // do - // { - // int space = EnsureSpaceAndComputeBytesToRead(); - // int bytesRead = netStream?.Read(ioBuffer, ioBufferBytes, space) ?? 
0; - - // if (!ProcessReadBytes(bytesRead)) return; // EOF - // } while (socketToken.Available != 0); - // Multiplexer.Trace("Buffer exhausted", physicalName); - // // ^^^ note that the socket manager will call us again when there is something to do - // } - // catch (Exception ex) - // { - // RecordConnectionFailed(ConnectionFailureType.InternalFailure, ex); - // } - // finally - // { - // Interlocked.Decrement(ref haveReader); - // } - //} - - private static RawResult ReadArray(Arena arena, in ReadOnlySequence buffer, ref BufferReader reader, bool includeDetailInExceptions, ServerEndPoint server) - { - var itemCount = ReadLineTerminatedString(ResultType.Integer, ref reader); + + private static RawResult.ResultFlags AsNull(RawResult.ResultFlags flags) => flags & ~RawResult.ResultFlags.NonNull; + + private static RawResult ReadArray(ResultType resultType, RawResult.ResultFlags flags, Arena arena, in ReadOnlySequence buffer, ref BufferReader reader, bool includeDetailInExceptions, ServerEndPoint? server) + { + var itemCount = ReadLineTerminatedString(ResultType.Integer, flags, ref reader); if (itemCount.HasValue) { - if (!itemCount.TryGetInt64(out long i64)) throw ExceptionFactory.ConnectionFailure(includeDetailInExceptions, ConnectionFailureType.ProtocolFailure, "Invalid array length", server); + if (!itemCount.TryGetInt64(out long i64)) + { + throw ExceptionFactory.ConnectionFailure( + includeDetailInExceptions, + ConnectionFailureType.ProtocolFailure, + itemCount.Is('?') ? 
"Streamed aggregate types not yet implemented" : "Invalid array length", + server); + } + int itemCountActual = checked((int)i64); if (itemCountActual < 0) { - //for null response by command like EXEC, RESP array: *-1\r\n - return RawResult.NullMultiBulk; + // for null response by command like EXEC, RESP array: *-1\r\n + return new RawResult(resultType, items: default, AsNull(flags)); } else if (itemCountActual == 0) { - //for zero array response by command like SCAN, Resp array: *0\r\n - return RawResult.EmptyMultiBulk; + // for zero array response by command like SCAN, Resp array: *0\r\n + return new RawResult(resultType, items: default, flags); } + if (resultType == ResultType.Map) itemCountActual <<= 1; // if it says "3", it means 3 pairs, i.e. 6 values + var oversized = arena.Allocate(itemCountActual); - var result = new RawResult(oversized, false); + var result = new RawResult(resultType, oversized, flags); if (oversized.IsSingleSegment) { var span = oversized.FirstSpan; - for(int i = 0; i < span.Length; i++) + for (int i = 0; i < span.Length; i++) { - if (!(span[i] = TryParseResult(arena, in buffer, ref reader, includeDetailInExceptions, server)).HasValue) + if (!(span[i] = TryParseResult(flags, arena, in buffer, ref reader, includeDetailInExceptions, server)).HasValue) { return RawResult.Nil; } @@ -1641,11 +2203,11 @@ private static RawResult ReadArray(Arena arena, in ReadOnlySequence arena, in ReadOnlySequence.Empty, true); + return new RawResult(type, ReadOnlySequence.Empty, AsNull(flags)); } if (reader.TryConsumeAsBuffer(bodySize, out var payload)) @@ -1676,7 +2245,7 @@ private static RawResult ReadBulkString(ref BufferReader reader, bool includeDet case ConsumeResult.NeedMoreData: break; // see NilResult below case ConsumeResult.Success: - return new RawResult(ResultType.BulkString, payload, false); + return new RawResult(type, payload, flags); default: throw ExceptionFactory.ConnectionFailure(includeDetailInExceptions, 
ConnectionFailureType.ProtocolFailure, "Invalid bulk string terminator", server); } @@ -1685,7 +2254,7 @@ private static RawResult ReadBulkString(ref BufferReader reader, bool includeDet return RawResult.Nil; } - private static RawResult ReadLineTerminatedString(ResultType type, ref BufferReader reader) + private static RawResult ReadLineTerminatedString(ResultType type, RawResult.ResultFlags flags, ref BufferReader reader) { int crlfOffsetFromCurrent = BufferReader.FindNextCrLf(reader); if (crlfOffsetFromCurrent < 0) return RawResult.Nil; @@ -1693,7 +2262,7 @@ private static RawResult ReadLineTerminatedString(ResultType type, ref BufferRea var payload = reader.ConsumeAsBuffer(crlfOffsetFromCurrent); reader.Consume(2); - return new RawResult(type, payload, false); + return new RawResult(type, payload, flags); } internal enum ReadStatus @@ -1711,8 +2280,10 @@ internal enum ReadStatus MatchResult, PubSubMessage, PubSubPMessage, + PubSubSMessage, Reconfigure, InvokePubSub, + ResponseSequenceCheck, // high-integrity mode only DequeueResult, ComputeResult, CompletePendingMessageSync, @@ -1720,6 +2291,7 @@ internal enum ReadStatus MatchResultComplete, ResetArena, ProcessBufferComplete, + PubSubUnsubscribe, NA = -1, } private volatile ReadStatus _readStatus; @@ -1727,51 +2299,146 @@ internal enum ReadStatus internal void StartReading() => ReadFromPipe().RedisFireAndForget(); - internal static RawResult TryParseResult(Arena arena, in ReadOnlySequence buffer, ref BufferReader reader, - bool includeDetilInExceptions, ServerEndPoint server, bool allowInlineProtocol = false) - { - var prefix = reader.PeekByte(); - if (prefix < 0) return RawResult.Nil; // EOF - switch (prefix) - { - case '+': // simple string - reader.Consume(1); - return ReadLineTerminatedString(ResultType.SimpleString, ref reader); - case '-': // error - reader.Consume(1); - return ReadLineTerminatedString(ResultType.Error, ref reader); - case ':': // integer - reader.Consume(1); - return 
ReadLineTerminatedString(ResultType.Integer, ref reader); - case '$': // bulk string - reader.Consume(1); - return ReadBulkString(ref reader, includeDetilInExceptions, server); - case '*': // array - reader.Consume(1); - return ReadArray(arena, in buffer, ref reader, includeDetilInExceptions, server); - default: - // string s = Format.GetString(buffer); - if (allowInlineProtocol) return ParseInlineProtocol(arena, ReadLineTerminatedString(ResultType.SimpleString, ref reader)); - throw new InvalidOperationException("Unexpected response prefix: " + (char)prefix); + internal static RawResult TryParseResult( + bool isResp3, + Arena arena, + in ReadOnlySequence buffer, + ref BufferReader reader, + bool includeDetilInExceptions, + PhysicalConnection? connection, + bool allowInlineProtocol = false) + { + return TryParseResult( + isResp3 ? (RawResult.ResultFlags.Resp3 | RawResult.ResultFlags.NonNull) : RawResult.ResultFlags.NonNull, + arena, + buffer, + ref reader, + includeDetilInExceptions, + connection?.BridgeCouldBeNull?.ServerEndPoint, + allowInlineProtocol); + } + + private static RawResult TryParseResult( + RawResult.ResultFlags flags, + Arena arena, + in ReadOnlySequence buffer, + ref BufferReader reader, + bool includeDetilInExceptions, + ServerEndPoint? 
server, + bool allowInlineProtocol = false) + { + int prefix; + do // this loop is just to allow us to parse (skip) attributes without doing a stack-dive + { + prefix = reader.PeekByte(); + if (prefix < 0) return RawResult.Nil; // EOF + switch (prefix) + { + // RESP2 + case '+': // simple string + reader.Consume(1); + return ReadLineTerminatedString(ResultType.SimpleString, flags, ref reader); + case '-': // error + reader.Consume(1); + return ReadLineTerminatedString(ResultType.Error, flags, ref reader); + case ':': // integer + reader.Consume(1); + return ReadLineTerminatedString(ResultType.Integer, flags, ref reader); + case '$': // bulk string + reader.Consume(1); + return ReadBulkString(ResultType.BulkString, flags, ref reader, includeDetilInExceptions, server); + case '*': // array + reader.Consume(1); + return ReadArray(ResultType.Array, flags, arena, in buffer, ref reader, includeDetilInExceptions, server); + // RESP3 + case '_': // null + reader.Consume(1); + return ReadLineTerminatedString(ResultType.Null, flags, ref reader); + case ',': // double + reader.Consume(1); + return ReadLineTerminatedString(ResultType.Double, flags, ref reader); + case '#': // boolean + reader.Consume(1); + return ReadLineTerminatedString(ResultType.Boolean, flags, ref reader); + case '!': // blob error + reader.Consume(1); + return ReadBulkString(ResultType.BlobError, flags, ref reader, includeDetilInExceptions, server); + case '=': // verbatim string + reader.Consume(1); + return ReadBulkString(ResultType.VerbatimString, flags, ref reader, includeDetilInExceptions, server); + case '(': // big number + reader.Consume(1); + return ReadLineTerminatedString(ResultType.BigInteger, flags, ref reader); + case '%': // map + reader.Consume(1); + return ReadArray(ResultType.Map, flags, arena, in buffer, ref reader, includeDetilInExceptions, server); + case '~': // set + reader.Consume(1); + return ReadArray(ResultType.Set, flags, arena, in buffer, ref reader, includeDetilInExceptions, 
server); + case '|': // attribute + reader.Consume(1); + var arr = ReadArray(ResultType.Attribute, flags, arena, in buffer, ref reader, includeDetilInExceptions, server); + if (!arr.HasValue) return RawResult.Nil; // failed to parse attribute data + + // for now, we want to just skip attribute data; so + // drop whatever we parsed on the floor and keep looking + break; // exits the SWITCH, not the DO/WHILE + case '>': // push + reader.Consume(1); + return ReadArray(ResultType.Push, flags, arena, in buffer, ref reader, includeDetilInExceptions, server); + } } + while (prefix == '|'); + + if (allowInlineProtocol) return ParseInlineProtocol(flags, arena, ReadLineTerminatedString(ResultType.SimpleString, flags, ref reader)); + throw new InvalidOperationException("Unexpected response prefix: " + (char)prefix); } - private static RawResult ParseInlineProtocol(Arena arena, in RawResult line) + private static RawResult ParseInlineProtocol(RawResult.ResultFlags flags, Arena arena, in RawResult line) { if (!line.HasValue) return RawResult.Nil; // incomplete line int count = 0; -#pragma warning disable IDE0059 foreach (var _ in line.GetInlineTokenizer()) count++; -#pragma warning restore IDE0059 var block = arena.Allocate(count); var iter = block.GetEnumerator(); foreach (var token in line.GetInlineTokenizer()) - { // this assigns *via a reference*, returned via the iterator; just... sweet - iter.GetNext() = new RawResult(line.Type, token, false); + { + // this assigns *via a reference*, returned via the iterator; just... 
sweet + iter.GetNext() = new RawResult(line.Resp3Type, token, flags); // spoof RESP2 from RESP1 + } + return new RawResult(ResultType.Array, block, flags); // spoof RESP2 from RESP1 + } + + internal bool HasPendingCallerFacingItems() + { + bool lockTaken = false; + try + { + Monitor.TryEnter(_writtenAwaitingResponse, 0, ref lockTaken); + if (lockTaken) + { + if (_writtenAwaitingResponse.Count != 0) + { + foreach (var item in _writtenAwaitingResponse) + { + if (!item.IsInternalCall) return true; + } + } + return false; + } + else + { + // don't contend the lock; *presume* that something + // qualifies; we can check again next heartbeat + return true; + } + } + finally + { + if (lockTaken) Monitor.Exit(_writtenAwaitingResponse); } - return new RawResult(block, false); } } } diff --git a/src/StackExchange.Redis/Profiling/IProfiledCommand.cs b/src/StackExchange.Redis/Profiling/IProfiledCommand.cs index c89b039cc..2f9c3cb54 100644 --- a/src/StackExchange.Redis/Profiling/IProfiledCommand.cs +++ b/src/StackExchange.Redis/Profiling/IProfiledCommand.cs @@ -80,12 +80,12 @@ public interface IProfiledCommand /// /// This can only be set if redis is configured as a cluster. /// - IProfiledCommand RetransmissionOf { get; } + IProfiledCommand? RetransmissionOf { get; } /// /// If RetransmissionOf is not null, this property will be set to either Ask or Moved to indicate /// what sort of response triggered the retransmission. - /// + /// /// This can be useful for determining the root cause of extra commands. /// RetransmissionReasonType? 
RetransmissionReason { get; } diff --git a/src/StackExchange.Redis/Profiling/ProfiledCommand.cs b/src/StackExchange.Redis/Profiling/ProfiledCommand.cs index 7d6a8fcfe..84ea23f08 100644 --- a/src/StackExchange.Redis/Profiling/ProfiledCommand.cs +++ b/src/StackExchange.Redis/Profiling/ProfiledCommand.cs @@ -1,5 +1,6 @@ using System; using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; using System.Net; using System.Runtime.CompilerServices; using System.Threading; @@ -10,16 +11,15 @@ internal sealed class ProfiledCommand : IProfiledCommand { private static readonly double TimestampToTicks = TimeSpan.TicksPerSecond / (double)Stopwatch.Frequency; - #region IProfiledCommand Impl public EndPoint EndPoint => Server.EndPoint; - public int Db => Message.Db; + public int Db => Message!.Db; - public string Command => Message is RedisDatabase.ExecuteMessage em ? em.Command.ToString() : Message.Command.ToString(); + public string Command => Message!.CommandString; - public CommandFlags Flags => Message.Flags; + public CommandFlags Flags => Message!.Flags; - public DateTime CommandCreated => MessageCreatedDateTime; + public DateTime CommandCreated { get; private set; } public TimeSpan CreationToEnqueued => GetElapsedTime(EnqueuedTimeStamp - MessageCreatedTimeStamp); @@ -37,28 +37,25 @@ private static TimeSpan GetElapsedTime(long timestampDelta) return new TimeSpan((long)(TimestampToTicks * timestampDelta)); } - public IProfiledCommand RetransmissionOf => OriginalProfiling; + public IProfiledCommand? RetransmissionOf => OriginalProfiling; public RetransmissionReasonType? RetransmissionReason { get; } - #endregion + public ProfiledCommand? NextElement { get; set; } - public ProfiledCommand NextElement { get; set; } - - private Message Message; + private Message? Message; private readonly ServerEndPoint Server; - private readonly ProfiledCommand OriginalProfiling; - - private DateTime MessageCreatedDateTime; + private readonly ProfiledCommand? 
OriginalProfiling; private long MessageCreatedTimeStamp; private long EnqueuedTimeStamp; private long RequestSentTimeStamp; private long ResponseReceivedTimeStamp; private long CompletedTimeStamp; + private ConnectionType? ConnectionType; private readonly ProfilingSession PushToWhenFinished; - private ProfiledCommand(ProfilingSession pushTo, ServerEndPoint server, ProfiledCommand resentFor, RetransmissionReasonType? reason) + private ProfiledCommand(ProfilingSession pushTo, ServerEndPoint server, ProfiledCommand? resentFor, RetransmissionReasonType? reason) { PushToWhenFinished = pushTo; OriginalProfiling = resentFor; @@ -76,17 +73,25 @@ public static ProfiledCommand NewAttachedToSameContext(ProfiledCommand resentFor return new ProfiledCommand(resentFor.PushToWhenFinished, server, resentFor, isMoved ? RetransmissionReasonType.Moved : RetransmissionReasonType.Ask); } + [MemberNotNull(nameof(Message))] public void SetMessage(Message msg) { // This method should never be called twice - if (Message != null) throw new InvalidOperationException($"{nameof(SetMessage)} called more than once"); + if (Message is not null) + { + throw new InvalidOperationException($"{nameof(SetMessage)} called more than once"); + } Message = msg; - MessageCreatedDateTime = msg.CreatedDateTime; + CommandCreated = msg.CreatedDateTime; MessageCreatedTimeStamp = msg.CreatedTimestamp; } - public void SetEnqueued() => SetTimestamp(ref EnqueuedTimeStamp); + public void SetEnqueued(ConnectionType? connType) + { + SetTimestamp(ref EnqueuedTimeStamp); + ConnectionType = connType; + } public void SetRequestSent() => SetTimestamp(ref RequestSentTimeStamp); @@ -103,7 +108,6 @@ public void SetCompleted() { // this method can be called multiple times, depending on how the task completed (async vs not) // so we actually have to guard against it. 
- var now = Stopwatch.GetTimestamp(); var oldVal = Interlocked.CompareExchange(ref CompletedTimeStamp, now, 0); @@ -116,20 +120,11 @@ public void SetCompleted() } } - public override string ToString() - { - return - $@"EndPoint = {EndPoint} -Db = {Db} -Command = {Command} -CommandCreated = {CommandCreated:u} -CreationToEnqueued = {CreationToEnqueued} -EnqueuedToSending = {EnqueuedToSending} -SentToResponse = {SentToResponse} -ResponseToCompletion = {ResponseToCompletion} -ElapsedTime = {ElapsedTime} -Flags = {Flags} -RetransmissionOf = ({RetransmissionOf})"; - } + public override string ToString() => +$@"{Command} (DB: {Db}, Flags: {Flags}) + EndPoint = {EndPoint} ({ConnectionType}) + Created = {CommandCreated:HH:mm:ss.ffff} + ElapsedTime = {ElapsedTime.TotalMilliseconds} ms (CreationToEnqueued: {CreationToEnqueued.TotalMilliseconds} ms, EnqueuedToSending: {EnqueuedToSending.TotalMilliseconds} ms, SentToResponse: {SentToResponse.TotalMilliseconds} ms, ResponseToCompletion = {ResponseToCompletion.TotalMilliseconds} ms){(RetransmissionOf != null ? @" + RetransmissionOf = " + RetransmissionOf : "")}"; } } diff --git a/src/StackExchange.Redis/Profiling/ProfiledCommandEnumerable.cs b/src/StackExchange.Redis/Profiling/ProfiledCommandEnumerable.cs index 9ae3cd0d4..4505410a3 100644 --- a/src/StackExchange.Redis/Profiling/ProfiledCommandEnumerable.cs +++ b/src/StackExchange.Redis/Profiling/ProfiledCommandEnumerable.cs @@ -26,12 +26,12 @@ namespace StackExchange.Redis.Profiling /// public struct Enumerator : IEnumerator { - private ProfiledCommand Head, CurrentBacker; + private ProfiledCommand? Head, CurrentBacker; private bool IsEmpty => Head == null; private bool IsUnstartedOrFinished => CurrentBacker == null; - internal Enumerator(ProfiledCommand head) + internal Enumerator(ProfiledCommand? head) { Head = head; CurrentBacker = null; @@ -40,9 +40,9 @@ internal Enumerator(ProfiledCommand head) /// /// The current element. 
/// - public IProfiledCommand Current => CurrentBacker; + public IProfiledCommand Current => CurrentBacker!; - object System.Collections.IEnumerator.Current => CurrentBacker; + object System.Collections.IEnumerator.Current => CurrentBacker!; /// /// Advances the enumeration, returning true if there is a new element to consume and false @@ -58,7 +58,7 @@ public bool MoveNext() } else { - CurrentBacker = CurrentBacker.NextElement; + CurrentBacker = CurrentBacker!.NextElement; } return CurrentBacker != null; @@ -76,15 +76,16 @@ public bool MoveNext() public void Dispose() => CurrentBacker = Head = null; } - private readonly ProfiledCommand _head; + private readonly ProfiledCommand? _head; private readonly int _count; + /// - /// Returns the number of commands captured in this snapshot + /// Returns the number of commands captured in this snapshot. /// public int Count() => _count; /// - /// Returns the number of commands captured in this snapshot that match a condition + /// Returns the number of commands captured in this snapshot that match a condition. /// /// The predicate to match. public int Count(Func predicate) @@ -96,36 +97,38 @@ public int Count(Func predicate) var cur = _head; for (int i = 0; i < _count; i++) { - if (predicate(cur)) result++; - cur = cur.NextElement; + if (cur != null && predicate(cur)) result++; + cur = cur!.NextElement; } return result; } /// - /// Returns the captured commands as an array + /// Returns the captured commands as an array. /// public IProfiledCommand[] ToArray() - { // exploit the fact that we know the length + { + // exploit the fact that we know the length if (_count == 0) return Array.Empty(); var arr = new IProfiledCommand[_count]; - var cur = _head; - for(int i = 0; i < _count; i++) + ProfiledCommand? 
cur = _head; + for (int i = 0; i < _count; i++) { - arr[i] = cur; - cur = cur.NextElement; + arr[i] = cur!; + cur = cur!.NextElement; } return arr; } /// - /// Returns the captured commands as a list + /// Returns the captured commands as a list. /// public List ToList() - { // exploit the fact that we know the length + { + // exploit the fact that we know the length var list = new List(_count); - var cur = _head; + ProfiledCommand? cur = _head; while (cur != null) { list.Add(cur); @@ -133,7 +136,7 @@ public List ToList() } return list; } - internal ProfiledCommandEnumerable(int count, ProfiledCommand head) + internal ProfiledCommandEnumerable(int count, ProfiledCommand? head) { _count = count; _head = head; diff --git a/src/StackExchange.Redis/Profiling/ProfilingSession.cs b/src/StackExchange.Redis/Profiling/ProfilingSession.cs index fd6b67def..3bc3caf38 100644 --- a/src/StackExchange.Redis/Profiling/ProfilingSession.cs +++ b/src/StackExchange.Redis/Profiling/ProfilingSession.cs @@ -10,23 +10,24 @@ public sealed class ProfilingSession /// /// Caller-defined state object. /// - public object UserToken { get; } + public object? UserToken { get; } + /// /// Create a new profiling session, optionally including a caller-defined state object. /// /// The state object to use for this session. - public ProfilingSession(object userToken = null) => UserToken = userToken; + public ProfilingSession(object? userToken = null) => UserToken = userToken; - private object _untypedHead; + private object? _untypedHead; internal void Add(ProfiledCommand command) { if (command == null) return; - object cur = Thread.VolatileRead(ref _untypedHead); + object? 
cur = Volatile.Read(ref _untypedHead); while (true) { - command.NextElement = (ProfiledCommand)cur; + command.NextElement = (ProfiledCommand?)cur; var got = Interlocked.CompareExchange(ref _untypedHead, command, cur); if (ReferenceEquals(got, cur)) break; // successful update cur = got; // retry; no need to re-fetch the field, we just did that @@ -39,12 +40,12 @@ internal void Add(ProfiledCommand command) /// public ProfiledCommandEnumerable FinishProfiling() { - var head = (ProfiledCommand)Interlocked.Exchange(ref _untypedHead, null); + var head = (ProfiledCommand?)Interlocked.Exchange(ref _untypedHead, null); // reverse the list so everything is ordered the way the consumer expected them int count = 0; - ProfiledCommand previous = null, current = head, next; - while(current != null) + ProfiledCommand? previous = null, current = head, next; + while (current != null) { next = current.NextElement; current.NextElement = previous; diff --git a/src/StackExchange.Redis/PublicAPI/PublicAPI.Shipped.txt b/src/StackExchange.Redis/PublicAPI/PublicAPI.Shipped.txt new file mode 100644 index 000000000..d474fc98d --- /dev/null +++ b/src/StackExchange.Redis/PublicAPI/PublicAPI.Shipped.txt @@ -0,0 +1,2293 @@ +#nullable enable +abstract StackExchange.Redis.RedisResult.IsNull.get -> bool +override StackExchange.Redis.ChannelMessage.Equals(object? obj) -> bool +override StackExchange.Redis.ChannelMessage.GetHashCode() -> int +override StackExchange.Redis.ChannelMessage.ToString() -> string! +override StackExchange.Redis.ChannelMessageQueue.ToString() -> string? +override StackExchange.Redis.ClientInfo.ToString() -> string! +override StackExchange.Redis.ClusterNode.Equals(object? obj) -> bool +override StackExchange.Redis.ClusterNode.GetHashCode() -> int +override StackExchange.Redis.ClusterNode.ToString() -> string! +override StackExchange.Redis.CommandMap.ToString() -> string! 
+override StackExchange.Redis.Configuration.AzureOptionsProvider.AbortOnConnectFail.get -> bool +override StackExchange.Redis.Configuration.AzureOptionsProvider.AfterConnectAsync(StackExchange.Redis.ConnectionMultiplexer! muxer, System.Action! log) -> System.Threading.Tasks.Task! +override StackExchange.Redis.Configuration.AzureOptionsProvider.DefaultVersion.get -> System.Version! +override StackExchange.Redis.Configuration.AzureOptionsProvider.GetDefaultSsl(StackExchange.Redis.EndPointCollection! endPoints) -> bool +override StackExchange.Redis.Configuration.AzureOptionsProvider.IsMatch(System.Net.EndPoint! endpoint) -> bool +override StackExchange.Redis.Configuration.AzureManagedRedisOptionsProvider.AbortOnConnectFail.get -> bool +override StackExchange.Redis.Configuration.AzureManagedRedisOptionsProvider.AfterConnectAsync(StackExchange.Redis.ConnectionMultiplexer! muxer, System.Action! log) -> System.Threading.Tasks.Task! +override StackExchange.Redis.Configuration.AzureManagedRedisOptionsProvider.DefaultVersion.get -> System.Version! +override StackExchange.Redis.Configuration.AzureManagedRedisOptionsProvider.GetDefaultSsl(StackExchange.Redis.EndPointCollection! endPoints) -> bool +override StackExchange.Redis.Configuration.AzureManagedRedisOptionsProvider.IsMatch(System.Net.EndPoint! endpoint) -> bool +override StackExchange.Redis.ConfigurationOptions.ToString() -> string! +override StackExchange.Redis.ConnectionCounters.ToString() -> string! +override StackExchange.Redis.ConnectionFailedEventArgs.ToString() -> string! +override StackExchange.Redis.ConnectionMultiplexer.ToString() -> string! +override StackExchange.Redis.GeoEntry.Equals(object? obj) -> bool +override StackExchange.Redis.GeoEntry.GetHashCode() -> int +override StackExchange.Redis.GeoEntry.ToString() -> string! +override StackExchange.Redis.GeoPosition.Equals(object? 
obj) -> bool +override StackExchange.Redis.GeoPosition.GetHashCode() -> int +override StackExchange.Redis.GeoPosition.ToString() -> string! +override StackExchange.Redis.GeoRadiusResult.ToString() -> string! +override StackExchange.Redis.HashEntry.Equals(object? obj) -> bool +override StackExchange.Redis.HashEntry.GetHashCode() -> int +override StackExchange.Redis.HashEntry.ToString() -> string! +override StackExchange.Redis.Maintenance.ServerMaintenanceEvent.ToString() -> string? +override StackExchange.Redis.NameValueEntry.Equals(object? obj) -> bool +override StackExchange.Redis.NameValueEntry.GetHashCode() -> int +override StackExchange.Redis.NameValueEntry.ToString() -> string! +override StackExchange.Redis.RedisChannel.Equals(object? obj) -> bool +override StackExchange.Redis.RedisChannel.GetHashCode() -> int +override StackExchange.Redis.RedisChannel.ToString() -> string! +override StackExchange.Redis.RedisConnectionException.GetObjectData(System.Runtime.Serialization.SerializationInfo! info, System.Runtime.Serialization.StreamingContext context) -> void +override StackExchange.Redis.RedisFeatures.Equals(object? obj) -> bool +override StackExchange.Redis.RedisFeatures.GetHashCode() -> int +override StackExchange.Redis.RedisFeatures.ToString() -> string! +override StackExchange.Redis.RedisKey.Equals(object? obj) -> bool +override StackExchange.Redis.RedisKey.GetHashCode() -> int +override StackExchange.Redis.RedisKey.ToString() -> string! +override StackExchange.Redis.RedisTimeoutException.GetObjectData(System.Runtime.Serialization.SerializationInfo! info, System.Runtime.Serialization.StreamingContext context) -> void +override StackExchange.Redis.RedisValue.Equals(object? obj) -> bool +override StackExchange.Redis.RedisValue.GetHashCode() -> int +override StackExchange.Redis.RedisValue.ToString() -> string! +override StackExchange.Redis.Role.ToString() -> string! +override StackExchange.Redis.ServerCounters.ToString() -> string! 
+override StackExchange.Redis.SlotRange.Equals(object? obj) -> bool +override StackExchange.Redis.SlotRange.GetHashCode() -> int +override StackExchange.Redis.SlotRange.ToString() -> string! +override StackExchange.Redis.SocketManager.ToString() -> string! +override StackExchange.Redis.SortedSetEntry.Equals(object? obj) -> bool +override StackExchange.Redis.SortedSetEntry.GetHashCode() -> int +override StackExchange.Redis.SortedSetEntry.ToString() -> string! +StackExchange.Redis.Aggregate +StackExchange.Redis.Aggregate.Max = 2 -> StackExchange.Redis.Aggregate +StackExchange.Redis.Aggregate.Min = 1 -> StackExchange.Redis.Aggregate +StackExchange.Redis.Aggregate.Sum = 0 -> StackExchange.Redis.Aggregate +StackExchange.Redis.BacklogPolicy +StackExchange.Redis.BacklogPolicy.AbortPendingOnConnectionFailure.get -> bool +StackExchange.Redis.BacklogPolicy.AbortPendingOnConnectionFailure.init -> void +StackExchange.Redis.BacklogPolicy.BacklogPolicy() -> void +StackExchange.Redis.BacklogPolicy.QueueWhileDisconnected.get -> bool +StackExchange.Redis.BacklogPolicy.QueueWhileDisconnected.init -> void +StackExchange.Redis.Bitwise +StackExchange.Redis.Bitwise.And = 0 -> StackExchange.Redis.Bitwise +StackExchange.Redis.Bitwise.Not = 3 -> StackExchange.Redis.Bitwise +StackExchange.Redis.Bitwise.Or = 1 -> StackExchange.Redis.Bitwise +StackExchange.Redis.Bitwise.Xor = 2 -> StackExchange.Redis.Bitwise +StackExchange.Redis.ChannelMessage +StackExchange.Redis.ChannelMessage.Channel.get -> StackExchange.Redis.RedisChannel +StackExchange.Redis.ChannelMessage.ChannelMessage() -> void +StackExchange.Redis.ChannelMessage.Message.get -> StackExchange.Redis.RedisValue +StackExchange.Redis.ChannelMessage.SubscriptionChannel.get -> StackExchange.Redis.RedisChannel +StackExchange.Redis.ChannelMessageQueue +StackExchange.Redis.ChannelMessageQueue.Channel.get -> StackExchange.Redis.RedisChannel +StackExchange.Redis.ChannelMessageQueue.Completion.get -> System.Threading.Tasks.Task! 
+StackExchange.Redis.ChannelMessageQueue.GetAsyncEnumerator(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Collections.Generic.IAsyncEnumerator<StackExchange.Redis.ChannelMessage>! +StackExchange.Redis.ChannelMessageQueue.OnMessage(System.Action<StackExchange.Redis.ChannelMessage>! handler) -> void +StackExchange.Redis.ChannelMessageQueue.OnMessage(System.Func<StackExchange.Redis.ChannelMessage, System.Threading.Tasks.Task>! handler) -> void +StackExchange.Redis.ChannelMessageQueue.ReadAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.ValueTask<StackExchange.Redis.ChannelMessage> +StackExchange.Redis.ChannelMessageQueue.TryGetCount(out int count) -> bool +StackExchange.Redis.ChannelMessageQueue.TryRead(out StackExchange.Redis.ChannelMessage item) -> bool +StackExchange.Redis.ChannelMessageQueue.Unsubscribe(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> void +StackExchange.Redis.ChannelMessageQueue.UnsubscribeAsync(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! 
+StackExchange.Redis.ClientFlags +StackExchange.Redis.ClientFlags.Blocked = 16 -> StackExchange.Redis.ClientFlags +StackExchange.Redis.ClientFlags.BroadcastTracking = 16384 -> StackExchange.Redis.ClientFlags +StackExchange.Redis.ClientFlags.CloseASAP = 256 -> StackExchange.Redis.ClientFlags +StackExchange.Redis.ClientFlags.Closing = 64 -> StackExchange.Redis.ClientFlags +StackExchange.Redis.ClientFlags.KeysTracking = 4096 -> StackExchange.Redis.ClientFlags +StackExchange.Redis.ClientFlags.Master = 4 -> StackExchange.Redis.ClientFlags +StackExchange.Redis.ClientFlags.None = 0 -> StackExchange.Redis.ClientFlags +StackExchange.Redis.ClientFlags.PubSubSubscriber = 512 -> StackExchange.Redis.ClientFlags +StackExchange.Redis.ClientFlags.ReadOnlyCluster = 1024 -> StackExchange.Redis.ClientFlags +StackExchange.Redis.ClientFlags.Replica = 2 -> StackExchange.Redis.ClientFlags +StackExchange.Redis.ClientFlags.ReplicaMonitor = 1 -> StackExchange.Redis.ClientFlags +StackExchange.Redis.ClientFlags.Slave = 2 -> StackExchange.Redis.ClientFlags +StackExchange.Redis.ClientFlags.SlaveMonitor = 1 -> StackExchange.Redis.ClientFlags +StackExchange.Redis.ClientFlags.TrackingTargetInvalid = 8192 -> StackExchange.Redis.ClientFlags +StackExchange.Redis.ClientFlags.Transaction = 8 -> StackExchange.Redis.ClientFlags +StackExchange.Redis.ClientFlags.TransactionDoomed = 32 -> StackExchange.Redis.ClientFlags +StackExchange.Redis.ClientFlags.Unblocked = 128 -> StackExchange.Redis.ClientFlags +StackExchange.Redis.ClientFlags.UnixDomainSocket = 2048 -> StackExchange.Redis.ClientFlags +StackExchange.Redis.ClientInfo +StackExchange.Redis.ClientInfo.Address.get -> System.Net.EndPoint? 
+StackExchange.Redis.ClientInfo.AgeSeconds.get -> int +StackExchange.Redis.ClientInfo.ClientInfo() -> void +StackExchange.Redis.ClientInfo.ClientType.get -> StackExchange.Redis.ClientType +StackExchange.Redis.ClientInfo.Database.get -> int +StackExchange.Redis.ClientInfo.Flags.get -> StackExchange.Redis.ClientFlags +StackExchange.Redis.ClientInfo.FlagsRaw.get -> string? +StackExchange.Redis.ClientInfo.Host.get -> string? +StackExchange.Redis.ClientInfo.Id.get -> long +StackExchange.Redis.ClientInfo.IdleSeconds.get -> int +StackExchange.Redis.ClientInfo.LastCommand.get -> string? +StackExchange.Redis.ClientInfo.LibraryName.get -> string? +StackExchange.Redis.ClientInfo.LibraryVersion.get -> string? +StackExchange.Redis.ClientInfo.Name.get -> string? +StackExchange.Redis.ClientInfo.PatternSubscriptionCount.get -> int +StackExchange.Redis.ClientInfo.Port.get -> int +StackExchange.Redis.ClientInfo.ProtocolVersion.get -> string? +StackExchange.Redis.ClientInfo.Raw.get -> string? +StackExchange.Redis.ClientInfo.SubscriptionCount.get -> int +StackExchange.Redis.ClientInfo.TransactionCommandLength.get -> int +StackExchange.Redis.ClientKillFilter +StackExchange.Redis.ClientKillFilter.ClientKillFilter() -> void +StackExchange.Redis.ClientKillFilter.ClientType.get -> StackExchange.Redis.ClientType? +StackExchange.Redis.ClientKillFilter.Endpoint.get -> System.Net.EndPoint? +StackExchange.Redis.ClientKillFilter.Id.get -> long? +StackExchange.Redis.ClientKillFilter.MaxAgeInSeconds.get -> long? +StackExchange.Redis.ClientKillFilter.ServerEndpoint.get -> System.Net.EndPoint? +StackExchange.Redis.ClientKillFilter.SkipMe.get -> bool? +StackExchange.Redis.ClientKillFilter.Username.get -> string? +StackExchange.Redis.ClientKillFilter.WithClientType(StackExchange.Redis.ClientType? clientType) -> StackExchange.Redis.ClientKillFilter! +StackExchange.Redis.ClientKillFilter.WithEndpoint(System.Net.EndPoint? endpoint) -> StackExchange.Redis.ClientKillFilter! 
+StackExchange.Redis.ClientKillFilter.WithId(long? id) -> StackExchange.Redis.ClientKillFilter! +StackExchange.Redis.ClientKillFilter.WithMaxAgeInSeconds(long? maxAgeInSeconds) -> StackExchange.Redis.ClientKillFilter! +StackExchange.Redis.ClientKillFilter.WithServerEndpoint(System.Net.EndPoint? serverEndpoint) -> StackExchange.Redis.ClientKillFilter! +StackExchange.Redis.ClientKillFilter.WithSkipMe(bool? skipMe) -> StackExchange.Redis.ClientKillFilter! +StackExchange.Redis.ClientKillFilter.WithUsername(string? username) -> StackExchange.Redis.ClientKillFilter! +StackExchange.Redis.ClientType +StackExchange.Redis.ClientType.Normal = 0 -> StackExchange.Redis.ClientType +StackExchange.Redis.ClientType.PubSub = 2 -> StackExchange.Redis.ClientType +StackExchange.Redis.ClientType.Replica = 1 -> StackExchange.Redis.ClientType +StackExchange.Redis.ClientType.Slave = 1 -> StackExchange.Redis.ClientType +StackExchange.Redis.ClusterConfiguration +StackExchange.Redis.ClusterConfiguration.GetBySlot(int slot) -> StackExchange.Redis.ClusterNode? +StackExchange.Redis.ClusterConfiguration.GetBySlot(StackExchange.Redis.RedisKey key) -> StackExchange.Redis.ClusterNode? +StackExchange.Redis.ClusterConfiguration.Nodes.get -> System.Collections.Generic.ICollection<StackExchange.Redis.ClusterNode!>! +StackExchange.Redis.ClusterConfiguration.Origin.get -> System.Net.EndPoint! +StackExchange.Redis.ClusterConfiguration.this[System.Net.EndPoint! endpoint].get -> StackExchange.Redis.ClusterNode? +StackExchange.Redis.ClusterNode +StackExchange.Redis.ClusterNode.Children.get -> System.Collections.Generic.IList<StackExchange.Redis.ClusterNode!>! +StackExchange.Redis.ClusterNode.CompareTo(StackExchange.Redis.ClusterNode? other) -> int +StackExchange.Redis.ClusterNode.EndPoint.get -> System.Net.EndPoint? +StackExchange.Redis.ClusterNode.Equals(StackExchange.Redis.ClusterNode? 
other) -> bool +StackExchange.Redis.ClusterNode.IsConnected.get -> bool +StackExchange.Redis.ClusterNode.IsFail.get -> bool +StackExchange.Redis.ClusterNode.IsMyself.get -> bool +StackExchange.Redis.ClusterNode.IsNoAddr.get -> bool +StackExchange.Redis.ClusterNode.IsPossiblyFail.get -> bool +StackExchange.Redis.ClusterNode.IsReplica.get -> bool +StackExchange.Redis.ClusterNode.IsSlave.get -> bool +StackExchange.Redis.ClusterNode.NodeId.get -> string! +StackExchange.Redis.ClusterNode.Parent.get -> StackExchange.Redis.ClusterNode? +StackExchange.Redis.ClusterNode.ParentNodeId.get -> string? +StackExchange.Redis.ClusterNode.Raw.get -> string! +StackExchange.Redis.ClusterNode.Slots.get -> System.Collections.Generic.IList! +StackExchange.Redis.CommandFlags +StackExchange.Redis.CommandFlags.DemandMaster = 4 -> StackExchange.Redis.CommandFlags +StackExchange.Redis.CommandFlags.DemandReplica = StackExchange.Redis.CommandFlags.DemandMaster | StackExchange.Redis.CommandFlags.PreferReplica -> StackExchange.Redis.CommandFlags +StackExchange.Redis.CommandFlags.DemandSlave = StackExchange.Redis.CommandFlags.DemandMaster | StackExchange.Redis.CommandFlags.PreferReplica -> StackExchange.Redis.CommandFlags +StackExchange.Redis.CommandFlags.FireAndForget = 2 -> StackExchange.Redis.CommandFlags +StackExchange.Redis.CommandFlags.HighPriority = 1 -> StackExchange.Redis.CommandFlags +StackExchange.Redis.CommandFlags.None = 0 -> StackExchange.Redis.CommandFlags +StackExchange.Redis.CommandFlags.NoRedirect = 64 -> StackExchange.Redis.CommandFlags +StackExchange.Redis.CommandFlags.NoScriptCache = 512 -> StackExchange.Redis.CommandFlags +StackExchange.Redis.CommandFlags.PreferMaster = 0 -> StackExchange.Redis.CommandFlags +StackExchange.Redis.CommandFlags.PreferReplica = 8 -> StackExchange.Redis.CommandFlags +StackExchange.Redis.CommandFlags.PreferSlave = 8 -> StackExchange.Redis.CommandFlags +StackExchange.Redis.CommandMap +StackExchange.Redis.CommandStatus 
+StackExchange.Redis.CommandStatus.Sent = 2 -> StackExchange.Redis.CommandStatus +StackExchange.Redis.CommandStatus.Unknown = 0 -> StackExchange.Redis.CommandStatus +StackExchange.Redis.CommandStatus.WaitingToBeSent = 1 -> StackExchange.Redis.CommandStatus +StackExchange.Redis.CommandStatus.WaitingInBacklog = 3 -> StackExchange.Redis.CommandStatus +StackExchange.Redis.CommandTrace +StackExchange.Redis.CommandTrace.Arguments.get -> StackExchange.Redis.RedisValue[]! +StackExchange.Redis.CommandTrace.Duration.get -> System.TimeSpan +StackExchange.Redis.CommandTrace.GetHelpUrl() -> string? +StackExchange.Redis.CommandTrace.Time.get -> System.DateTime +StackExchange.Redis.CommandTrace.UniqueId.get -> long +StackExchange.Redis.Condition +StackExchange.Redis.ConditionResult +StackExchange.Redis.ConditionResult.WasSatisfied.get -> bool +StackExchange.Redis.Configuration.AzureOptionsProvider +StackExchange.Redis.Configuration.AzureOptionsProvider.AzureOptionsProvider() -> void +StackExchange.Redis.Configuration.AzureManagedRedisOptionsProvider +StackExchange.Redis.Configuration.AzureManagedRedisOptionsProvider.AzureManagedRedisOptionsProvider() -> void +StackExchange.Redis.Configuration.DefaultOptionsProvider +StackExchange.Redis.Configuration.DefaultOptionsProvider.ClientName.get -> string! +StackExchange.Redis.Configuration.DefaultOptionsProvider.DefaultOptionsProvider() -> void +StackExchange.Redis.Configuration.Tunnel +StackExchange.Redis.Configuration.Tunnel.Tunnel() -> void +static StackExchange.Redis.Configuration.Tunnel.HttpProxy(System.Net.EndPoint! proxy) -> StackExchange.Redis.Configuration.Tunnel! +virtual StackExchange.Redis.Configuration.Tunnel.BeforeAuthenticateAsync(System.Net.EndPoint! endpoint, StackExchange.Redis.ConnectionType connectionType, System.Net.Sockets.Socket? 
socket, System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.ValueTask +virtual StackExchange.Redis.Configuration.Tunnel.BeforeSocketConnectAsync(System.Net.EndPoint! endPoint, StackExchange.Redis.ConnectionType connectionType, System.Net.Sockets.Socket? socket, System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.ValueTask +virtual StackExchange.Redis.Configuration.Tunnel.GetSocketConnectEndpointAsync(System.Net.EndPoint! endpoint, System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.ValueTask +StackExchange.Redis.ConfigurationOptions +StackExchange.Redis.ConfigurationOptions.AbortOnConnectFail.get -> bool +StackExchange.Redis.ConfigurationOptions.AbortOnConnectFail.set -> void +StackExchange.Redis.ConfigurationOptions.AllowAdmin.get -> bool +StackExchange.Redis.ConfigurationOptions.AllowAdmin.set -> void +StackExchange.Redis.ConfigurationOptions.Apply(System.Action! configure) -> StackExchange.Redis.ConfigurationOptions! +StackExchange.Redis.ConfigurationOptions.AsyncTimeout.get -> int +StackExchange.Redis.ConfigurationOptions.AsyncTimeout.set -> void +StackExchange.Redis.ConfigurationOptions.BacklogPolicy.get -> StackExchange.Redis.BacklogPolicy! +StackExchange.Redis.ConfigurationOptions.BacklogPolicy.set -> void +StackExchange.Redis.ConfigurationOptions.BeforeSocketConnect.get -> System.Action? +StackExchange.Redis.ConfigurationOptions.BeforeSocketConnect.set -> void +StackExchange.Redis.ConfigurationOptions.CertificateSelection -> System.Net.Security.LocalCertificateSelectionCallback? +StackExchange.Redis.ConfigurationOptions.CertificateValidation -> System.Net.Security.RemoteCertificateValidationCallback? 
+StackExchange.Redis.ConfigurationOptions.ChannelPrefix.get -> StackExchange.Redis.RedisChannel +StackExchange.Redis.ConfigurationOptions.ChannelPrefix.set -> void +StackExchange.Redis.ConfigurationOptions.CheckCertificateRevocation.get -> bool +StackExchange.Redis.ConfigurationOptions.CheckCertificateRevocation.set -> void +StackExchange.Redis.ConfigurationOptions.ClientName.get -> string? +StackExchange.Redis.ConfigurationOptions.ClientName.set -> void +StackExchange.Redis.ConfigurationOptions.Clone() -> StackExchange.Redis.ConfigurationOptions! +StackExchange.Redis.ConfigurationOptions.CommandMap.get -> StackExchange.Redis.CommandMap! +StackExchange.Redis.ConfigurationOptions.CommandMap.set -> void +StackExchange.Redis.ConfigurationOptions.ConfigCheckSeconds.get -> int +StackExchange.Redis.ConfigurationOptions.ConfigCheckSeconds.set -> void +StackExchange.Redis.ConfigurationOptions.ConfigurationChannel.get -> string! +StackExchange.Redis.ConfigurationOptions.ConfigurationChannel.set -> void +StackExchange.Redis.ConfigurationOptions.ConfigurationOptions() -> void +StackExchange.Redis.ConfigurationOptions.ConnectRetry.get -> int +StackExchange.Redis.ConfigurationOptions.ConnectRetry.set -> void +StackExchange.Redis.ConfigurationOptions.ConnectTimeout.get -> int +StackExchange.Redis.ConfigurationOptions.ConnectTimeout.set -> void +StackExchange.Redis.ConfigurationOptions.DefaultDatabase.get -> int? +StackExchange.Redis.ConfigurationOptions.DefaultDatabase.set -> void +StackExchange.Redis.ConfigurationOptions.Defaults.get -> StackExchange.Redis.Configuration.DefaultOptionsProvider! +StackExchange.Redis.ConfigurationOptions.Defaults.set -> void +StackExchange.Redis.ConfigurationOptions.DefaultVersion.get -> System.Version! +StackExchange.Redis.ConfigurationOptions.DefaultVersion.set -> void +StackExchange.Redis.ConfigurationOptions.EndPoints.get -> StackExchange.Redis.EndPointCollection! 
+StackExchange.Redis.ConfigurationOptions.EndPoints.init -> void +StackExchange.Redis.ConfigurationOptions.HeartbeatConsistencyChecks.get -> bool +StackExchange.Redis.ConfigurationOptions.HeartbeatConsistencyChecks.set -> void +StackExchange.Redis.ConfigurationOptions.HeartbeatInterval.get -> System.TimeSpan +StackExchange.Redis.ConfigurationOptions.HeartbeatInterval.set -> void +StackExchange.Redis.ConfigurationOptions.HighIntegrity.get -> bool +StackExchange.Redis.ConfigurationOptions.HighIntegrity.set -> void +StackExchange.Redis.ConfigurationOptions.HighPrioritySocketThreads.get -> bool +StackExchange.Redis.ConfigurationOptions.HighPrioritySocketThreads.set -> void +StackExchange.Redis.ConfigurationOptions.IncludeDetailInExceptions.get -> bool +StackExchange.Redis.ConfigurationOptions.IncludeDetailInExceptions.set -> void +StackExchange.Redis.ConfigurationOptions.IncludePerformanceCountersInExceptions.get -> bool +StackExchange.Redis.ConfigurationOptions.IncludePerformanceCountersInExceptions.set -> void +StackExchange.Redis.ConfigurationOptions.KeepAlive.get -> int +StackExchange.Redis.ConfigurationOptions.KeepAlive.set -> void +StackExchange.Redis.ConfigurationOptions.LibraryName.get -> string? +StackExchange.Redis.ConfigurationOptions.LibraryName.set -> void +StackExchange.Redis.ConfigurationOptions.LoggerFactory.get -> Microsoft.Extensions.Logging.ILoggerFactory? +StackExchange.Redis.ConfigurationOptions.LoggerFactory.set -> void +StackExchange.Redis.ConfigurationOptions.Password.get -> string? +StackExchange.Redis.ConfigurationOptions.Password.set -> void +StackExchange.Redis.ConfigurationOptions.PreserveAsyncOrder.get -> bool +StackExchange.Redis.ConfigurationOptions.PreserveAsyncOrder.set -> void +StackExchange.Redis.ConfigurationOptions.Proxy.get -> StackExchange.Redis.Proxy +StackExchange.Redis.ConfigurationOptions.Proxy.set -> void +StackExchange.Redis.ConfigurationOptions.ReconnectRetryPolicy.get -> StackExchange.Redis.IReconnectRetryPolicy! 
+StackExchange.Redis.ConfigurationOptions.ReconnectRetryPolicy.set -> void +StackExchange.Redis.ConfigurationOptions.ResolveDns.get -> bool +StackExchange.Redis.ConfigurationOptions.ResolveDns.set -> void +StackExchange.Redis.ConfigurationOptions.ResponseTimeout.get -> int +StackExchange.Redis.ConfigurationOptions.ResponseTimeout.set -> void +StackExchange.Redis.ConfigurationOptions.ServiceName.get -> string? +StackExchange.Redis.ConfigurationOptions.ServiceName.set -> void +StackExchange.Redis.ConfigurationOptions.SetClientLibrary.get -> bool +StackExchange.Redis.ConfigurationOptions.SetClientLibrary.set -> void +StackExchange.Redis.ConfigurationOptions.SetDefaultPorts() -> void +StackExchange.Redis.ConfigurationOptions.SocketManager.get -> StackExchange.Redis.SocketManager? +StackExchange.Redis.ConfigurationOptions.SocketManager.set -> void +StackExchange.Redis.ConfigurationOptions.Ssl.get -> bool +StackExchange.Redis.ConfigurationOptions.Ssl.set -> void +StackExchange.Redis.ConfigurationOptions.SslHost.get -> string? +StackExchange.Redis.ConfigurationOptions.SslHost.set -> void +StackExchange.Redis.ConfigurationOptions.SslProtocols.get -> System.Security.Authentication.SslProtocols? +StackExchange.Redis.ConfigurationOptions.SslProtocols.set -> void +StackExchange.Redis.ConfigurationOptions.SyncTimeout.get -> int +StackExchange.Redis.ConfigurationOptions.SyncTimeout.set -> void +StackExchange.Redis.ConfigurationOptions.TieBreaker.get -> string! +StackExchange.Redis.ConfigurationOptions.TieBreaker.set -> void +StackExchange.Redis.ConfigurationOptions.ToString(bool includePassword) -> string! +StackExchange.Redis.ConfigurationOptions.TrustIssuer(string! issuerCertificatePath) -> void +StackExchange.Redis.ConfigurationOptions.TrustIssuer(System.Security.Cryptography.X509Certificates.X509Certificate2! issuer) -> void +StackExchange.Redis.ConfigurationOptions.Tunnel.get -> StackExchange.Redis.Configuration.Tunnel? 
+StackExchange.Redis.ConfigurationOptions.Tunnel.set -> void +StackExchange.Redis.ConfigurationOptions.User.get -> string? +StackExchange.Redis.ConfigurationOptions.User.set -> void +StackExchange.Redis.ConfigurationOptions.UseSsl.get -> bool +StackExchange.Redis.ConfigurationOptions.UseSsl.set -> void +StackExchange.Redis.ConfigurationOptions.WriteBuffer.get -> int +StackExchange.Redis.ConfigurationOptions.WriteBuffer.set -> void +StackExchange.Redis.ConnectionCounters +StackExchange.Redis.ConnectionCounters.CompletedAsynchronously.get -> long +StackExchange.Redis.ConnectionCounters.CompletedSynchronously.get -> long +StackExchange.Redis.ConnectionCounters.ConnectionType.get -> StackExchange.Redis.ConnectionType +StackExchange.Redis.ConnectionCounters.FailedAsynchronously.get -> long +StackExchange.Redis.ConnectionCounters.IsEmpty.get -> bool +StackExchange.Redis.ConnectionCounters.NonPreferredEndpointCount.get -> long +StackExchange.Redis.ConnectionCounters.OperationCount.get -> long +StackExchange.Redis.ConnectionCounters.PendingUnsentItems.get -> int +StackExchange.Redis.ConnectionCounters.ResponsesAwaitingAsyncCompletion.get -> int +StackExchange.Redis.ConnectionCounters.SentItemsAwaitingResponse.get -> int +StackExchange.Redis.ConnectionCounters.SocketCount.get -> long +StackExchange.Redis.ConnectionCounters.Subscriptions.get -> long +StackExchange.Redis.ConnectionCounters.TotalOutstanding.get -> int +StackExchange.Redis.ConnectionCounters.WriterCount.get -> int +StackExchange.Redis.ConnectionFailedEventArgs +StackExchange.Redis.ConnectionFailedEventArgs.ConnectionFailedEventArgs(object! sender, System.Net.EndPoint! endPoint, StackExchange.Redis.ConnectionType connectionType, StackExchange.Redis.ConnectionFailureType failureType, System.Exception! exception, string! 
physicalName) -> void +StackExchange.Redis.ConnectionFailedEventArgs.ConnectionType.get -> StackExchange.Redis.ConnectionType +StackExchange.Redis.ConnectionFailedEventArgs.EndPoint.get -> System.Net.EndPoint? +StackExchange.Redis.ConnectionFailedEventArgs.Exception.get -> System.Exception? +StackExchange.Redis.ConnectionFailedEventArgs.FailureType.get -> StackExchange.Redis.ConnectionFailureType +StackExchange.Redis.ConnectionFailureType +StackExchange.Redis.ConnectionFailureType.AuthenticationFailure = 3 -> StackExchange.Redis.ConnectionFailureType +StackExchange.Redis.ConnectionFailureType.ConnectionDisposed = 7 -> StackExchange.Redis.ConnectionFailureType +StackExchange.Redis.ConnectionFailureType.InternalFailure = 5 -> StackExchange.Redis.ConnectionFailureType +StackExchange.Redis.ConnectionFailureType.Loading = 8 -> StackExchange.Redis.ConnectionFailureType +StackExchange.Redis.ConnectionFailureType.None = 0 -> StackExchange.Redis.ConnectionFailureType +StackExchange.Redis.ConnectionFailureType.ProtocolFailure = 4 -> StackExchange.Redis.ConnectionFailureType +StackExchange.Redis.ConnectionFailureType.SocketClosed = 6 -> StackExchange.Redis.ConnectionFailureType +StackExchange.Redis.ConnectionFailureType.SocketFailure = 2 -> StackExchange.Redis.ConnectionFailureType +StackExchange.Redis.ConnectionFailureType.UnableToConnect = 9 -> StackExchange.Redis.ConnectionFailureType +StackExchange.Redis.ConnectionFailureType.ResponseIntegrityFailure = 10 -> StackExchange.Redis.ConnectionFailureType +StackExchange.Redis.ConnectionFailureType.UnableToResolvePhysicalConnection = 1 -> StackExchange.Redis.ConnectionFailureType +StackExchange.Redis.ConnectionMultiplexer +StackExchange.Redis.ConnectionMultiplexer.ClientName.get -> string! +StackExchange.Redis.ConnectionMultiplexer.Close(bool allowCommandsToComplete = true) -> void +StackExchange.Redis.ConnectionMultiplexer.CloseAsync(bool allowCommandsToComplete = true) -> System.Threading.Tasks.Task! 
+StackExchange.Redis.ConnectionMultiplexer.Configuration.get -> string! +StackExchange.Redis.ConnectionMultiplexer.ConfigurationChanged -> System.EventHandler? +StackExchange.Redis.ConnectionMultiplexer.ConfigurationChangedBroadcast -> System.EventHandler? +StackExchange.Redis.ConnectionMultiplexer.Configure(System.IO.TextWriter? log = null) -> bool +StackExchange.Redis.ConnectionMultiplexer.ConfigureAsync(System.IO.TextWriter? log = null) -> System.Threading.Tasks.Task! +StackExchange.Redis.ConnectionMultiplexer.ConnectionFailed -> System.EventHandler? +StackExchange.Redis.ConnectionMultiplexer.ConnectionRestored -> System.EventHandler? +StackExchange.Redis.ConnectionMultiplexer.Dispose() -> void +StackExchange.Redis.ConnectionMultiplexer.DisposeAsync() -> System.Threading.Tasks.ValueTask +StackExchange.Redis.ConnectionMultiplexer.ErrorMessage -> System.EventHandler? +StackExchange.Redis.ConnectionMultiplexer.ExportConfiguration(System.IO.Stream! destination, StackExchange.Redis.ExportOptions options = (StackExchange.Redis.ExportOptions)-1) -> void +StackExchange.Redis.ConnectionMultiplexer.GetCounters() -> StackExchange.Redis.ServerCounters! +StackExchange.Redis.ConnectionMultiplexer.GetDatabase(int db = -1, object? asyncState = null) -> StackExchange.Redis.IDatabase! +StackExchange.Redis.ConnectionMultiplexer.GetEndPoints(bool configuredOnly = false) -> System.Net.EndPoint![]! +StackExchange.Redis.ConnectionMultiplexer.GetHashSlot(StackExchange.Redis.RedisKey key) -> int +StackExchange.Redis.ConnectionMultiplexer.GetSentinelMasterConnection(StackExchange.Redis.ConfigurationOptions! config, System.IO.TextWriter? log = null) -> StackExchange.Redis.ConnectionMultiplexer! +StackExchange.Redis.ConnectionMultiplexer.GetServer(string! host, int port, object? asyncState = null) -> StackExchange.Redis.IServer! +StackExchange.Redis.ConnectionMultiplexer.GetServer(string! hostAndPort, object? asyncState = null) -> StackExchange.Redis.IServer! 
+StackExchange.Redis.ConnectionMultiplexer.GetServer(System.Net.EndPoint? endpoint, object? asyncState = null) -> StackExchange.Redis.IServer! +StackExchange.Redis.ConnectionMultiplexer.GetServer(System.Net.IPAddress! host, int port) -> StackExchange.Redis.IServer! +StackExchange.Redis.ConnectionMultiplexer.GetServers() -> StackExchange.Redis.IServer![]! +StackExchange.Redis.ConnectionMultiplexer.GetStatus() -> string! +StackExchange.Redis.ConnectionMultiplexer.GetStatus(System.IO.TextWriter! log) -> void +StackExchange.Redis.ConnectionMultiplexer.GetStormLog() -> string? +StackExchange.Redis.ConnectionMultiplexer.GetSubscriber(object? asyncState = null) -> StackExchange.Redis.ISubscriber! +StackExchange.Redis.ConnectionMultiplexer.HashSlot(StackExchange.Redis.RedisKey key) -> int +StackExchange.Redis.ConnectionMultiplexer.HashSlotMoved -> System.EventHandler? +StackExchange.Redis.ConnectionMultiplexer.IncludeDetailInExceptions.get -> bool +StackExchange.Redis.ConnectionMultiplexer.IncludeDetailInExceptions.set -> void +StackExchange.Redis.ConnectionMultiplexer.IncludePerformanceCountersInExceptions.get -> bool +StackExchange.Redis.ConnectionMultiplexer.IncludePerformanceCountersInExceptions.set -> void +StackExchange.Redis.ConnectionMultiplexer.InternalError -> System.EventHandler? +StackExchange.Redis.ConnectionMultiplexer.IsConnected.get -> bool +StackExchange.Redis.ConnectionMultiplexer.IsConnecting.get -> bool +StackExchange.Redis.ConnectionMultiplexer.OperationCount.get -> long +StackExchange.Redis.ConnectionMultiplexer.PreserveAsyncOrder.get -> bool +StackExchange.Redis.ConnectionMultiplexer.PreserveAsyncOrder.set -> void +StackExchange.Redis.ConnectionMultiplexer.PublishReconfigure(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.ConnectionMultiplexer.PublishReconfigureAsync(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! 
+StackExchange.Redis.ConnectionMultiplexer.ReconfigureAsync(string! reason) -> System.Threading.Tasks.Task! +StackExchange.Redis.ConnectionMultiplexer.RegisterProfiler(System.Func<StackExchange.Redis.Profiling.ProfilingSession?>! profilingSessionProvider) -> void +StackExchange.Redis.ConnectionMultiplexer.ResetStormLog() -> void +StackExchange.Redis.ConnectionMultiplexer.ServerMaintenanceEvent -> System.EventHandler<StackExchange.Redis.Maintenance.ServerMaintenanceEvent!>? +StackExchange.Redis.ConnectionMultiplexer.StormLogThreshold.get -> int +StackExchange.Redis.ConnectionMultiplexer.StormLogThreshold.set -> void +StackExchange.Redis.ConnectionMultiplexer.TimeoutMilliseconds.get -> int +StackExchange.Redis.ConnectionMultiplexer.Wait(System.Threading.Tasks.Task! task) -> void +StackExchange.Redis.ConnectionMultiplexer.Wait<T>(System.Threading.Tasks.Task<T>! task) -> T +StackExchange.Redis.ConnectionMultiplexer.WaitAll(params System.Threading.Tasks.Task![]! tasks) -> void +StackExchange.Redis.ConnectionType +StackExchange.Redis.ConnectionType.Interactive = 1 -> StackExchange.Redis.ConnectionType +StackExchange.Redis.ConnectionType.None = 0 -> StackExchange.Redis.ConnectionType +StackExchange.Redis.ConnectionType.Subscription = 2 -> StackExchange.Redis.ConnectionType +StackExchange.Redis.EndPointCollection +StackExchange.Redis.EndPointCollection.Add(string! host, int port) -> void +StackExchange.Redis.EndPointCollection.Add(string! hostAndPort) -> void +StackExchange.Redis.EndPointCollection.Add(System.Net.IPAddress! host, int port) -> void +StackExchange.Redis.EndPointCollection.EndPointCollection() -> void +StackExchange.Redis.EndPointCollection.EndPointCollection(System.Collections.Generic.IList<System.Net.EndPoint!>! endpoints) -> void +StackExchange.Redis.EndPointCollection.GetEnumerator() -> System.Collections.Generic.IEnumerator<System.Net.EndPoint!>! +StackExchange.Redis.EndPointCollection.TryAdd(System.Net.EndPoint! endpoint) -> bool +StackExchange.Redis.EndPointEventArgs +StackExchange.Redis.EndPointEventArgs.EndPoint.get -> System.Net.EndPoint! +StackExchange.Redis.EndPointEventArgs.EndPointEventArgs(object! 
sender, System.Net.EndPoint! endpoint) -> void +StackExchange.Redis.Exclude +StackExchange.Redis.Exclude.Both = StackExchange.Redis.Exclude.Start | StackExchange.Redis.Exclude.Stop -> StackExchange.Redis.Exclude +StackExchange.Redis.Exclude.None = 0 -> StackExchange.Redis.Exclude +StackExchange.Redis.Exclude.Start = 1 -> StackExchange.Redis.Exclude +StackExchange.Redis.Exclude.Stop = 2 -> StackExchange.Redis.Exclude +StackExchange.Redis.ExpireResult +StackExchange.Redis.ExpireResult.ConditionNotMet = 0 -> StackExchange.Redis.ExpireResult +StackExchange.Redis.ExpireResult.Due = 2 -> StackExchange.Redis.ExpireResult +StackExchange.Redis.ExpireResult.NoSuchField = -2 -> StackExchange.Redis.ExpireResult +StackExchange.Redis.ExpireResult.Success = 1 -> StackExchange.Redis.ExpireResult +StackExchange.Redis.ExpireWhen +StackExchange.Redis.ExpireWhen.Always = 0 -> StackExchange.Redis.ExpireWhen +StackExchange.Redis.ExpireWhen.GreaterThanCurrentExpiry = 1 -> StackExchange.Redis.ExpireWhen +StackExchange.Redis.ExpireWhen.HasExpiry = 2 -> StackExchange.Redis.ExpireWhen +StackExchange.Redis.ExpireWhen.HasNoExpiry = 3 -> StackExchange.Redis.ExpireWhen +StackExchange.Redis.ExpireWhen.LessThanCurrentExpiry = 4 -> StackExchange.Redis.ExpireWhen +StackExchange.Redis.ExponentialRetry +StackExchange.Redis.ExponentialRetry.ExponentialRetry(int deltaBackOffMilliseconds) -> void +StackExchange.Redis.ExponentialRetry.ExponentialRetry(int deltaBackOffMilliseconds, int maxDeltaBackOffMilliseconds) -> void +StackExchange.Redis.ExponentialRetry.ShouldRetry(long currentRetryCount, int timeElapsedMillisecondsSinceLastRetry) -> bool +StackExchange.Redis.ExportOptions +StackExchange.Redis.ExportOptions.All = -1 -> StackExchange.Redis.ExportOptions +StackExchange.Redis.ExportOptions.Client = 4 -> StackExchange.Redis.ExportOptions +StackExchange.Redis.ExportOptions.Cluster = 8 -> StackExchange.Redis.ExportOptions +StackExchange.Redis.ExportOptions.Config = 2 -> StackExchange.Redis.ExportOptions 
+StackExchange.Redis.ExportOptions.Info = 1 -> StackExchange.Redis.ExportOptions +StackExchange.Redis.ExportOptions.None = 0 -> StackExchange.Redis.ExportOptions +StackExchange.Redis.ExtensionMethods +StackExchange.Redis.GeoEntry +StackExchange.Redis.GeoEntry.Equals(StackExchange.Redis.GeoEntry other) -> bool +StackExchange.Redis.GeoEntry.GeoEntry() -> void +StackExchange.Redis.GeoEntry.GeoEntry(double longitude, double latitude, StackExchange.Redis.RedisValue member) -> void +StackExchange.Redis.GeoEntry.Latitude.get -> double +StackExchange.Redis.GeoEntry.Longitude.get -> double +StackExchange.Redis.GeoEntry.Member.get -> StackExchange.Redis.RedisValue +StackExchange.Redis.GeoEntry.Position.get -> StackExchange.Redis.GeoPosition +StackExchange.Redis.GeoPosition +StackExchange.Redis.GeoPosition.Equals(StackExchange.Redis.GeoPosition other) -> bool +StackExchange.Redis.GeoPosition.GeoPosition() -> void +StackExchange.Redis.GeoPosition.GeoPosition(double longitude, double latitude) -> void +StackExchange.Redis.GeoPosition.Latitude.get -> double +StackExchange.Redis.GeoPosition.Longitude.get -> double +StackExchange.Redis.GeoRadiusOptions +StackExchange.Redis.GeoRadiusOptions.Default = StackExchange.Redis.GeoRadiusOptions.WithCoordinates | StackExchange.Redis.GeoRadiusOptions.WithDistance -> StackExchange.Redis.GeoRadiusOptions +StackExchange.Redis.GeoRadiusOptions.None = 0 -> StackExchange.Redis.GeoRadiusOptions +StackExchange.Redis.GeoRadiusOptions.WithCoordinates = 1 -> StackExchange.Redis.GeoRadiusOptions +StackExchange.Redis.GeoRadiusOptions.WithDistance = 2 -> StackExchange.Redis.GeoRadiusOptions +StackExchange.Redis.GeoRadiusOptions.WithGeoHash = 4 -> StackExchange.Redis.GeoRadiusOptions +StackExchange.Redis.GeoRadiusResult +StackExchange.Redis.GeoRadiusResult.Distance.get -> double? +StackExchange.Redis.GeoRadiusResult.GeoRadiusResult() -> void +StackExchange.Redis.GeoRadiusResult.GeoRadiusResult(in StackExchange.Redis.RedisValue member, double? 
distance, long? hash, StackExchange.Redis.GeoPosition? position) -> void +StackExchange.Redis.GeoRadiusResult.Hash.get -> long? +StackExchange.Redis.GeoRadiusResult.Member.get -> StackExchange.Redis.RedisValue +StackExchange.Redis.GeoRadiusResult.Position.get -> StackExchange.Redis.GeoPosition? +StackExchange.Redis.GeoSearchBox +StackExchange.Redis.GeoSearchBox.GeoSearchBox(double height, double width, StackExchange.Redis.GeoUnit unit = StackExchange.Redis.GeoUnit.Meters) -> void +StackExchange.Redis.GeoSearchCircle +StackExchange.Redis.GeoSearchCircle.GeoSearchCircle(double radius, StackExchange.Redis.GeoUnit unit = StackExchange.Redis.GeoUnit.Meters) -> void +StackExchange.Redis.GeoSearchShape +StackExchange.Redis.GeoSearchShape.GeoSearchShape(StackExchange.Redis.GeoUnit unit) -> void +StackExchange.Redis.GeoSearchShape.Unit.get -> StackExchange.Redis.GeoUnit +StackExchange.Redis.GeoUnit +StackExchange.Redis.GeoUnit.Feet = 3 -> StackExchange.Redis.GeoUnit +StackExchange.Redis.GeoUnit.Kilometers = 1 -> StackExchange.Redis.GeoUnit +StackExchange.Redis.GeoUnit.Meters = 0 -> StackExchange.Redis.GeoUnit +StackExchange.Redis.GeoUnit.Miles = 2 -> StackExchange.Redis.GeoUnit +StackExchange.Redis.HashEntry +StackExchange.Redis.HashEntry.Equals(StackExchange.Redis.HashEntry other) -> bool +StackExchange.Redis.HashEntry.HashEntry() -> void +StackExchange.Redis.HashEntry.HashEntry(StackExchange.Redis.RedisValue name, StackExchange.Redis.RedisValue value) -> void +StackExchange.Redis.HashEntry.Key.get -> StackExchange.Redis.RedisValue +StackExchange.Redis.HashEntry.Name.get -> StackExchange.Redis.RedisValue +StackExchange.Redis.HashEntry.Value.get -> StackExchange.Redis.RedisValue +StackExchange.Redis.HashSlotMovedEventArgs +StackExchange.Redis.HashSlotMovedEventArgs.HashSlot.get -> int +StackExchange.Redis.HashSlotMovedEventArgs.HashSlotMovedEventArgs(object! sender, int hashSlot, System.Net.EndPoint! old, System.Net.EndPoint! 
new) -> void +StackExchange.Redis.HashSlotMovedEventArgs.NewEndPoint.get -> System.Net.EndPoint! +StackExchange.Redis.HashSlotMovedEventArgs.OldEndPoint.get -> System.Net.EndPoint? +StackExchange.Redis.IBatch +StackExchange.Redis.IBatch.Execute() -> void +StackExchange.Redis.IConnectionMultiplexer +StackExchange.Redis.IConnectionMultiplexer.ClientName.get -> string! +StackExchange.Redis.IConnectionMultiplexer.Close(bool allowCommandsToComplete = true) -> void +StackExchange.Redis.IConnectionMultiplexer.CloseAsync(bool allowCommandsToComplete = true) -> System.Threading.Tasks.Task! +StackExchange.Redis.IConnectionMultiplexer.Configuration.get -> string! +StackExchange.Redis.IConnectionMultiplexer.ConfigurationChanged -> System.EventHandler! +StackExchange.Redis.IConnectionMultiplexer.ConfigurationChangedBroadcast -> System.EventHandler! +StackExchange.Redis.IConnectionMultiplexer.Configure(System.IO.TextWriter? log = null) -> bool +StackExchange.Redis.IConnectionMultiplexer.ConfigureAsync(System.IO.TextWriter? log = null) -> System.Threading.Tasks.Task! +StackExchange.Redis.IConnectionMultiplexer.ConnectionFailed -> System.EventHandler! +StackExchange.Redis.IConnectionMultiplexer.ConnectionRestored -> System.EventHandler! +StackExchange.Redis.IConnectionMultiplexer.ErrorMessage -> System.EventHandler! +StackExchange.Redis.IConnectionMultiplexer.ExportConfiguration(System.IO.Stream! destination, StackExchange.Redis.ExportOptions options = (StackExchange.Redis.ExportOptions)-1) -> void +StackExchange.Redis.IConnectionMultiplexer.GetCounters() -> StackExchange.Redis.ServerCounters! +StackExchange.Redis.IConnectionMultiplexer.GetDatabase(int db = -1, object? asyncState = null) -> StackExchange.Redis.IDatabase! +StackExchange.Redis.IConnectionMultiplexer.GetEndPoints(bool configuredOnly = false) -> System.Net.EndPoint![]! 
+StackExchange.Redis.IConnectionMultiplexer.GetHashSlot(StackExchange.Redis.RedisKey key) -> int +StackExchange.Redis.IConnectionMultiplexer.GetServer(string! host, int port, object? asyncState = null) -> StackExchange.Redis.IServer! +StackExchange.Redis.IConnectionMultiplexer.GetServer(string! hostAndPort, object? asyncState = null) -> StackExchange.Redis.IServer! +StackExchange.Redis.IConnectionMultiplexer.GetServer(System.Net.EndPoint! endpoint, object? asyncState = null) -> StackExchange.Redis.IServer! +StackExchange.Redis.IConnectionMultiplexer.GetServer(System.Net.IPAddress! host, int port) -> StackExchange.Redis.IServer! +StackExchange.Redis.IConnectionMultiplexer.GetServers() -> StackExchange.Redis.IServer![]! +StackExchange.Redis.IConnectionMultiplexer.GetStatus() -> string! +StackExchange.Redis.IConnectionMultiplexer.GetStatus(System.IO.TextWriter! log) -> void +StackExchange.Redis.IConnectionMultiplexer.GetStormLog() -> string? +StackExchange.Redis.IConnectionMultiplexer.GetSubscriber(object? asyncState = null) -> StackExchange.Redis.ISubscriber! +StackExchange.Redis.IConnectionMultiplexer.HashSlot(StackExchange.Redis.RedisKey key) -> int +StackExchange.Redis.IConnectionMultiplexer.HashSlotMoved -> System.EventHandler! +StackExchange.Redis.IConnectionMultiplexer.IncludeDetailInExceptions.get -> bool +StackExchange.Redis.IConnectionMultiplexer.IncludeDetailInExceptions.set -> void +StackExchange.Redis.IConnectionMultiplexer.InternalError -> System.EventHandler! 
+StackExchange.Redis.IConnectionMultiplexer.IsConnected.get -> bool +StackExchange.Redis.IConnectionMultiplexer.IsConnecting.get -> bool +StackExchange.Redis.IConnectionMultiplexer.OperationCount.get -> long +StackExchange.Redis.IConnectionMultiplexer.PreserveAsyncOrder.get -> bool +StackExchange.Redis.IConnectionMultiplexer.PreserveAsyncOrder.set -> void +StackExchange.Redis.IConnectionMultiplexer.PublishReconfigure(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IConnectionMultiplexer.PublishReconfigureAsync(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IConnectionMultiplexer.RegisterProfiler(System.Func! profilingSessionProvider) -> void +StackExchange.Redis.IConnectionMultiplexer.ResetStormLog() -> void +StackExchange.Redis.IConnectionMultiplexer.ServerMaintenanceEvent -> System.EventHandler! +StackExchange.Redis.IConnectionMultiplexer.StormLogThreshold.get -> int +StackExchange.Redis.IConnectionMultiplexer.StormLogThreshold.set -> void +StackExchange.Redis.IConnectionMultiplexer.TimeoutMilliseconds.get -> int +StackExchange.Redis.IConnectionMultiplexer.ToString() -> string! +StackExchange.Redis.IConnectionMultiplexer.Wait(System.Threading.Tasks.Task! task) -> void +StackExchange.Redis.IConnectionMultiplexer.Wait(System.Threading.Tasks.Task! task) -> T +StackExchange.Redis.IConnectionMultiplexer.WaitAll(params System.Threading.Tasks.Task![]! tasks) -> void +StackExchange.Redis.IDatabase +StackExchange.Redis.IDatabase.CreateBatch(object? asyncState = null) -> StackExchange.Redis.IBatch! +StackExchange.Redis.IDatabase.CreateTransaction(object? asyncState = null) -> StackExchange.Redis.ITransaction! 
+StackExchange.Redis.IDatabase.Database.get -> int +StackExchange.Redis.IDatabase.DebugObject(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue +StackExchange.Redis.IDatabase.Execute(string! command, params object![]! args) -> StackExchange.Redis.RedisResult! +StackExchange.Redis.IDatabase.Execute(string! command, System.Collections.Generic.ICollection! args, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisResult! +StackExchange.Redis.IDatabase.GeoAdd(StackExchange.Redis.RedisKey key, double longitude, double latitude, StackExchange.Redis.RedisValue member, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +StackExchange.Redis.IDatabase.GeoAdd(StackExchange.Redis.RedisKey key, StackExchange.Redis.GeoEntry value, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +StackExchange.Redis.IDatabase.GeoAdd(StackExchange.Redis.RedisKey key, StackExchange.Redis.GeoEntry[]! values, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.GeoDistance(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member1, StackExchange.Redis.RedisValue member2, StackExchange.Redis.GeoUnit unit = StackExchange.Redis.GeoUnit.Meters, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> double? +StackExchange.Redis.IDatabase.GeoHash(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> string? +StackExchange.Redis.IDatabase.GeoHash(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! members, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> string?[]! 
+StackExchange.Redis.IDatabase.GeoPosition(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.GeoPosition? +StackExchange.Redis.IDatabase.GeoPosition(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! members, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.GeoPosition?[]! +StackExchange.Redis.IDatabase.GeoRadius(StackExchange.Redis.RedisKey key, double longitude, double latitude, double radius, StackExchange.Redis.GeoUnit unit = StackExchange.Redis.GeoUnit.Meters, int count = -1, StackExchange.Redis.Order? order = null, StackExchange.Redis.GeoRadiusOptions options = StackExchange.Redis.GeoRadiusOptions.Default, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.GeoRadiusResult[]! +StackExchange.Redis.IDatabase.GeoRadius(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member, double radius, StackExchange.Redis.GeoUnit unit = StackExchange.Redis.GeoUnit.Meters, int count = -1, StackExchange.Redis.Order? order = null, StackExchange.Redis.GeoRadiusOptions options = StackExchange.Redis.GeoRadiusOptions.Default, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.GeoRadiusResult[]! +StackExchange.Redis.IDatabase.GeoRemove(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +StackExchange.Redis.IDatabase.GeoSearch(StackExchange.Redis.RedisKey key, double longitude, double latitude, StackExchange.Redis.GeoSearchShape! shape, int count = -1, bool demandClosest = true, StackExchange.Redis.Order? 
order = null, StackExchange.Redis.GeoRadiusOptions options = StackExchange.Redis.GeoRadiusOptions.Default, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.GeoRadiusResult[]! +StackExchange.Redis.IDatabase.GeoSearch(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member, StackExchange.Redis.GeoSearchShape! shape, int count = -1, bool demandClosest = true, StackExchange.Redis.Order? order = null, StackExchange.Redis.GeoRadiusOptions options = StackExchange.Redis.GeoRadiusOptions.Default, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.GeoRadiusResult[]! +StackExchange.Redis.IDatabase.GeoSearchAndStore(StackExchange.Redis.RedisKey sourceKey, StackExchange.Redis.RedisKey destinationKey, double longitude, double latitude, StackExchange.Redis.GeoSearchShape! shape, int count = -1, bool demandClosest = true, StackExchange.Redis.Order? order = null, bool storeDistances = false, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.GeoSearchAndStore(StackExchange.Redis.RedisKey sourceKey, StackExchange.Redis.RedisKey destinationKey, StackExchange.Redis.RedisValue member, StackExchange.Redis.GeoSearchShape! shape, int count = -1, bool demandClosest = true, StackExchange.Redis.Order? 
order = null, bool storeDistances = false, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.HashDecrement(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue hashField, double value, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> double +StackExchange.Redis.IDatabase.HashDecrement(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue hashField, long value = 1, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.HashDelete(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue hashField, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +StackExchange.Redis.IDatabase.HashDelete(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! hashFields, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.HashExists(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue hashField, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +StackExchange.Redis.IDatabase.HashFieldExpire(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! hashFields, System.DateTime expiry, StackExchange.Redis.ExpireWhen when = StackExchange.Redis.ExpireWhen.Always, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.ExpireResult[]! +StackExchange.Redis.IDatabase.HashFieldExpire(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! hashFields, System.TimeSpan expiry, StackExchange.Redis.ExpireWhen when = StackExchange.Redis.ExpireWhen.Always, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.ExpireResult[]! 
+StackExchange.Redis.IDatabase.HashFieldGetExpireDateTime(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! hashFields, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long[]! +StackExchange.Redis.IDatabase.HashFieldGetTimeToLive(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! hashFields, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long[]! +StackExchange.Redis.IDatabase.HashFieldPersist(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! hashFields, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.PersistResult[]! +StackExchange.Redis.IDatabase.HashGet(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue hashField, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue +StackExchange.Redis.IDatabase.HashGet(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! hashFields, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue[]! +StackExchange.Redis.IDatabase.HashGetAll(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.HashEntry[]! +StackExchange.Redis.IDatabase.HashGetLease(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue hashField, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.Lease? 
+StackExchange.Redis.IDatabase.HashIncrement(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue hashField, double value, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> double +StackExchange.Redis.IDatabase.HashIncrement(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue hashField, long value = 1, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.HashKeys(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue[]! +StackExchange.Redis.IDatabase.HashLength(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.HashRandomField(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue +StackExchange.Redis.IDatabase.HashRandomFields(StackExchange.Redis.RedisKey key, long count, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue[]! +StackExchange.Redis.IDatabase.HashRandomFieldsWithValues(StackExchange.Redis.RedisKey key, long count, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.HashEntry[]! +StackExchange.Redis.IDatabase.HashScan(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue pattern = default(StackExchange.Redis.RedisValue), int pageSize = 250, long cursor = 0, int pageOffset = 0, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Collections.Generic.IEnumerable! +StackExchange.Redis.IDatabase.HashScan(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue pattern, int pageSize, StackExchange.Redis.CommandFlags flags) -> System.Collections.Generic.IEnumerable! 
+StackExchange.Redis.IDatabase.HashScanNoValues(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue pattern = default(StackExchange.Redis.RedisValue), int pageSize = 250, long cursor = 0, int pageOffset = 0, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Collections.Generic.IEnumerable! +StackExchange.Redis.IDatabase.HashSet(StackExchange.Redis.RedisKey key, StackExchange.Redis.HashEntry[]! hashFields, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> void +StackExchange.Redis.IDatabase.HashSet(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue hashField, StackExchange.Redis.RedisValue value, StackExchange.Redis.When when = StackExchange.Redis.When.Always, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +StackExchange.Redis.IDatabase.HashStringLength(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue hashField, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.HashValues(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue[]! +StackExchange.Redis.IDatabase.HyperLogLogAdd(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue value, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +StackExchange.Redis.IDatabase.HyperLogLogAdd(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! values, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +StackExchange.Redis.IDatabase.HyperLogLogLength(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.HyperLogLogLength(StackExchange.Redis.RedisKey[]! 
keys, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.HyperLogLogMerge(StackExchange.Redis.RedisKey destination, StackExchange.Redis.RedisKey first, StackExchange.Redis.RedisKey second, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> void +StackExchange.Redis.IDatabase.HyperLogLogMerge(StackExchange.Redis.RedisKey destination, StackExchange.Redis.RedisKey[]! sourceKeys, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> void +StackExchange.Redis.IDatabase.IdentifyEndpoint(StackExchange.Redis.RedisKey key = default(StackExchange.Redis.RedisKey), StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Net.EndPoint? +StackExchange.Redis.IDatabase.KeyCopy(StackExchange.Redis.RedisKey sourceKey, StackExchange.Redis.RedisKey destinationKey, int destinationDatabase = -1, bool replace = false, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +StackExchange.Redis.IDatabase.KeyDelete(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +StackExchange.Redis.IDatabase.KeyDelete(StackExchange.Redis.RedisKey[]! keys, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.KeyDump(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> byte[]? +StackExchange.Redis.IDatabase.KeyEncoding(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> string? +StackExchange.Redis.IDatabase.KeyExists(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +StackExchange.Redis.IDatabase.KeyExists(StackExchange.Redis.RedisKey[]! 
keys, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.KeyExpire(StackExchange.Redis.RedisKey key, System.DateTime? expiry, StackExchange.Redis.CommandFlags flags) -> bool +StackExchange.Redis.IDatabase.KeyExpire(StackExchange.Redis.RedisKey key, System.DateTime? expiry, StackExchange.Redis.ExpireWhen when = StackExchange.Redis.ExpireWhen.Always, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +StackExchange.Redis.IDatabase.KeyExpire(StackExchange.Redis.RedisKey key, System.TimeSpan? expiry, StackExchange.Redis.CommandFlags flags) -> bool +StackExchange.Redis.IDatabase.KeyExpire(StackExchange.Redis.RedisKey key, System.TimeSpan? expiry, StackExchange.Redis.ExpireWhen when = StackExchange.Redis.ExpireWhen.Always, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +StackExchange.Redis.IDatabase.KeyExpireTime(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.DateTime? +StackExchange.Redis.IDatabase.KeyFrequency(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long? +StackExchange.Redis.IDatabase.KeyIdleTime(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.TimeSpan? +StackExchange.Redis.IDatabase.KeyMigrate(StackExchange.Redis.RedisKey key, System.Net.EndPoint! 
toServer, int toDatabase = 0, int timeoutMilliseconds = 0, StackExchange.Redis.MigrateOptions migrateOptions = StackExchange.Redis.MigrateOptions.None, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> void +StackExchange.Redis.IDatabase.KeyMove(StackExchange.Redis.RedisKey key, int database, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +StackExchange.Redis.IDatabase.KeyPersist(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +StackExchange.Redis.IDatabase.KeyRandom(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisKey +StackExchange.Redis.IDatabase.KeyRefCount(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long? +StackExchange.Redis.IDatabase.KeyRename(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisKey newKey, StackExchange.Redis.When when = StackExchange.Redis.When.Always, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +StackExchange.Redis.IDatabase.KeyRestore(StackExchange.Redis.RedisKey key, byte[]! value, System.TimeSpan? expiry = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> void +StackExchange.Redis.IDatabase.KeyTimeToLive(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.TimeSpan? +StackExchange.Redis.IDatabase.KeyTouch(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +StackExchange.Redis.IDatabase.KeyTouch(StackExchange.Redis.RedisKey[]! 
keys, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.KeyType(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisType +StackExchange.Redis.IDatabase.ListGetByIndex(StackExchange.Redis.RedisKey key, long index, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue +StackExchange.Redis.IDatabase.ListInsertAfter(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue pivot, StackExchange.Redis.RedisValue value, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.ListInsertBefore(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue pivot, StackExchange.Redis.RedisValue value, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.ListMove(StackExchange.Redis.RedisKey sourceKey, StackExchange.Redis.RedisKey destinationKey, StackExchange.Redis.ListSide sourceSide, StackExchange.Redis.ListSide destinationSide, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue +StackExchange.Redis.IDatabase.ListLeftPop(StackExchange.Redis.RedisKey key, long count, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue[]! +StackExchange.Redis.IDatabase.ListLeftPop(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue +StackExchange.Redis.IDatabase.ListLeftPop(StackExchange.Redis.RedisKey[]! 
keys, long count, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.ListPopResult +StackExchange.Redis.IDatabase.ListLeftPush(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue value, StackExchange.Redis.When when = StackExchange.Redis.When.Always, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.ListLeftPush(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! values, StackExchange.Redis.CommandFlags flags) -> long +StackExchange.Redis.IDatabase.ListLeftPush(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! values, StackExchange.Redis.When when = StackExchange.Redis.When.Always, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.ListLength(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.ListPosition(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue element, long rank = 1, long maxLength = 0, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.ListPositions(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue element, long count, long rank = 1, long maxLength = 0, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long[]! +StackExchange.Redis.IDatabase.ListRange(StackExchange.Redis.RedisKey key, long start = 0, long stop = -1, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue[]! 
+StackExchange.Redis.IDatabase.ListRemove(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue value, long count = 0, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.ListRightPop(StackExchange.Redis.RedisKey key, long count, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue[]! +StackExchange.Redis.IDatabase.ListRightPop(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue +StackExchange.Redis.IDatabase.ListRightPop(StackExchange.Redis.RedisKey[]! keys, long count, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.ListPopResult +StackExchange.Redis.IDatabase.ListRightPopLeftPush(StackExchange.Redis.RedisKey source, StackExchange.Redis.RedisKey destination, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue +StackExchange.Redis.IDatabase.ListRightPush(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue value, StackExchange.Redis.When when = StackExchange.Redis.When.Always, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.ListRightPush(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! values, StackExchange.Redis.CommandFlags flags) -> long +StackExchange.Redis.IDatabase.ListRightPush(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! 
values, StackExchange.Redis.When when = StackExchange.Redis.When.Always, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.ListSetByIndex(StackExchange.Redis.RedisKey key, long index, StackExchange.Redis.RedisValue value, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> void +StackExchange.Redis.IDatabase.ListTrim(StackExchange.Redis.RedisKey key, long start, long stop, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> void +StackExchange.Redis.IDatabase.LockExtend(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue value, System.TimeSpan expiry, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +StackExchange.Redis.IDatabase.LockQuery(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue +StackExchange.Redis.IDatabase.LockRelease(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue value, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +StackExchange.Redis.IDatabase.LockTake(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue value, System.TimeSpan expiry, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +StackExchange.Redis.IDatabase.Publish(StackExchange.Redis.RedisChannel channel, StackExchange.Redis.RedisValue message, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.ScriptEvaluate(byte[]! hash, StackExchange.Redis.RedisKey[]? keys = null, StackExchange.Redis.RedisValue[]? values = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisResult! +StackExchange.Redis.IDatabase.ScriptEvaluate(StackExchange.Redis.LoadedLuaScript! script, object? 
parameters = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisResult! +StackExchange.Redis.IDatabase.ScriptEvaluate(StackExchange.Redis.LuaScript! script, object? parameters = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisResult! +StackExchange.Redis.IDatabase.ScriptEvaluate(string! script, StackExchange.Redis.RedisKey[]? keys = null, StackExchange.Redis.RedisValue[]? values = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisResult! +StackExchange.Redis.IDatabase.ScriptEvaluateReadOnly(byte[]! hash, StackExchange.Redis.RedisKey[]? keys = null, StackExchange.Redis.RedisValue[]? values = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisResult! +StackExchange.Redis.IDatabase.ScriptEvaluateReadOnly(string! script, StackExchange.Redis.RedisKey[]? keys = null, StackExchange.Redis.RedisValue[]? values = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisResult! +StackExchange.Redis.IDatabase.SetAdd(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue value, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +StackExchange.Redis.IDatabase.SetAdd(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! values, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.SetCombine(StackExchange.Redis.SetOperation operation, StackExchange.Redis.RedisKey first, StackExchange.Redis.RedisKey second, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue[]! +StackExchange.Redis.IDatabase.SetCombine(StackExchange.Redis.SetOperation operation, StackExchange.Redis.RedisKey[]! 
keys, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue[]! +StackExchange.Redis.IDatabase.SetCombineAndStore(StackExchange.Redis.SetOperation operation, StackExchange.Redis.RedisKey destination, StackExchange.Redis.RedisKey first, StackExchange.Redis.RedisKey second, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.SetCombineAndStore(StackExchange.Redis.SetOperation operation, StackExchange.Redis.RedisKey destination, StackExchange.Redis.RedisKey[]! keys, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.SetContains(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue value, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +StackExchange.Redis.IDatabase.SetContains(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! values, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool[]! +StackExchange.Redis.IDatabase.SetIntersectionLength(StackExchange.Redis.RedisKey[]! keys, long limit = 0, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.SetLength(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.SetMembers(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue[]! 
+StackExchange.Redis.IDatabase.SetMove(StackExchange.Redis.RedisKey source, StackExchange.Redis.RedisKey destination, StackExchange.Redis.RedisValue value, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +StackExchange.Redis.IDatabase.SetPop(StackExchange.Redis.RedisKey key, long count, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue[]! +StackExchange.Redis.IDatabase.SetPop(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue +StackExchange.Redis.IDatabase.SetRandomMember(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue +StackExchange.Redis.IDatabase.SetRandomMembers(StackExchange.Redis.RedisKey key, long count, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue[]! +StackExchange.Redis.IDatabase.SetRemove(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue value, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +StackExchange.Redis.IDatabase.SetRemove(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! values, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.SetScan(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue pattern = default(StackExchange.Redis.RedisValue), int pageSize = 250, long cursor = 0, int pageOffset = 0, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Collections.Generic.IEnumerable<StackExchange.Redis.RedisValue>! +StackExchange.Redis.IDatabase.SetScan(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue pattern, int pageSize, StackExchange.Redis.CommandFlags flags) -> System.Collections.Generic.IEnumerable<StackExchange.Redis.RedisValue>! 
+StackExchange.Redis.IDatabase.Sort(StackExchange.Redis.RedisKey key, long skip = 0, long take = -1, StackExchange.Redis.Order order = StackExchange.Redis.Order.Ascending, StackExchange.Redis.SortType sortType = StackExchange.Redis.SortType.Numeric, StackExchange.Redis.RedisValue by = default(StackExchange.Redis.RedisValue), StackExchange.Redis.RedisValue[]? get = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue[]! +StackExchange.Redis.IDatabase.SortAndStore(StackExchange.Redis.RedisKey destination, StackExchange.Redis.RedisKey key, long skip = 0, long take = -1, StackExchange.Redis.Order order = StackExchange.Redis.Order.Ascending, StackExchange.Redis.SortType sortType = StackExchange.Redis.SortType.Numeric, StackExchange.Redis.RedisValue by = default(StackExchange.Redis.RedisValue), StackExchange.Redis.RedisValue[]? get = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.SortedSetAdd(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member, double score, StackExchange.Redis.CommandFlags flags) -> bool +StackExchange.Redis.IDatabase.SortedSetAdd(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member, double score, StackExchange.Redis.When when, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +StackExchange.Redis.IDatabase.SortedSetAdd(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member, double score, StackExchange.Redis.SortedSetWhen when = StackExchange.Redis.SortedSetWhen.Always, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +StackExchange.Redis.IDatabase.SortedSetAdd(StackExchange.Redis.RedisKey key, StackExchange.Redis.SortedSetEntry[]! 
values, StackExchange.Redis.CommandFlags flags) -> long +StackExchange.Redis.IDatabase.SortedSetAdd(StackExchange.Redis.RedisKey key, StackExchange.Redis.SortedSetEntry[]! values, StackExchange.Redis.When when, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.SortedSetAdd(StackExchange.Redis.RedisKey key, StackExchange.Redis.SortedSetEntry[]! values, StackExchange.Redis.SortedSetWhen when = StackExchange.Redis.SortedSetWhen.Always, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.SortedSetCombine(StackExchange.Redis.SetOperation operation, StackExchange.Redis.RedisKey[]! keys, double[]? weights = null, StackExchange.Redis.Aggregate aggregate = StackExchange.Redis.Aggregate.Sum, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue[]! +StackExchange.Redis.IDatabase.SortedSetCombineWithScores(StackExchange.Redis.SetOperation operation, StackExchange.Redis.RedisKey[]! keys, double[]? weights = null, StackExchange.Redis.Aggregate aggregate = StackExchange.Redis.Aggregate.Sum, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.SortedSetEntry[]! +StackExchange.Redis.IDatabase.SortedSetCombineAndStore(StackExchange.Redis.SetOperation operation, StackExchange.Redis.RedisKey destination, StackExchange.Redis.RedisKey first, StackExchange.Redis.RedisKey second, StackExchange.Redis.Aggregate aggregate = StackExchange.Redis.Aggregate.Sum, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.SortedSetCombineAndStore(StackExchange.Redis.SetOperation operation, StackExchange.Redis.RedisKey destination, StackExchange.Redis.RedisKey[]! keys, double[]? 
weights = null, StackExchange.Redis.Aggregate aggregate = StackExchange.Redis.Aggregate.Sum, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.SortedSetDecrement(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member, double value, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> double +StackExchange.Redis.IDatabase.SortedSetIncrement(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member, double value, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> double +StackExchange.Redis.IDatabase.SortedSetIntersectionLength(StackExchange.Redis.RedisKey[]! keys, long limit = 0, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.SortedSetLength(StackExchange.Redis.RedisKey key, double min = -Infinity, double max = Infinity, StackExchange.Redis.Exclude exclude = StackExchange.Redis.Exclude.None, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.SortedSetLengthByValue(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue min, StackExchange.Redis.RedisValue max, StackExchange.Redis.Exclude exclude = StackExchange.Redis.Exclude.None, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.SortedSetPop(StackExchange.Redis.RedisKey key, long count, StackExchange.Redis.Order order = StackExchange.Redis.Order.Ascending, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.SortedSetEntry[]! +StackExchange.Redis.IDatabase.SortedSetPop(StackExchange.Redis.RedisKey key, StackExchange.Redis.Order order = StackExchange.Redis.Order.Ascending, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.SortedSetEntry? 
+StackExchange.Redis.IDatabase.SortedSetPop(StackExchange.Redis.RedisKey[]! keys, long count, StackExchange.Redis.Order order = StackExchange.Redis.Order.Ascending, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.SortedSetPopResult +StackExchange.Redis.IDatabase.SortedSetRandomMember(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue +StackExchange.Redis.IDatabase.SortedSetRandomMembers(StackExchange.Redis.RedisKey key, long count, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue[]! +StackExchange.Redis.IDatabase.SortedSetRandomMembersWithScores(StackExchange.Redis.RedisKey key, long count, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.SortedSetEntry[]! +StackExchange.Redis.IDatabase.SortedSetRangeByRank(StackExchange.Redis.RedisKey key, long start = 0, long stop = -1, StackExchange.Redis.Order order = StackExchange.Redis.Order.Ascending, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue[]! +StackExchange.Redis.IDatabase.SortedSetRangeAndStore(StackExchange.Redis.RedisKey sourceKey, StackExchange.Redis.RedisKey destinationKey, StackExchange.Redis.RedisValue start, StackExchange.Redis.RedisValue stop, StackExchange.Redis.SortedSetOrder sortedSetOrder = StackExchange.Redis.SortedSetOrder.ByRank, StackExchange.Redis.Exclude exclude = StackExchange.Redis.Exclude.None, StackExchange.Redis.Order order = StackExchange.Redis.Order.Ascending, long skip = 0, long? 
take = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.SortedSetRangeByRankWithScores(StackExchange.Redis.RedisKey key, long start = 0, long stop = -1, StackExchange.Redis.Order order = StackExchange.Redis.Order.Ascending, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.SortedSetEntry[]! +StackExchange.Redis.IDatabase.SortedSetRangeByScore(StackExchange.Redis.RedisKey key, double start = -Infinity, double stop = Infinity, StackExchange.Redis.Exclude exclude = StackExchange.Redis.Exclude.None, StackExchange.Redis.Order order = StackExchange.Redis.Order.Ascending, long skip = 0, long take = -1, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue[]! +StackExchange.Redis.IDatabase.SortedSetRangeByScoreWithScores(StackExchange.Redis.RedisKey key, double start = -Infinity, double stop = Infinity, StackExchange.Redis.Exclude exclude = StackExchange.Redis.Exclude.None, StackExchange.Redis.Order order = StackExchange.Redis.Order.Ascending, long skip = 0, long take = -1, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.SortedSetEntry[]! +StackExchange.Redis.IDatabase.SortedSetRangeByValue(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue min = default(StackExchange.Redis.RedisValue), StackExchange.Redis.RedisValue max = default(StackExchange.Redis.RedisValue), StackExchange.Redis.Exclude exclude = StackExchange.Redis.Exclude.None, StackExchange.Redis.Order order = StackExchange.Redis.Order.Ascending, long skip = 0, long take = -1, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue[]! 
+StackExchange.Redis.IDatabase.SortedSetRangeByValue(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue min, StackExchange.Redis.RedisValue max, StackExchange.Redis.Exclude exclude, long skip, long take = -1, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue[]! +StackExchange.Redis.IDatabase.SortedSetRank(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member, StackExchange.Redis.Order order = StackExchange.Redis.Order.Ascending, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long? +StackExchange.Redis.IDatabase.SortedSetRemove(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +StackExchange.Redis.IDatabase.SortedSetRemove(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! members, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.SortedSetRemoveRangeByRank(StackExchange.Redis.RedisKey key, long start, long stop, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.SortedSetRemoveRangeByScore(StackExchange.Redis.RedisKey key, double start, double stop, StackExchange.Redis.Exclude exclude = StackExchange.Redis.Exclude.None, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.SortedSetRemoveRangeByValue(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue min, StackExchange.Redis.RedisValue max, StackExchange.Redis.Exclude exclude = StackExchange.Redis.Exclude.None, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.SortedSetScan(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue pattern = 
default(StackExchange.Redis.RedisValue), int pageSize = 250, long cursor = 0, int pageOffset = 0, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Collections.Generic.IEnumerable<StackExchange.Redis.SortedSetEntry>! +StackExchange.Redis.IDatabase.SortedSetScan(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue pattern, int pageSize, StackExchange.Redis.CommandFlags flags) -> System.Collections.Generic.IEnumerable<StackExchange.Redis.SortedSetEntry>! +StackExchange.Redis.IDatabase.SortedSetScore(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> double? +StackExchange.Redis.IDatabase.SortedSetScores(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! members, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> double?[]! +StackExchange.Redis.IDatabase.SortedSetUpdate(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member, double score, StackExchange.Redis.SortedSetWhen when = StackExchange.Redis.SortedSetWhen.Always, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +StackExchange.Redis.IDatabase.SortedSetUpdate(StackExchange.Redis.RedisKey key, StackExchange.Redis.SortedSetEntry[]! values, StackExchange.Redis.SortedSetWhen when = StackExchange.Redis.SortedSetWhen.Always, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.StreamAcknowledge(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue groupName, StackExchange.Redis.RedisValue messageId, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.StreamAcknowledge(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue groupName, StackExchange.Redis.RedisValue[]! 
messageIds, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.StreamAcknowledgeAndDelete(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue groupName, StackExchange.Redis.StreamTrimMode mode, StackExchange.Redis.RedisValue messageId, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.StreamTrimResult +StackExchange.Redis.IDatabase.StreamAcknowledgeAndDelete(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue groupName, StackExchange.Redis.StreamTrimMode mode, StackExchange.Redis.RedisValue[]! messageIds, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.StreamTrimResult[]! +StackExchange.Redis.IDatabase.StreamAdd(StackExchange.Redis.RedisKey key, StackExchange.Redis.NameValueEntry[]! streamPairs, StackExchange.Redis.RedisValue? messageId = null, long? maxLength = null, bool useApproximateMaxLength = false, long? limit = null, StackExchange.Redis.StreamTrimMode trimMode = StackExchange.Redis.StreamTrimMode.KeepReferences, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue +StackExchange.Redis.IDatabase.StreamAdd(StackExchange.Redis.RedisKey key, StackExchange.Redis.NameValueEntry[]! streamPairs, StackExchange.Redis.RedisValue? messageId, int? maxLength, bool useApproximateMaxLength, StackExchange.Redis.CommandFlags flags) -> StackExchange.Redis.RedisValue +StackExchange.Redis.IDatabase.StreamAdd(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue streamField, StackExchange.Redis.RedisValue streamValue, StackExchange.Redis.RedisValue? messageId = null, long? maxLength = null, bool useApproximateMaxLength = false, long? 
limit = null, StackExchange.Redis.StreamTrimMode trimMode = StackExchange.Redis.StreamTrimMode.KeepReferences, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue +StackExchange.Redis.IDatabase.StreamAdd(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue streamField, StackExchange.Redis.RedisValue streamValue, StackExchange.Redis.RedisValue? messageId, int? maxLength, bool useApproximateMaxLength, StackExchange.Redis.CommandFlags flags) -> StackExchange.Redis.RedisValue +StackExchange.Redis.IDatabase.StreamAutoClaim(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue consumerGroup, StackExchange.Redis.RedisValue claimingConsumer, long minIdleTimeInMs, StackExchange.Redis.RedisValue startAtId, int? count = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.StreamAutoClaimResult +StackExchange.Redis.IDatabase.StreamAutoClaimIdsOnly(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue consumerGroup, StackExchange.Redis.RedisValue claimingConsumer, long minIdleTimeInMs, StackExchange.Redis.RedisValue startAtId, int? count = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.StreamAutoClaimIdsOnlyResult +StackExchange.Redis.IDatabase.StreamClaim(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue consumerGroup, StackExchange.Redis.RedisValue claimingConsumer, long minIdleTimeInMs, StackExchange.Redis.RedisValue[]! messageIds, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.StreamEntry[]! +StackExchange.Redis.IDatabase.StreamClaimIdsOnly(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue consumerGroup, StackExchange.Redis.RedisValue claimingConsumer, long minIdleTimeInMs, StackExchange.Redis.RedisValue[]! 
messageIds, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue[]! +StackExchange.Redis.IDatabase.StreamConsumerGroupSetPosition(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue groupName, StackExchange.Redis.RedisValue position, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +StackExchange.Redis.IDatabase.StreamConsumerInfo(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue groupName, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.StreamConsumerInfo[]! +StackExchange.Redis.IDatabase.StreamCreateConsumerGroup(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue groupName, StackExchange.Redis.RedisValue? position = null, bool createStream = true, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +StackExchange.Redis.IDatabase.StreamCreateConsumerGroup(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue groupName, StackExchange.Redis.RedisValue? position, StackExchange.Redis.CommandFlags flags) -> bool +StackExchange.Redis.IDatabase.StreamDelete(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! messageIds, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.StreamDelete(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! messageIds, StackExchange.Redis.StreamTrimMode mode, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.StreamTrimResult[]! 
+StackExchange.Redis.IDatabase.StreamDeleteConsumer(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue groupName, StackExchange.Redis.RedisValue consumerName, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.StreamDeleteConsumerGroup(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue groupName, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +StackExchange.Redis.IDatabase.StreamGroupInfo(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.StreamGroupInfo[]! +StackExchange.Redis.IDatabase.StreamInfo(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.StreamInfo +StackExchange.Redis.IDatabase.StreamLength(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.StreamPending(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue groupName, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.StreamPendingInfo +StackExchange.Redis.IDatabase.StreamPendingMessages(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue groupName, int count, StackExchange.Redis.RedisValue consumerName, StackExchange.Redis.RedisValue? minId, StackExchange.Redis.RedisValue? maxId, StackExchange.Redis.CommandFlags flags) -> StackExchange.Redis.StreamPendingMessageInfo[]! +StackExchange.Redis.IDatabase.StreamPendingMessages(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue groupName, int count, StackExchange.Redis.RedisValue consumerName, StackExchange.Redis.RedisValue? minId = null, StackExchange.Redis.RedisValue? maxId = null, long? 
minIdleTimeInMs = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.StreamPendingMessageInfo[]! +StackExchange.Redis.IDatabase.StreamRange(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue? minId = null, StackExchange.Redis.RedisValue? maxId = null, int? count = null, StackExchange.Redis.Order messageOrder = StackExchange.Redis.Order.Ascending, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.StreamEntry[]! +StackExchange.Redis.IDatabase.StreamRead(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue position, int? count = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.StreamEntry[]! +StackExchange.Redis.IDatabase.StreamRead(StackExchange.Redis.StreamPosition[]! streamPositions, int? countPerStream = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisStream[]! +StackExchange.Redis.IDatabase.StreamReadGroup(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue groupName, StackExchange.Redis.RedisValue consumerName, StackExchange.Redis.RedisValue? position, int? count, bool noAck, StackExchange.Redis.CommandFlags flags) -> StackExchange.Redis.StreamEntry[]! +StackExchange.Redis.IDatabase.StreamReadGroup(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue groupName, StackExchange.Redis.RedisValue consumerName, StackExchange.Redis.RedisValue? position = null, int? count = null, bool noAck = false, System.TimeSpan? claimMinIdleTime = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.StreamEntry[]! +StackExchange.Redis.IDatabase.StreamReadGroup(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue groupName, StackExchange.Redis.RedisValue consumerName, StackExchange.Redis.RedisValue? position, int? 
count, StackExchange.Redis.CommandFlags flags) -> StackExchange.Redis.StreamEntry[]! +StackExchange.Redis.IDatabase.StreamReadGroup(StackExchange.Redis.StreamPosition[]! streamPositions, StackExchange.Redis.RedisValue groupName, StackExchange.Redis.RedisValue consumerName, int? countPerStream, StackExchange.Redis.CommandFlags flags) -> StackExchange.Redis.RedisStream[]! +StackExchange.Redis.IDatabase.StreamTrim(StackExchange.Redis.RedisKey key, int maxLength, bool useApproximateMaxLength, StackExchange.Redis.CommandFlags flags) -> long +StackExchange.Redis.IDatabase.StreamTrim(StackExchange.Redis.RedisKey key, long maxLength, bool useApproximateMaxLength = false, long? limit = null, StackExchange.Redis.StreamTrimMode mode = StackExchange.Redis.StreamTrimMode.KeepReferences, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.StreamTrimByMinId(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue minId, bool useApproximateMaxLength = false, long? 
limit = null, StackExchange.Redis.StreamTrimMode mode = StackExchange.Redis.StreamTrimMode.KeepReferences, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.StringAppend(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue value, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.StringBitCount(StackExchange.Redis.RedisKey key, long start, long end, StackExchange.Redis.CommandFlags flags) -> long +StackExchange.Redis.IDatabase.StringBitCount(StackExchange.Redis.RedisKey key, long start = 0, long end = -1, StackExchange.Redis.StringIndexType indexType = StackExchange.Redis.StringIndexType.Byte, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.StringBitOperation(StackExchange.Redis.Bitwise operation, StackExchange.Redis.RedisKey destination, StackExchange.Redis.RedisKey first, StackExchange.Redis.RedisKey second = default(StackExchange.Redis.RedisKey), StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.StringBitOperation(StackExchange.Redis.Bitwise operation, StackExchange.Redis.RedisKey destination, StackExchange.Redis.RedisKey[]! 
keys, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.StringBitPosition(StackExchange.Redis.RedisKey key, bool bit, long start, long end, StackExchange.Redis.CommandFlags flags) -> long +StackExchange.Redis.IDatabase.StringBitPosition(StackExchange.Redis.RedisKey key, bool bit, long start = 0, long end = -1, StackExchange.Redis.StringIndexType indexType = StackExchange.Redis.StringIndexType.Byte, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.StringDecrement(StackExchange.Redis.RedisKey key, double value, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> double +StackExchange.Redis.IDatabase.StringDecrement(StackExchange.Redis.RedisKey key, long value = 1, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.StringGet(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue +StackExchange.Redis.IDatabase.StringGet(StackExchange.Redis.RedisKey[]! keys, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue[]! +StackExchange.Redis.IDatabase.StringGetBit(StackExchange.Redis.RedisKey key, long offset, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +StackExchange.Redis.IDatabase.StringGetDelete(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue +StackExchange.Redis.IDatabase.StringGetLease(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.Lease<byte>? 
+StackExchange.Redis.IDatabase.StringGetRange(StackExchange.Redis.RedisKey key, long start, long end, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue +StackExchange.Redis.IDatabase.StringGetSet(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue value, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue +StackExchange.Redis.IDatabase.StringGetSetExpiry(StackExchange.Redis.RedisKey key, System.TimeSpan? expiry, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue +StackExchange.Redis.IDatabase.StringGetSetExpiry(StackExchange.Redis.RedisKey key, System.DateTime expiry, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue +StackExchange.Redis.IDatabase.StringGetWithExpiry(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValueWithExpiry +StackExchange.Redis.IDatabase.StringIncrement(StackExchange.Redis.RedisKey key, double value, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> double +StackExchange.Redis.IDatabase.StringIncrement(StackExchange.Redis.RedisKey key, long value = 1, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.StringLength(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.StringLongestCommonSubsequence(StackExchange.Redis.RedisKey first, StackExchange.Redis.RedisKey second, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> string? 
+StackExchange.Redis.IDatabase.StringLongestCommonSubsequenceLength(StackExchange.Redis.RedisKey first, StackExchange.Redis.RedisKey second, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IDatabase.StringLongestCommonSubsequenceWithMatches(StackExchange.Redis.RedisKey first, StackExchange.Redis.RedisKey second, long minLength = 0, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.LCSMatchResult +StackExchange.Redis.IDatabase.StringSet(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue value, System.TimeSpan? expiry, StackExchange.Redis.When when) -> bool +StackExchange.Redis.IDatabase.StringSet(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue value, System.TimeSpan? expiry, StackExchange.Redis.When when, StackExchange.Redis.CommandFlags flags) -> bool +StackExchange.Redis.IDatabase.StringSet(System.Collections.Generic.KeyValuePair[]! values, StackExchange.Redis.When when, StackExchange.Redis.CommandFlags flags) -> bool +StackExchange.Redis.IDatabase.StringSetAndGet(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue value, System.TimeSpan? expiry = null, bool keepTtl = false, StackExchange.Redis.When when = StackExchange.Redis.When.Always, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue +StackExchange.Redis.IDatabase.StringSetAndGet(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue value, System.TimeSpan? 
expiry, StackExchange.Redis.When when, StackExchange.Redis.CommandFlags flags) -> StackExchange.Redis.RedisValue +StackExchange.Redis.IDatabase.StringSetBit(StackExchange.Redis.RedisKey key, long offset, bool bit, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +StackExchange.Redis.IDatabase.StringSetRange(StackExchange.Redis.RedisKey key, long offset, StackExchange.Redis.RedisValue value, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue +StackExchange.Redis.IDatabaseAsync +StackExchange.Redis.IDatabaseAsync.DebugObjectAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.ExecuteAsync(string! command, params object![]! args) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.ExecuteAsync(string! command, System.Collections.Generic.ICollection? args, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.GeoAddAsync(StackExchange.Redis.RedisKey key, double longitude, double latitude, StackExchange.Redis.RedisValue member, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.GeoAddAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.GeoEntry value, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.GeoAddAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.GeoEntry[]! values, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! 
+StackExchange.Redis.IDatabaseAsync.GeoDistanceAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member1, StackExchange.Redis.RedisValue member2, StackExchange.Redis.GeoUnit unit = StackExchange.Redis.GeoUnit.Meters, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.GeoHashAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.GeoHashAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! members, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.GeoPositionAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.GeoPositionAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! members, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.GeoRadiusAsync(StackExchange.Redis.RedisKey key, double longitude, double latitude, double radius, StackExchange.Redis.GeoUnit unit = StackExchange.Redis.GeoUnit.Meters, int count = -1, StackExchange.Redis.Order? order = null, StackExchange.Redis.GeoRadiusOptions options = StackExchange.Redis.GeoRadiusOptions.Default, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! 
+StackExchange.Redis.IDatabaseAsync.GeoRadiusAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member, double radius, StackExchange.Redis.GeoUnit unit = StackExchange.Redis.GeoUnit.Meters, int count = -1, StackExchange.Redis.Order? order = null, StackExchange.Redis.GeoRadiusOptions options = StackExchange.Redis.GeoRadiusOptions.Default, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.GeoRemoveAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.GeoSearchAsync(StackExchange.Redis.RedisKey key, double longitude, double latitude, StackExchange.Redis.GeoSearchShape! shape, int count = -1, bool demandClosest = true, StackExchange.Redis.Order? order = null, StackExchange.Redis.GeoRadiusOptions options = StackExchange.Redis.GeoRadiusOptions.Default, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.GeoSearchAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member, StackExchange.Redis.GeoSearchShape! shape, int count = -1, bool demandClosest = true, StackExchange.Redis.Order? order = null, StackExchange.Redis.GeoRadiusOptions options = StackExchange.Redis.GeoRadiusOptions.Default, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.GeoSearchAndStoreAsync(StackExchange.Redis.RedisKey sourceKey, StackExchange.Redis.RedisKey destinationKey, double longitude, double latitude, StackExchange.Redis.GeoSearchShape! shape, int count = -1, bool demandClosest = true, StackExchange.Redis.Order? 
order = null, bool storeDistances = false, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.GeoSearchAndStoreAsync(StackExchange.Redis.RedisKey sourceKey, StackExchange.Redis.RedisKey destinationKey, StackExchange.Redis.RedisValue member, StackExchange.Redis.GeoSearchShape! shape, int count = -1, bool demandClosest = true, StackExchange.Redis.Order? order = null, bool storeDistances = false, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.HashDecrementAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue hashField, double value, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.HashDecrementAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue hashField, long value = 1, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.HashDeleteAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue hashField, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.HashDeleteAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! hashFields, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.HashExistsAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue hashField, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! + +StackExchange.Redis.IDatabaseAsync.HashFieldExpireAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! 
hashFields, System.DateTime expiry, StackExchange.Redis.ExpireWhen when = StackExchange.Redis.ExpireWhen.Always, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.HashFieldExpireAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! hashFields, System.TimeSpan expiry, StackExchange.Redis.ExpireWhen when = StackExchange.Redis.ExpireWhen.Always, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.HashFieldGetExpireDateTimeAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! hashFields, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.HashFieldGetTimeToLiveAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! hashFields, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.HashFieldPersistAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! hashFields, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! + +StackExchange.Redis.IDatabaseAsync.HashGetAllAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.HashGetAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue hashField, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.HashGetAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! 
hashFields, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.HashGetLeaseAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue hashField, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task?>! +StackExchange.Redis.IDatabaseAsync.HashIncrementAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue hashField, double value, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.HashIncrementAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue hashField, long value = 1, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.HashKeysAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.HashLengthAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.HashRandomFieldAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.HashRandomFieldsAsync(StackExchange.Redis.RedisKey key, long count, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.HashRandomFieldsWithValuesAsync(StackExchange.Redis.RedisKey key, long count, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! 
+StackExchange.Redis.IDatabaseAsync.HashScanAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue pattern = default(StackExchange.Redis.RedisValue), int pageSize = 250, long cursor = 0, int pageOffset = 0, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Collections.Generic.IAsyncEnumerable! +StackExchange.Redis.IDatabaseAsync.HashScanNoValuesAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue pattern = default(StackExchange.Redis.RedisValue), int pageSize = 250, long cursor = 0, int pageOffset = 0, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Collections.Generic.IAsyncEnumerable! +StackExchange.Redis.IDatabaseAsync.HashSetAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.HashEntry[]! hashFields, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.HashSetAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue hashField, StackExchange.Redis.RedisValue value, StackExchange.Redis.When when = StackExchange.Redis.When.Always, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.HashStringLengthAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue hashField, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.HashValuesAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.HyperLogLogAddAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue value, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! 
+StackExchange.Redis.IDatabaseAsync.HyperLogLogAddAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! values, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.HyperLogLogLengthAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.HyperLogLogLengthAsync(StackExchange.Redis.RedisKey[]! keys, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.HyperLogLogMergeAsync(StackExchange.Redis.RedisKey destination, StackExchange.Redis.RedisKey first, StackExchange.Redis.RedisKey second, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.HyperLogLogMergeAsync(StackExchange.Redis.RedisKey destination, StackExchange.Redis.RedisKey[]! sourceKeys, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.IdentifyEndpointAsync(StackExchange.Redis.RedisKey key = default(StackExchange.Redis.RedisKey), StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.IsConnected(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +StackExchange.Redis.IDatabaseAsync.KeyCopyAsync(StackExchange.Redis.RedisKey sourceKey, StackExchange.Redis.RedisKey destinationKey, int destinationDatabase = -1, bool replace = false, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! 
+StackExchange.Redis.IDatabaseAsync.KeyDeleteAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.KeyDeleteAsync(StackExchange.Redis.RedisKey[]! keys, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.KeyDumpAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.KeyEncodingAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.KeyExistsAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.KeyExistsAsync(StackExchange.Redis.RedisKey[]! keys, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.KeyExpireAsync(StackExchange.Redis.RedisKey key, System.DateTime? expiry, StackExchange.Redis.CommandFlags flags) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.KeyExpireAsync(StackExchange.Redis.RedisKey key, System.DateTime? expiry, StackExchange.Redis.ExpireWhen when = StackExchange.Redis.ExpireWhen.Always, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.KeyExpireAsync(StackExchange.Redis.RedisKey key, System.TimeSpan? expiry, StackExchange.Redis.CommandFlags flags) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.KeyExpireAsync(StackExchange.Redis.RedisKey key, System.TimeSpan? 
expiry, StackExchange.Redis.ExpireWhen when = StackExchange.Redis.ExpireWhen.Always, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.KeyExpireTimeAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.KeyFrequencyAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.KeyIdleTimeAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.KeyMigrateAsync(StackExchange.Redis.RedisKey key, System.Net.EndPoint! toServer, int toDatabase = 0, int timeoutMilliseconds = 0, StackExchange.Redis.MigrateOptions migrateOptions = StackExchange.Redis.MigrateOptions.None, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.KeyMoveAsync(StackExchange.Redis.RedisKey key, int database, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.KeyPersistAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.KeyRandomAsync(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.KeyRefCountAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! 
+StackExchange.Redis.IDatabaseAsync.KeyRenameAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisKey newKey, StackExchange.Redis.When when = StackExchange.Redis.When.Always, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.KeyRestoreAsync(StackExchange.Redis.RedisKey key, byte[]! value, System.TimeSpan? expiry = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.KeyTimeToLiveAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.KeyTouchAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.KeyTouchAsync(StackExchange.Redis.RedisKey[]! keys, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.KeyTypeAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.ListGetByIndexAsync(StackExchange.Redis.RedisKey key, long index, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.ListInsertAfterAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue pivot, StackExchange.Redis.RedisValue value, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! 
+StackExchange.Redis.IDatabaseAsync.ListInsertBeforeAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue pivot, StackExchange.Redis.RedisValue value, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.ListMoveAsync(StackExchange.Redis.RedisKey sourceKey, StackExchange.Redis.RedisKey destinationKey, StackExchange.Redis.ListSide sourceSide, StackExchange.Redis.ListSide destinationSide, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.ListLeftPopAsync(StackExchange.Redis.RedisKey key, long count, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.ListLeftPopAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.ListLeftPopAsync(StackExchange.Redis.RedisKey[]! keys, long count, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.ListLeftPushAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue value, StackExchange.Redis.When when = StackExchange.Redis.When.Always, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.ListLeftPushAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! values, StackExchange.Redis.CommandFlags flags) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.ListLeftPushAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! 
values, StackExchange.Redis.When when = StackExchange.Redis.When.Always, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.ListLengthAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.ListPositionAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue element, long rank = 1, long maxLength = 0, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.ListPositionsAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue element, long count, long rank = 1, long maxLength = 0, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.ListRangeAsync(StackExchange.Redis.RedisKey key, long start = 0, long stop = -1, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.ListRemoveAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue value, long count = 0, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.ListRightPopAsync(StackExchange.Redis.RedisKey key, long count, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.ListRightPopAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.ListRightPopAsync(StackExchange.Redis.RedisKey[]! 
keys, long count, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.ListRightPopLeftPushAsync(StackExchange.Redis.RedisKey source, StackExchange.Redis.RedisKey destination, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.ListRightPushAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue value, StackExchange.Redis.When when = StackExchange.Redis.When.Always, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.ListRightPushAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! values, StackExchange.Redis.CommandFlags flags) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.ListRightPushAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! values, StackExchange.Redis.When when = StackExchange.Redis.When.Always, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.ListSetByIndexAsync(StackExchange.Redis.RedisKey key, long index, StackExchange.Redis.RedisValue value, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.ListTrimAsync(StackExchange.Redis.RedisKey key, long start, long stop, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.LockExtendAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue value, System.TimeSpan expiry, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! 
+StackExchange.Redis.IDatabaseAsync.LockQueryAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.LockReleaseAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue value, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.LockTakeAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue value, System.TimeSpan expiry, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.PublishAsync(StackExchange.Redis.RedisChannel channel, StackExchange.Redis.RedisValue message, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.ScriptEvaluateAsync(byte[]! hash, StackExchange.Redis.RedisKey[]? keys = null, StackExchange.Redis.RedisValue[]? values = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.ScriptEvaluateAsync(StackExchange.Redis.LoadedLuaScript! script, object? parameters = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.ScriptEvaluateAsync(StackExchange.Redis.LuaScript! script, object? parameters = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.ScriptEvaluateAsync(string! script, StackExchange.Redis.RedisKey[]? keys = null, StackExchange.Redis.RedisValue[]? values = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! 
+StackExchange.Redis.IDatabaseAsync.ScriptEvaluateReadOnlyAsync(byte[]! hash, StackExchange.Redis.RedisKey[]? keys = null, StackExchange.Redis.RedisValue[]? values = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.ScriptEvaluateReadOnlyAsync(string! script, StackExchange.Redis.RedisKey[]? keys = null, StackExchange.Redis.RedisValue[]? values = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SetAddAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue value, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SetAddAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! values, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SetCombineAndStoreAsync(StackExchange.Redis.SetOperation operation, StackExchange.Redis.RedisKey destination, StackExchange.Redis.RedisKey first, StackExchange.Redis.RedisKey second, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SetCombineAndStoreAsync(StackExchange.Redis.SetOperation operation, StackExchange.Redis.RedisKey destination, StackExchange.Redis.RedisKey[]! keys, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SetCombineAsync(StackExchange.Redis.SetOperation operation, StackExchange.Redis.RedisKey first, StackExchange.Redis.RedisKey second, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! 
+StackExchange.Redis.IDatabaseAsync.SetCombineAsync(StackExchange.Redis.SetOperation operation, StackExchange.Redis.RedisKey[]! keys, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SetContainsAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue value, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SetContainsAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! values, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SetIntersectionLengthAsync(StackExchange.Redis.RedisKey[]! keys, long limit = 0, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SetLengthAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SetMembersAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SetMoveAsync(StackExchange.Redis.RedisKey source, StackExchange.Redis.RedisKey destination, StackExchange.Redis.RedisValue value, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SetPopAsync(StackExchange.Redis.RedisKey key, long count, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! 
+StackExchange.Redis.IDatabaseAsync.SetPopAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SetRandomMemberAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SetRandomMembersAsync(StackExchange.Redis.RedisKey key, long count, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SetRemoveAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue value, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SetRemoveAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! values, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SetScanAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue pattern = default(StackExchange.Redis.RedisValue), int pageSize = 250, long cursor = 0, int pageOffset = 0, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Collections.Generic.IAsyncEnumerable! +StackExchange.Redis.IDatabaseAsync.SortAndStoreAsync(StackExchange.Redis.RedisKey destination, StackExchange.Redis.RedisKey key, long skip = 0, long take = -1, StackExchange.Redis.Order order = StackExchange.Redis.Order.Ascending, StackExchange.Redis.SortType sortType = StackExchange.Redis.SortType.Numeric, StackExchange.Redis.RedisValue by = default(StackExchange.Redis.RedisValue), StackExchange.Redis.RedisValue[]? get = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! 
+StackExchange.Redis.IDatabaseAsync.SortAsync(StackExchange.Redis.RedisKey key, long skip = 0, long take = -1, StackExchange.Redis.Order order = StackExchange.Redis.Order.Ascending, StackExchange.Redis.SortType sortType = StackExchange.Redis.SortType.Numeric, StackExchange.Redis.RedisValue by = default(StackExchange.Redis.RedisValue), StackExchange.Redis.RedisValue[]? get = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SortedSetAddAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member, double score, StackExchange.Redis.CommandFlags flags) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SortedSetAddAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member, double score, StackExchange.Redis.When when, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SortedSetAddAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member, double score, StackExchange.Redis.SortedSetWhen when = StackExchange.Redis.SortedSetWhen.Always, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SortedSetAddAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.SortedSetEntry[]! values, StackExchange.Redis.CommandFlags flags) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SortedSetAddAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.SortedSetEntry[]! values, StackExchange.Redis.When when, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SortedSetAddAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.SortedSetEntry[]! 
values, StackExchange.Redis.SortedSetWhen when = StackExchange.Redis.SortedSetWhen.Always, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SortedSetCombineAsync(StackExchange.Redis.SetOperation operation, StackExchange.Redis.RedisKey[]! keys, double[]? weights = null, StackExchange.Redis.Aggregate aggregate = StackExchange.Redis.Aggregate.Sum, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SortedSetCombineWithScoresAsync(StackExchange.Redis.SetOperation operation, StackExchange.Redis.RedisKey[]! keys, double[]? weights = null, StackExchange.Redis.Aggregate aggregate = StackExchange.Redis.Aggregate.Sum, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SortedSetCombineAndStoreAsync(StackExchange.Redis.SetOperation operation, StackExchange.Redis.RedisKey destination, StackExchange.Redis.RedisKey first, StackExchange.Redis.RedisKey second, StackExchange.Redis.Aggregate aggregate = StackExchange.Redis.Aggregate.Sum, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SortedSetCombineAndStoreAsync(StackExchange.Redis.SetOperation operation, StackExchange.Redis.RedisKey destination, StackExchange.Redis.RedisKey[]! keys, double[]? weights = null, StackExchange.Redis.Aggregate aggregate = StackExchange.Redis.Aggregate.Sum, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! 
+StackExchange.Redis.IDatabaseAsync.SortedSetDecrementAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member, double value, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SortedSetIncrementAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member, double value, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SortedSetIntersectionLengthAsync(StackExchange.Redis.RedisKey[]! keys, long limit = 0, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SortedSetLengthAsync(StackExchange.Redis.RedisKey key, double min = -Infinity, double max = Infinity, StackExchange.Redis.Exclude exclude = StackExchange.Redis.Exclude.None, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SortedSetLengthByValueAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue min, StackExchange.Redis.RedisValue max, StackExchange.Redis.Exclude exclude = StackExchange.Redis.Exclude.None, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SortedSetPopAsync(StackExchange.Redis.RedisKey key, long count, StackExchange.Redis.Order order = StackExchange.Redis.Order.Ascending, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SortedSetPopAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.Order order = StackExchange.Redis.Order.Ascending, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! 
+StackExchange.Redis.IDatabaseAsync.SortedSetPopAsync(StackExchange.Redis.RedisKey[]! keys, long count, StackExchange.Redis.Order order = StackExchange.Redis.Order.Ascending, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SortedSetRandomMemberAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SortedSetRandomMembersAsync(StackExchange.Redis.RedisKey key, long count, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SortedSetRandomMembersWithScoresAsync(StackExchange.Redis.RedisKey key, long count, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SortedSetRangeAndStoreAsync(StackExchange.Redis.RedisKey sourceKey, StackExchange.Redis.RedisKey destinationKey, StackExchange.Redis.RedisValue start, StackExchange.Redis.RedisValue stop, StackExchange.Redis.SortedSetOrder sortedSetOrder = StackExchange.Redis.SortedSetOrder.ByRank, StackExchange.Redis.Exclude exclude = StackExchange.Redis.Exclude.None, StackExchange.Redis.Order order = StackExchange.Redis.Order.Ascending, long skip = 0, long? take = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SortedSetRangeByRankAsync(StackExchange.Redis.RedisKey key, long start = 0, long stop = -1, StackExchange.Redis.Order order = StackExchange.Redis.Order.Ascending, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! 
+StackExchange.Redis.IDatabaseAsync.SortedSetRangeByRankWithScoresAsync(StackExchange.Redis.RedisKey key, long start = 0, long stop = -1, StackExchange.Redis.Order order = StackExchange.Redis.Order.Ascending, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SortedSetRangeByScoreAsync(StackExchange.Redis.RedisKey key, double start = -Infinity, double stop = Infinity, StackExchange.Redis.Exclude exclude = StackExchange.Redis.Exclude.None, StackExchange.Redis.Order order = StackExchange.Redis.Order.Ascending, long skip = 0, long take = -1, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SortedSetRangeByScoreWithScoresAsync(StackExchange.Redis.RedisKey key, double start = -Infinity, double stop = Infinity, StackExchange.Redis.Exclude exclude = StackExchange.Redis.Exclude.None, StackExchange.Redis.Order order = StackExchange.Redis.Order.Ascending, long skip = 0, long take = -1, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SortedSetRangeByValueAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue min = default(StackExchange.Redis.RedisValue), StackExchange.Redis.RedisValue max = default(StackExchange.Redis.RedisValue), StackExchange.Redis.Exclude exclude = StackExchange.Redis.Exclude.None, StackExchange.Redis.Order order = StackExchange.Redis.Order.Ascending, long skip = 0, long take = -1, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! 
+StackExchange.Redis.IDatabaseAsync.SortedSetRangeByValueAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue min, StackExchange.Redis.RedisValue max, StackExchange.Redis.Exclude exclude, long skip, long take = -1, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SortedSetRankAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member, StackExchange.Redis.Order order = StackExchange.Redis.Order.Ascending, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SortedSetRemoveAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SortedSetRemoveAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! members, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SortedSetRemoveRangeByRankAsync(StackExchange.Redis.RedisKey key, long start, long stop, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SortedSetRemoveRangeByScoreAsync(StackExchange.Redis.RedisKey key, double start, double stop, StackExchange.Redis.Exclude exclude = StackExchange.Redis.Exclude.None, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! 
+StackExchange.Redis.IDatabaseAsync.SortedSetRemoveRangeByValueAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue min, StackExchange.Redis.RedisValue max, StackExchange.Redis.Exclude exclude = StackExchange.Redis.Exclude.None, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SortedSetScanAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue pattern = default(StackExchange.Redis.RedisValue), int pageSize = 250, long cursor = 0, int pageOffset = 0, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Collections.Generic.IAsyncEnumerable! +StackExchange.Redis.IDatabaseAsync.SortedSetScoreAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SortedSetScoresAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! members, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SortedSetUpdateAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member, double score, StackExchange.Redis.SortedSetWhen when = StackExchange.Redis.SortedSetWhen.Always, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.SortedSetUpdateAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.SortedSetEntry[]! values, StackExchange.Redis.SortedSetWhen when = StackExchange.Redis.SortedSetWhen.Always, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! 
+StackExchange.Redis.IDatabaseAsync.StreamAcknowledgeAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue groupName, StackExchange.Redis.RedisValue messageId, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StreamAcknowledgeAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue groupName, StackExchange.Redis.RedisValue[]! messageIds, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StreamAcknowledgeAndDeleteAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue groupName, StackExchange.Redis.StreamTrimMode mode, StackExchange.Redis.RedisValue messageId, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StreamAcknowledgeAndDeleteAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue groupName, StackExchange.Redis.StreamTrimMode mode, StackExchange.Redis.RedisValue[]! messageIds, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StreamAddAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.NameValueEntry[]! streamPairs, StackExchange.Redis.RedisValue? messageId = null, long? maxLength = null, bool useApproximateMaxLength = false, long? limit = null, StackExchange.Redis.StreamTrimMode trimMode = StackExchange.Redis.StreamTrimMode.KeepReferences, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StreamAddAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.NameValueEntry[]! streamPairs, StackExchange.Redis.RedisValue? messageId, int? 
maxLength, bool useApproximateMaxLength, StackExchange.Redis.CommandFlags flags) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StreamAddAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue streamField, StackExchange.Redis.RedisValue streamValue, StackExchange.Redis.RedisValue? messageId = null, long? maxLength = null, bool useApproximateMaxLength = false, long? limit = null, StackExchange.Redis.StreamTrimMode trimMode = StackExchange.Redis.StreamTrimMode.KeepReferences, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StreamAddAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue streamField, StackExchange.Redis.RedisValue streamValue, StackExchange.Redis.RedisValue? messageId, int? maxLength, bool useApproximateMaxLength, StackExchange.Redis.CommandFlags flags) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StreamAutoClaimAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue consumerGroup, StackExchange.Redis.RedisValue claimingConsumer, long minIdleTimeInMs, StackExchange.Redis.RedisValue startAtId, int? count = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StreamAutoClaimIdsOnlyAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue consumerGroup, StackExchange.Redis.RedisValue claimingConsumer, long minIdleTimeInMs, StackExchange.Redis.RedisValue startAtId, int? count = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StreamClaimAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue consumerGroup, StackExchange.Redis.RedisValue claimingConsumer, long minIdleTimeInMs, StackExchange.Redis.RedisValue[]! 
messageIds, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StreamClaimIdsOnlyAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue consumerGroup, StackExchange.Redis.RedisValue claimingConsumer, long minIdleTimeInMs, StackExchange.Redis.RedisValue[]! messageIds, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StreamConsumerGroupSetPositionAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue groupName, StackExchange.Redis.RedisValue position, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StreamConsumerInfoAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue groupName, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StreamCreateConsumerGroupAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue groupName, StackExchange.Redis.RedisValue? position = null, bool createStream = true, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StreamCreateConsumerGroupAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue groupName, StackExchange.Redis.RedisValue? position, StackExchange.Redis.CommandFlags flags) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StreamDeleteAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! messageIds, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StreamDeleteAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! 
messageIds, StackExchange.Redis.StreamTrimMode mode, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StreamDeleteConsumerAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue groupName, StackExchange.Redis.RedisValue consumerName, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StreamDeleteConsumerGroupAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue groupName, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StreamGroupInfoAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StreamInfoAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StreamLengthAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StreamPendingAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue groupName, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StreamPendingMessagesAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue groupName, int count, StackExchange.Redis.RedisValue consumerName, StackExchange.Redis.RedisValue? minId, StackExchange.Redis.RedisValue? maxId, StackExchange.Redis.CommandFlags flags) -> System.Threading.Tasks.Task! 
+StackExchange.Redis.IDatabaseAsync.StreamPendingMessagesAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue groupName, int count, StackExchange.Redis.RedisValue consumerName, StackExchange.Redis.RedisValue? minId = null, StackExchange.Redis.RedisValue? maxId = null, long? minIdleTimeInMs = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StreamRangeAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue? minId = null, StackExchange.Redis.RedisValue? maxId = null, int? count = null, StackExchange.Redis.Order messageOrder = StackExchange.Redis.Order.Ascending, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StreamReadAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue position, int? count = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StreamReadAsync(StackExchange.Redis.StreamPosition[]! streamPositions, int? countPerStream = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StreamReadGroupAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue groupName, StackExchange.Redis.RedisValue consumerName, StackExchange.Redis.RedisValue? position, int? count, bool noAck, StackExchange.Redis.CommandFlags flags) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StreamReadGroupAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue groupName, StackExchange.Redis.RedisValue consumerName, StackExchange.Redis.RedisValue? position = null, int? count = null, bool noAck = false, System.TimeSpan? 
claimMinIdleTime = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StreamReadGroupAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue groupName, StackExchange.Redis.RedisValue consumerName, StackExchange.Redis.RedisValue? position, int? count, StackExchange.Redis.CommandFlags flags) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StreamReadGroupAsync(StackExchange.Redis.StreamPosition[]! streamPositions, StackExchange.Redis.RedisValue groupName, StackExchange.Redis.RedisValue consumerName, int? countPerStream, StackExchange.Redis.CommandFlags flags) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StreamTrimAsync(StackExchange.Redis.RedisKey key, int maxLength, bool useApproximateMaxLength, StackExchange.Redis.CommandFlags flags) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StreamTrimAsync(StackExchange.Redis.RedisKey key, long maxLength, bool useApproximateMaxLength = false, long? limit = null, StackExchange.Redis.StreamTrimMode mode = StackExchange.Redis.StreamTrimMode.KeepReferences, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StreamTrimByMinIdAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue minId, bool useApproximateMaxLength = false, long? limit = null, StackExchange.Redis.StreamTrimMode mode = StackExchange.Redis.StreamTrimMode.KeepReferences, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StringAppendAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue value, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! 
+StackExchange.Redis.IDatabaseAsync.StringBitCountAsync(StackExchange.Redis.RedisKey key, long start, long end, StackExchange.Redis.CommandFlags flags) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StringBitCountAsync(StackExchange.Redis.RedisKey key, long start = 0, long end = -1, StackExchange.Redis.StringIndexType indexType = StackExchange.Redis.StringIndexType.Byte, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StringBitOperationAsync(StackExchange.Redis.Bitwise operation, StackExchange.Redis.RedisKey destination, StackExchange.Redis.RedisKey first, StackExchange.Redis.RedisKey second = default(StackExchange.Redis.RedisKey), StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StringBitOperationAsync(StackExchange.Redis.Bitwise operation, StackExchange.Redis.RedisKey destination, StackExchange.Redis.RedisKey[]! keys, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StringBitPositionAsync(StackExchange.Redis.RedisKey key, bool bit, long start, long end, StackExchange.Redis.CommandFlags flags) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StringBitPositionAsync(StackExchange.Redis.RedisKey key, bool bit, long start = 0, long end = -1, StackExchange.Redis.StringIndexType indexType = StackExchange.Redis.StringIndexType.Byte, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StringDecrementAsync(StackExchange.Redis.RedisKey key, double value, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! 
+StackExchange.Redis.IDatabaseAsync.StringDecrementAsync(StackExchange.Redis.RedisKey key, long value = 1, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StringGetAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StringGetAsync(StackExchange.Redis.RedisKey[]! keys, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StringGetBitAsync(StackExchange.Redis.RedisKey key, long offset, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StringGetDeleteAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StringGetLeaseAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task?>! +StackExchange.Redis.IDatabaseAsync.StringGetRangeAsync(StackExchange.Redis.RedisKey key, long start, long end, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StringGetSetAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue value, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StringGetSetExpiryAsync(StackExchange.Redis.RedisKey key, System.TimeSpan? expiry, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! 
+StackExchange.Redis.IDatabaseAsync.StringGetSetExpiryAsync(StackExchange.Redis.RedisKey key, System.DateTime expiry, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StringGetWithExpiryAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StringIncrementAsync(StackExchange.Redis.RedisKey key, double value, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StringIncrementAsync(StackExchange.Redis.RedisKey key, long value = 1, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StringLengthAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StringLongestCommonSubsequenceAsync(StackExchange.Redis.RedisKey first, StackExchange.Redis.RedisKey second, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StringLongestCommonSubsequenceLengthAsync(StackExchange.Redis.RedisKey first, StackExchange.Redis.RedisKey second, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StringLongestCommonSubsequenceWithMatchesAsync(StackExchange.Redis.RedisKey first, StackExchange.Redis.RedisKey second, long minLength = 0, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! 
+StackExchange.Redis.IDatabaseAsync.StringSetAndGetAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue value, System.TimeSpan? expiry = null, bool keepTtl = false, StackExchange.Redis.When when = StackExchange.Redis.When.Always, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StringSetAndGetAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue value, System.TimeSpan? expiry, StackExchange.Redis.When when, StackExchange.Redis.CommandFlags flags) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StringSetAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue value, System.TimeSpan? expiry, StackExchange.Redis.When when) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StringSetAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue value, System.TimeSpan? expiry, StackExchange.Redis.When when, StackExchange.Redis.CommandFlags flags) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StringSetAsync(System.Collections.Generic.KeyValuePair[]! values, StackExchange.Redis.When when, StackExchange.Redis.CommandFlags flags) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StringSetBitAsync(StackExchange.Redis.RedisKey key, long offset, bool bit, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StringSetRangeAsync(StackExchange.Redis.RedisKey key, long offset, StackExchange.Redis.RedisValue value, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.InternalErrorEventArgs +StackExchange.Redis.InternalErrorEventArgs.ConnectionType.get -> StackExchange.Redis.ConnectionType +StackExchange.Redis.InternalErrorEventArgs.EndPoint.get -> System.Net.EndPoint? 
+StackExchange.Redis.InternalErrorEventArgs.Exception.get -> System.Exception! +StackExchange.Redis.InternalErrorEventArgs.InternalErrorEventArgs(object! sender, System.Net.EndPoint! endpoint, StackExchange.Redis.ConnectionType connectionType, System.Exception! exception, string! origin) -> void +StackExchange.Redis.InternalErrorEventArgs.Origin.get -> string? +StackExchange.Redis.IReconnectRetryPolicy +StackExchange.Redis.IReconnectRetryPolicy.ShouldRetry(long currentRetryCount, int timeElapsedMillisecondsSinceLastRetry) -> bool +StackExchange.Redis.IRedis +StackExchange.Redis.IRedis.Ping(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.TimeSpan +StackExchange.Redis.IRedisAsync +StackExchange.Redis.IRedisAsync.Multiplexer.get -> StackExchange.Redis.IConnectionMultiplexer! +StackExchange.Redis.IRedisAsync.PingAsync(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IRedisAsync.TryWait(System.Threading.Tasks.Task! task) -> bool +StackExchange.Redis.IRedisAsync.Wait(System.Threading.Tasks.Task! task) -> void +StackExchange.Redis.IRedisAsync.Wait(System.Threading.Tasks.Task! task) -> T +StackExchange.Redis.IRedisAsync.WaitAll(params System.Threading.Tasks.Task![]! tasks) -> void +StackExchange.Redis.IScanningCursor +StackExchange.Redis.IScanningCursor.Cursor.get -> long +StackExchange.Redis.IScanningCursor.PageOffset.get -> int +StackExchange.Redis.IScanningCursor.PageSize.get -> int +StackExchange.Redis.IServer +StackExchange.Redis.IServer.AllowReplicaWrites.get -> bool +StackExchange.Redis.IServer.AllowReplicaWrites.set -> void +StackExchange.Redis.IServer.AllowSlaveWrites.get -> bool +StackExchange.Redis.IServer.AllowSlaveWrites.set -> void +StackExchange.Redis.IServer.ClientKill(long? id = null, StackExchange.Redis.ClientType? clientType = null, System.Net.EndPoint? 
endpoint = null, bool skipMe = true, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IServer.ClientKill(System.Net.EndPoint! endpoint, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> void +StackExchange.Redis.IServer.ClientKill(StackExchange.Redis.ClientKillFilter! filter, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IServer.ClientKillAsync(long? id = null, StackExchange.Redis.ClientType? clientType = null, System.Net.EndPoint? endpoint = null, bool skipMe = true, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IServer.ClientKillAsync(System.Net.EndPoint! endpoint, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IServer.ClientKillAsync(StackExchange.Redis.ClientKillFilter! filter, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IServer.ClientList(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.ClientInfo![]! +StackExchange.Redis.IServer.ClientListAsync(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IServer.ClusterConfiguration.get -> StackExchange.Redis.ClusterConfiguration? +StackExchange.Redis.IServer.ClusterNodes(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.ClusterConfiguration? +StackExchange.Redis.IServer.ClusterNodesAsync(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! 
+StackExchange.Redis.IServer.ClusterNodesRaw(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> string? +StackExchange.Redis.IServer.ClusterNodesRawAsync(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IServer.ConfigGet(StackExchange.Redis.RedisValue pattern = default(StackExchange.Redis.RedisValue), StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Collections.Generic.KeyValuePair[]! +StackExchange.Redis.IServer.ConfigGetAsync(StackExchange.Redis.RedisValue pattern = default(StackExchange.Redis.RedisValue), StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task[]!>! +StackExchange.Redis.IServer.ConfigResetStatistics(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> void +StackExchange.Redis.IServer.ConfigResetStatisticsAsync(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IServer.ConfigRewrite(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> void +StackExchange.Redis.IServer.ConfigRewriteAsync(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IServer.ConfigSet(StackExchange.Redis.RedisValue setting, StackExchange.Redis.RedisValue value, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> void +StackExchange.Redis.IServer.ConfigSetAsync(StackExchange.Redis.RedisValue setting, StackExchange.Redis.RedisValue value, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! 
+StackExchange.Redis.IServer.CommandCount(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IServer.CommandCountAsync(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IServer.CommandGetKeys(StackExchange.Redis.RedisValue[]! command, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisKey[]! +StackExchange.Redis.IServer.CommandGetKeysAsync(StackExchange.Redis.RedisValue[]! command, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IServer.CommandList(StackExchange.Redis.RedisValue? moduleName = null, StackExchange.Redis.RedisValue? category = null, StackExchange.Redis.RedisValue? pattern = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> string![]! +StackExchange.Redis.IServer.CommandListAsync(StackExchange.Redis.RedisValue? moduleName = null, StackExchange.Redis.RedisValue? category = null, StackExchange.Redis.RedisValue? pattern = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IServer.DatabaseCount.get -> int +StackExchange.Redis.IServer.DatabaseSize(int database = -1, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IServer.DatabaseSizeAsync(int database = -1, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! 
+StackExchange.Redis.IServer.Echo(StackExchange.Redis.RedisValue message, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue +StackExchange.Redis.IServer.EchoAsync(StackExchange.Redis.RedisValue message, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IServer.EndPoint.get -> System.Net.EndPoint! +StackExchange.Redis.IServer.Execute(string! command, params object![]! args) -> StackExchange.Redis.RedisResult! +StackExchange.Redis.IServer.Execute(string! command, System.Collections.Generic.ICollection! args, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisResult! +StackExchange.Redis.IServer.ExecuteAsync(string! command, params object![]! args) -> System.Threading.Tasks.Task! +StackExchange.Redis.IServer.ExecuteAsync(string! command, System.Collections.Generic.ICollection! args, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IServer.Features.get -> StackExchange.Redis.RedisFeatures +StackExchange.Redis.IServer.FlushAllDatabases(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> void +StackExchange.Redis.IServer.FlushAllDatabasesAsync(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IServer.FlushDatabase(int database = -1, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> void +StackExchange.Redis.IServer.FlushDatabaseAsync(int database = -1, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IServer.GetCounters() -> StackExchange.Redis.ServerCounters! 
+StackExchange.Redis.IServer.Info(StackExchange.Redis.RedisValue section = default(StackExchange.Redis.RedisValue), StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Linq.IGrouping>![]! +StackExchange.Redis.IServer.InfoAsync(StackExchange.Redis.RedisValue section = default(StackExchange.Redis.RedisValue), StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task>![]!>! +StackExchange.Redis.IServer.InfoRaw(StackExchange.Redis.RedisValue section = default(StackExchange.Redis.RedisValue), StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> string? +StackExchange.Redis.IServer.InfoRawAsync(StackExchange.Redis.RedisValue section = default(StackExchange.Redis.RedisValue), StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IServer.IsConnected.get -> bool +StackExchange.Redis.IServer.IsReplica.get -> bool +StackExchange.Redis.IServer.IsSlave.get -> bool +StackExchange.Redis.IServer.Keys(int database = -1, StackExchange.Redis.RedisValue pattern = default(StackExchange.Redis.RedisValue), int pageSize = 250, long cursor = 0, int pageOffset = 0, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Collections.Generic.IEnumerable! +StackExchange.Redis.IServer.Keys(int database, StackExchange.Redis.RedisValue pattern, int pageSize, StackExchange.Redis.CommandFlags flags) -> System.Collections.Generic.IEnumerable! +StackExchange.Redis.IServer.KeysAsync(int database = -1, StackExchange.Redis.RedisValue pattern = default(StackExchange.Redis.RedisValue), int pageSize = 250, long cursor = 0, int pageOffset = 0, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Collections.Generic.IAsyncEnumerable! 
+StackExchange.Redis.IServer.LastSave(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.DateTime +StackExchange.Redis.IServer.LastSaveAsync(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IServer.LatencyDoctor(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> string! +StackExchange.Redis.IServer.LatencyDoctorAsync(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IServer.LatencyHistory(string! eventName, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.LatencyHistoryEntry[]! +StackExchange.Redis.IServer.LatencyHistoryAsync(string! eventName, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IServer.LatencyLatest(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.LatencyLatestEntry[]! +StackExchange.Redis.IServer.LatencyLatestAsync(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IServer.LatencyReset(string![]? eventNames = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IServer.LatencyResetAsync(string![]? eventNames = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IServer.MakeMaster(StackExchange.Redis.ReplicationChangeOptions options, System.IO.TextWriter? log = null) -> void +StackExchange.Redis.IServer.MakePrimaryAsync(StackExchange.Redis.ReplicationChangeOptions options, System.IO.TextWriter? log = null) -> System.Threading.Tasks.Task! 
+StackExchange.Redis.IServer.MemoryAllocatorStats(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> string? +StackExchange.Redis.IServer.MemoryAllocatorStatsAsync(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IServer.MemoryDoctor(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> string! +StackExchange.Redis.IServer.MemoryDoctorAsync(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IServer.MemoryPurge(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> void +StackExchange.Redis.IServer.MemoryPurgeAsync(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IServer.MemoryStats(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisResult! +StackExchange.Redis.IServer.MemoryStatsAsync(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IServer.ReplicaOf(System.Net.EndPoint! master, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> void +StackExchange.Redis.IServer.ReplicaOfAsync(System.Net.EndPoint! master, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IServer.Role(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.Role! +StackExchange.Redis.IServer.RoleAsync(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! 
+StackExchange.Redis.IServer.Save(StackExchange.Redis.SaveType type, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> void +StackExchange.Redis.IServer.SaveAsync(StackExchange.Redis.SaveType type, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IServer.ScriptExists(byte[]! sha1, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +StackExchange.Redis.IServer.ScriptExists(string! script, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +StackExchange.Redis.IServer.ScriptExistsAsync(byte[]! sha1, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IServer.ScriptExistsAsync(string! script, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IServer.ScriptFlush(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> void +StackExchange.Redis.IServer.ScriptFlushAsync(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IServer.ScriptLoad(StackExchange.Redis.LuaScript! script, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.LoadedLuaScript! +StackExchange.Redis.IServer.ScriptLoad(string! script, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> byte[]! +StackExchange.Redis.IServer.ScriptLoadAsync(StackExchange.Redis.LuaScript! script, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IServer.ScriptLoadAsync(string! script, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! 
+StackExchange.Redis.IServer.SentinelFailover(string! serviceName, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> void +StackExchange.Redis.IServer.SentinelFailoverAsync(string! serviceName, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IServer.SentinelGetMasterAddressByName(string! serviceName, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Net.EndPoint? +StackExchange.Redis.IServer.SentinelGetMasterAddressByNameAsync(string! serviceName, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IServer.SentinelGetReplicaAddresses(string! serviceName, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Net.EndPoint![]! +StackExchange.Redis.IServer.SentinelGetReplicaAddressesAsync(string! serviceName, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IServer.SentinelGetSentinelAddresses(string! serviceName, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Net.EndPoint![]! +StackExchange.Redis.IServer.SentinelGetSentinelAddressesAsync(string! serviceName, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IServer.SentinelMaster(string! serviceName, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Collections.Generic.KeyValuePair[]! +StackExchange.Redis.IServer.SentinelMasterAsync(string! serviceName, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task[]!>! 
+StackExchange.Redis.IServer.SentinelMasters(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Collections.Generic.KeyValuePair[]![]! +StackExchange.Redis.IServer.SentinelMastersAsync(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task[]![]!>! +StackExchange.Redis.IServer.SentinelReplicas(string! serviceName, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Collections.Generic.KeyValuePair[]![]! +StackExchange.Redis.IServer.SentinelReplicasAsync(string! serviceName, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task[]![]!>! +StackExchange.Redis.IServer.SentinelSentinels(string! serviceName, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Collections.Generic.KeyValuePair[]![]! +StackExchange.Redis.IServer.SentinelSentinelsAsync(string! serviceName, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task[]![]!>! +StackExchange.Redis.IServer.SentinelSlaves(string! serviceName, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Collections.Generic.KeyValuePair[]![]! +StackExchange.Redis.IServer.SentinelSlavesAsync(string! serviceName, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task[]![]!>! +StackExchange.Redis.IServer.ServerType.get -> StackExchange.Redis.ServerType +StackExchange.Redis.IServer.Shutdown(StackExchange.Redis.ShutdownMode shutdownMode = StackExchange.Redis.ShutdownMode.Default, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> void +StackExchange.Redis.IServer.SlaveOf(System.Net.EndPoint! 
master, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> void +StackExchange.Redis.IServer.SlaveOfAsync(System.Net.EndPoint! master, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IServer.SlowlogGet(int count = 0, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.CommandTrace![]! +StackExchange.Redis.IServer.SlowlogGetAsync(int count = 0, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IServer.SlowlogReset(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> void +StackExchange.Redis.IServer.SlowlogResetAsync(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IServer.SubscriptionChannels(StackExchange.Redis.RedisChannel pattern = default(StackExchange.Redis.RedisChannel), StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisChannel[]! +StackExchange.Redis.IServer.SubscriptionChannelsAsync(StackExchange.Redis.RedisChannel pattern = default(StackExchange.Redis.RedisChannel), StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IServer.SubscriptionPatternCount(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IServer.SubscriptionPatternCountAsync(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! 
+StackExchange.Redis.IServer.SubscriptionSubscriberCount(StackExchange.Redis.RedisChannel channel, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.IServer.SubscriptionSubscriberCountAsync(StackExchange.Redis.RedisChannel channel, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IServer.SwapDatabases(int first, int second, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> void +StackExchange.Redis.IServer.SwapDatabasesAsync(int first, int second, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IServer.Time(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.DateTime +StackExchange.Redis.IServer.TimeAsync(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IServer.Version.get -> System.Version! +StackExchange.Redis.ISubscriber +StackExchange.Redis.ISubscriber.IdentifyEndpoint(StackExchange.Redis.RedisChannel channel, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Net.EndPoint? +StackExchange.Redis.ISubscriber.IdentifyEndpointAsync(StackExchange.Redis.RedisChannel channel, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! 
+StackExchange.Redis.ISubscriber.IsConnected(StackExchange.Redis.RedisChannel channel = default(StackExchange.Redis.RedisChannel)) -> bool +StackExchange.Redis.ISubscriber.Publish(StackExchange.Redis.RedisChannel channel, StackExchange.Redis.RedisValue message, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +StackExchange.Redis.ISubscriber.PublishAsync(StackExchange.Redis.RedisChannel channel, StackExchange.Redis.RedisValue message, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.ISubscriber.Subscribe(StackExchange.Redis.RedisChannel channel, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.ChannelMessageQueue! +StackExchange.Redis.ISubscriber.Subscribe(StackExchange.Redis.RedisChannel channel, System.Action! handler, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> void +StackExchange.Redis.ISubscriber.SubscribeAsync(StackExchange.Redis.RedisChannel channel, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.ISubscriber.SubscribeAsync(StackExchange.Redis.RedisChannel channel, System.Action! handler, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.ISubscriber.SubscribedEndpoint(StackExchange.Redis.RedisChannel channel) -> System.Net.EndPoint? +StackExchange.Redis.ISubscriber.Unsubscribe(StackExchange.Redis.RedisChannel channel, System.Action? 
handler = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> void +StackExchange.Redis.ISubscriber.UnsubscribeAll(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> void +StackExchange.Redis.ISubscriber.UnsubscribeAllAsync(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.ISubscriber.UnsubscribeAsync(StackExchange.Redis.RedisChannel channel, System.Action? handler = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.ITransaction +StackExchange.Redis.ITransaction.AddCondition(StackExchange.Redis.Condition! condition) -> StackExchange.Redis.ConditionResult! +StackExchange.Redis.ITransaction.Execute(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +StackExchange.Redis.ITransaction.ExecuteAsync(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.KeyspaceIsolation.DatabaseExtensions +StackExchange.Redis.LatencyHistoryEntry +StackExchange.Redis.LatencyHistoryEntry.DurationMilliseconds.get -> int +StackExchange.Redis.LatencyHistoryEntry.LatencyHistoryEntry() -> void +StackExchange.Redis.LatencyHistoryEntry.Timestamp.get -> System.DateTime +StackExchange.Redis.LatencyLatestEntry +StackExchange.Redis.LatencyLatestEntry.DurationMilliseconds.get -> int +StackExchange.Redis.LatencyLatestEntry.EventName.get -> string! 
+StackExchange.Redis.LatencyLatestEntry.LatencyLatestEntry() -> void +StackExchange.Redis.LatencyLatestEntry.MaxDurationMilliseconds.get -> int +StackExchange.Redis.LatencyLatestEntry.Timestamp.get -> System.DateTime +StackExchange.Redis.Lease +StackExchange.Redis.Lease.ArraySegment.get -> System.ArraySegment +StackExchange.Redis.Lease.Dispose() -> void +StackExchange.Redis.Lease.Length.get -> int +StackExchange.Redis.Lease.Memory.get -> System.Memory +StackExchange.Redis.Lease.Span.get -> System.Span +StackExchange.Redis.LinearRetry +StackExchange.Redis.LinearRetry.LinearRetry(int maxRetryElapsedTimeAllowedMilliseconds) -> void +StackExchange.Redis.LinearRetry.ShouldRetry(long currentRetryCount, int timeElapsedMillisecondsSinceLastRetry) -> bool +StackExchange.Redis.LoadedLuaScript +StackExchange.Redis.LoadedLuaScript.Evaluate(StackExchange.Redis.IDatabase! db, object? ps = null, StackExchange.Redis.RedisKey? withKeyPrefix = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisResult! +StackExchange.Redis.LoadedLuaScript.EvaluateAsync(StackExchange.Redis.IDatabaseAsync! db, object? ps = null, StackExchange.Redis.RedisKey? withKeyPrefix = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.LoadedLuaScript.ExecutableScript.get -> string! +StackExchange.Redis.LoadedLuaScript.Hash.get -> byte[]! +StackExchange.Redis.LoadedLuaScript.OriginalScript.get -> string! +StackExchange.Redis.LCSMatchResult +StackExchange.Redis.LCSMatchResult.IsEmpty.get -> bool +StackExchange.Redis.LCSMatchResult.LCSMatchResult() -> void +StackExchange.Redis.LCSMatchResult.LongestMatchLength.get -> long +StackExchange.Redis.LCSMatchResult.Matches.get -> StackExchange.Redis.LCSMatchResult.LCSMatch[]! 
+StackExchange.Redis.LCSMatchResult.LCSMatch +StackExchange.Redis.LCSMatchResult.LCSMatch.LCSMatch() -> void +StackExchange.Redis.LCSMatchResult.LCSMatch.FirstStringIndex.get -> long +StackExchange.Redis.LCSMatchResult.LCSMatch.SecondStringIndex.get -> long +StackExchange.Redis.LCSMatchResult.LCSMatch.Length.get -> long +StackExchange.Redis.ListPopResult +StackExchange.Redis.ListPopResult.IsNull.get -> bool +StackExchange.Redis.ListPopResult.Key.get -> StackExchange.Redis.RedisKey +StackExchange.Redis.ListPopResult.ListPopResult() -> void +StackExchange.Redis.ListPopResult.Values.get -> StackExchange.Redis.RedisValue[]! +StackExchange.Redis.ListSide +StackExchange.Redis.ListSide.Left = 0 -> StackExchange.Redis.ListSide +StackExchange.Redis.ListSide.Right = 1 -> StackExchange.Redis.ListSide +StackExchange.Redis.LuaScript +StackExchange.Redis.LuaScript.Evaluate(StackExchange.Redis.IDatabase! db, object? ps = null, StackExchange.Redis.RedisKey? withKeyPrefix = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisResult! +StackExchange.Redis.LuaScript.EvaluateAsync(StackExchange.Redis.IDatabaseAsync! db, object? ps = null, StackExchange.Redis.RedisKey? withKeyPrefix = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.LuaScript.ExecutableScript.get -> string! +StackExchange.Redis.LuaScript.Load(StackExchange.Redis.IServer! server, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.LoadedLuaScript! +StackExchange.Redis.LuaScript.LoadAsync(StackExchange.Redis.IServer! server, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.LuaScript.OriginalScript.get -> string! 
+StackExchange.Redis.Maintenance.AzureMaintenanceEvent +StackExchange.Redis.Maintenance.AzureMaintenanceEvent.IPAddress.get -> System.Net.IPAddress? +StackExchange.Redis.Maintenance.AzureMaintenanceEvent.IsReplica.get -> bool +StackExchange.Redis.Maintenance.AzureMaintenanceEvent.NonSslPort.get -> int +StackExchange.Redis.Maintenance.AzureMaintenanceEvent.NotificationType.get -> StackExchange.Redis.Maintenance.AzureNotificationType +StackExchange.Redis.Maintenance.AzureMaintenanceEvent.NotificationTypeString.get -> string! +StackExchange.Redis.Maintenance.AzureMaintenanceEvent.SslPort.get -> int +StackExchange.Redis.Maintenance.AzureNotificationType +StackExchange.Redis.Maintenance.AzureNotificationType.NodeMaintenanceEnded = 4 -> StackExchange.Redis.Maintenance.AzureNotificationType +StackExchange.Redis.Maintenance.AzureNotificationType.NodeMaintenanceFailoverComplete = 5 -> StackExchange.Redis.Maintenance.AzureNotificationType +StackExchange.Redis.Maintenance.AzureNotificationType.NodeMaintenanceScaleComplete = 6 -> StackExchange.Redis.Maintenance.AzureNotificationType +StackExchange.Redis.Maintenance.AzureNotificationType.NodeMaintenanceScheduled = 1 -> StackExchange.Redis.Maintenance.AzureNotificationType +StackExchange.Redis.Maintenance.AzureNotificationType.NodeMaintenanceStart = 3 -> StackExchange.Redis.Maintenance.AzureNotificationType +StackExchange.Redis.Maintenance.AzureNotificationType.NodeMaintenanceStarting = 2 -> StackExchange.Redis.Maintenance.AzureNotificationType +StackExchange.Redis.Maintenance.AzureNotificationType.Unknown = 0 -> StackExchange.Redis.Maintenance.AzureNotificationType +StackExchange.Redis.Maintenance.ServerMaintenanceEvent +StackExchange.Redis.Maintenance.ServerMaintenanceEvent.RawMessage.get -> string? +StackExchange.Redis.Maintenance.ServerMaintenanceEvent.ReceivedTimeUtc.get -> System.DateTime +StackExchange.Redis.Maintenance.ServerMaintenanceEvent.StartTimeUtc.get -> System.DateTime? 
+StackExchange.Redis.MigrateOptions +StackExchange.Redis.MigrateOptions.Copy = 1 -> StackExchange.Redis.MigrateOptions +StackExchange.Redis.MigrateOptions.None = 0 -> StackExchange.Redis.MigrateOptions +StackExchange.Redis.MigrateOptions.Replace = 2 -> StackExchange.Redis.MigrateOptions +StackExchange.Redis.NameValueEntry +StackExchange.Redis.NameValueEntry.Equals(StackExchange.Redis.NameValueEntry other) -> bool +StackExchange.Redis.NameValueEntry.Name.get -> StackExchange.Redis.RedisValue +StackExchange.Redis.NameValueEntry.NameValueEntry() -> void +StackExchange.Redis.NameValueEntry.NameValueEntry(StackExchange.Redis.RedisValue name, StackExchange.Redis.RedisValue value) -> void +StackExchange.Redis.NameValueEntry.Value.get -> StackExchange.Redis.RedisValue +StackExchange.Redis.Order +StackExchange.Redis.Order.Ascending = 0 -> StackExchange.Redis.Order +StackExchange.Redis.Order.Descending = 1 -> StackExchange.Redis.Order +StackExchange.Redis.PersistResult +StackExchange.Redis.PersistResult.ConditionNotMet = -1 -> StackExchange.Redis.PersistResult +StackExchange.Redis.PersistResult.NoSuchField = -2 -> StackExchange.Redis.PersistResult +StackExchange.Redis.PersistResult.Success = 1 -> StackExchange.Redis.PersistResult +StackExchange.Redis.Profiling.IProfiledCommand +StackExchange.Redis.Profiling.IProfiledCommand.Command.get -> string! +StackExchange.Redis.Profiling.IProfiledCommand.CommandCreated.get -> System.DateTime +StackExchange.Redis.Profiling.IProfiledCommand.CreationToEnqueued.get -> System.TimeSpan +StackExchange.Redis.Profiling.IProfiledCommand.Db.get -> int +StackExchange.Redis.Profiling.IProfiledCommand.ElapsedTime.get -> System.TimeSpan +StackExchange.Redis.Profiling.IProfiledCommand.EndPoint.get -> System.Net.EndPoint! 
+StackExchange.Redis.Profiling.IProfiledCommand.EnqueuedToSending.get -> System.TimeSpan +StackExchange.Redis.Profiling.IProfiledCommand.Flags.get -> StackExchange.Redis.CommandFlags +StackExchange.Redis.Profiling.IProfiledCommand.ResponseToCompletion.get -> System.TimeSpan +StackExchange.Redis.Profiling.IProfiledCommand.RetransmissionOf.get -> StackExchange.Redis.Profiling.IProfiledCommand? +StackExchange.Redis.Profiling.IProfiledCommand.RetransmissionReason.get -> StackExchange.Redis.RetransmissionReasonType? +StackExchange.Redis.Profiling.IProfiledCommand.SentToResponse.get -> System.TimeSpan +StackExchange.Redis.Profiling.ProfiledCommandEnumerable +StackExchange.Redis.Profiling.ProfiledCommandEnumerable.Count() -> int +StackExchange.Redis.Profiling.ProfiledCommandEnumerable.Count(System.Func! predicate) -> int +StackExchange.Redis.Profiling.ProfiledCommandEnumerable.Enumerator +StackExchange.Redis.Profiling.ProfiledCommandEnumerable.Enumerator.Current.get -> StackExchange.Redis.Profiling.IProfiledCommand! +StackExchange.Redis.Profiling.ProfiledCommandEnumerable.Enumerator.Dispose() -> void +StackExchange.Redis.Profiling.ProfiledCommandEnumerable.Enumerator.Enumerator() -> void +StackExchange.Redis.Profiling.ProfiledCommandEnumerable.Enumerator.MoveNext() -> bool +StackExchange.Redis.Profiling.ProfiledCommandEnumerable.Enumerator.Reset() -> void +StackExchange.Redis.Profiling.ProfiledCommandEnumerable.GetEnumerator() -> StackExchange.Redis.Profiling.ProfiledCommandEnumerable.Enumerator +StackExchange.Redis.Profiling.ProfiledCommandEnumerable.ProfiledCommandEnumerable() -> void +StackExchange.Redis.Profiling.ProfiledCommandEnumerable.ToArray() -> StackExchange.Redis.Profiling.IProfiledCommand![]! +StackExchange.Redis.Profiling.ProfiledCommandEnumerable.ToList() -> System.Collections.Generic.List! 
+StackExchange.Redis.Profiling.ProfilingSession +StackExchange.Redis.Profiling.ProfilingSession.FinishProfiling() -> StackExchange.Redis.Profiling.ProfiledCommandEnumerable +StackExchange.Redis.Profiling.ProfilingSession.ProfilingSession(object? userToken = null) -> void +StackExchange.Redis.Profiling.ProfilingSession.UserToken.get -> object? +StackExchange.Redis.Proxy +StackExchange.Redis.Proxy.Envoyproxy = 2 -> StackExchange.Redis.Proxy +StackExchange.Redis.Proxy.None = 0 -> StackExchange.Redis.Proxy +StackExchange.Redis.Proxy.Twemproxy = 1 -> StackExchange.Redis.Proxy +StackExchange.Redis.RedisChannel +StackExchange.Redis.RedisChannel.Equals(StackExchange.Redis.RedisChannel other) -> bool +StackExchange.Redis.RedisChannel.IsNullOrEmpty.get -> bool +StackExchange.Redis.RedisChannel.IsPattern.get -> bool +StackExchange.Redis.RedisChannel.IsSharded.get -> bool +StackExchange.Redis.RedisChannel.PatternMode +StackExchange.Redis.RedisChannel.PatternMode.Auto = 0 -> StackExchange.Redis.RedisChannel.PatternMode +StackExchange.Redis.RedisChannel.PatternMode.Literal = 1 -> StackExchange.Redis.RedisChannel.PatternMode +StackExchange.Redis.RedisChannel.PatternMode.Pattern = 2 -> StackExchange.Redis.RedisChannel.PatternMode +StackExchange.Redis.RedisChannel.RedisChannel() -> void +StackExchange.Redis.RedisChannel.RedisChannel(byte[]? value, StackExchange.Redis.RedisChannel.PatternMode mode) -> void +StackExchange.Redis.RedisChannel.RedisChannel(string! value, StackExchange.Redis.RedisChannel.PatternMode mode) -> void +StackExchange.Redis.RedisCommandException +StackExchange.Redis.RedisCommandException.RedisCommandException(string! message) -> void +StackExchange.Redis.RedisCommandException.RedisCommandException(string! message, System.Exception! 
innerException) -> void +StackExchange.Redis.RedisConnectionException +StackExchange.Redis.RedisConnectionException.CommandStatus.get -> StackExchange.Redis.CommandStatus +StackExchange.Redis.RedisConnectionException.FailureType.get -> StackExchange.Redis.ConnectionFailureType +StackExchange.Redis.RedisConnectionException.RedisConnectionException(StackExchange.Redis.ConnectionFailureType failureType, string! message) -> void +StackExchange.Redis.RedisConnectionException.RedisConnectionException(StackExchange.Redis.ConnectionFailureType failureType, string! message, System.Exception? innerException) -> void +StackExchange.Redis.RedisConnectionException.RedisConnectionException(StackExchange.Redis.ConnectionFailureType failureType, string! message, System.Exception? innerException, StackExchange.Redis.CommandStatus commandStatus) -> void +StackExchange.Redis.RedisErrorEventArgs +StackExchange.Redis.RedisErrorEventArgs.EndPoint.get -> System.Net.EndPoint! +StackExchange.Redis.RedisErrorEventArgs.Message.get -> string! +StackExchange.Redis.RedisErrorEventArgs.RedisErrorEventArgs(object! sender, System.Net.EndPoint! endpoint, string! message) -> void +StackExchange.Redis.RedisException +StackExchange.Redis.RedisException.RedisException(string! message) -> void +StackExchange.Redis.RedisException.RedisException(string! message, System.Exception? innerException) -> void +StackExchange.Redis.RedisException.RedisException(System.Runtime.Serialization.SerializationInfo! 
info, System.Runtime.Serialization.StreamingContext ctx) -> void +StackExchange.Redis.RedisFeatures +StackExchange.Redis.RedisFeatures.BitwiseOperations.get -> bool +StackExchange.Redis.RedisFeatures.ClientName.get -> bool +StackExchange.Redis.RedisFeatures.ExecAbort.get -> bool +StackExchange.Redis.RedisFeatures.ExpireOverwrite.get -> bool +StackExchange.Redis.RedisFeatures.Geo.get -> bool +StackExchange.Redis.RedisFeatures.GetDelete.get -> bool +StackExchange.Redis.RedisFeatures.HashStringLength.get -> bool +StackExchange.Redis.RedisFeatures.HashVaradicDelete.get -> bool +StackExchange.Redis.RedisFeatures.HyperLogLogCountReplicaSafe.get -> bool +StackExchange.Redis.RedisFeatures.HyperLogLogCountSlaveSafe.get -> bool +StackExchange.Redis.RedisFeatures.IncrementFloat.get -> bool +StackExchange.Redis.RedisFeatures.InfoSections.get -> bool +StackExchange.Redis.RedisFeatures.KeyTouch.get -> bool +StackExchange.Redis.RedisFeatures.ListInsert.get -> bool +StackExchange.Redis.RedisFeatures.Memory.get -> bool +StackExchange.Redis.RedisFeatures.MillisecondExpiry.get -> bool +StackExchange.Redis.RedisFeatures.Module.get -> bool +StackExchange.Redis.RedisFeatures.MultipleRandom.get -> bool +StackExchange.Redis.RedisFeatures.Persist.get -> bool +StackExchange.Redis.RedisFeatures.PushIfNotExists.get -> bool +StackExchange.Redis.RedisFeatures.PushMultiple.get -> bool +StackExchange.Redis.RedisFeatures.RedisFeatures() -> void +StackExchange.Redis.RedisFeatures.RedisFeatures(System.Version! 
version) -> void +StackExchange.Redis.RedisFeatures.ReplicaCommands.get -> bool +StackExchange.Redis.RedisFeatures.Scan.get -> bool +StackExchange.Redis.RedisFeatures.Scripting.get -> bool +StackExchange.Redis.RedisFeatures.ScriptingDatabaseSafe.get -> bool +StackExchange.Redis.RedisFeatures.SetAndGet.get -> bool +StackExchange.Redis.RedisFeatures.SetConditional.get -> bool +StackExchange.Redis.RedisFeatures.SetKeepTtl.get -> bool +StackExchange.Redis.RedisFeatures.SetNotExistsAndGet.get -> bool +StackExchange.Redis.RedisFeatures.SetPopMultiple.get -> bool +StackExchange.Redis.RedisFeatures.SetVaradicAddRemove.get -> bool +StackExchange.Redis.RedisFeatures.SortedSetPop.get -> bool +StackExchange.Redis.RedisFeatures.SortedSetRangeStore.get -> bool +StackExchange.Redis.RedisFeatures.Streams.get -> bool +StackExchange.Redis.RedisFeatures.StringLength.get -> bool +StackExchange.Redis.RedisFeatures.StringSetRange.get -> bool +StackExchange.Redis.RedisFeatures.SwapDB.get -> bool +StackExchange.Redis.RedisFeatures.Time.get -> bool +StackExchange.Redis.RedisFeatures.Unlink.get -> bool +StackExchange.Redis.RedisFeatures.Version.get -> System.Version! +StackExchange.Redis.RedisKey +StackExchange.Redis.RedisKey.Append(StackExchange.Redis.RedisKey suffix) -> StackExchange.Redis.RedisKey +StackExchange.Redis.RedisKey.Equals(StackExchange.Redis.RedisKey other) -> bool +StackExchange.Redis.RedisKey.Prepend(StackExchange.Redis.RedisKey prefix) -> StackExchange.Redis.RedisKey +StackExchange.Redis.RedisKey.RedisKey() -> void +StackExchange.Redis.RedisKey.RedisKey(string? key) -> void +StackExchange.Redis.RedisResult +StackExchange.Redis.RedisResult.RedisResult() -> void +StackExchange.Redis.RedisResult.ToDictionary(System.Collections.Generic.IEqualityComparer? comparer = null) -> System.Collections.Generic.Dictionary! +StackExchange.Redis.RedisServerException +StackExchange.Redis.RedisServerException.RedisServerException(string! 
message) -> void +StackExchange.Redis.RedisStream +StackExchange.Redis.RedisStream.Entries.get -> StackExchange.Redis.StreamEntry[]! +StackExchange.Redis.RedisStream.Key.get -> StackExchange.Redis.RedisKey +StackExchange.Redis.RedisStream.RedisStream() -> void +StackExchange.Redis.RedisTimeoutException +StackExchange.Redis.RedisTimeoutException.Commandstatus.get -> StackExchange.Redis.CommandStatus +StackExchange.Redis.RedisTimeoutException.RedisTimeoutException(string! message, StackExchange.Redis.CommandStatus commandStatus) -> void +StackExchange.Redis.RedisType +StackExchange.Redis.RedisType.Hash = 5 -> StackExchange.Redis.RedisType +StackExchange.Redis.RedisType.List = 2 -> StackExchange.Redis.RedisType +StackExchange.Redis.RedisType.None = 0 -> StackExchange.Redis.RedisType +StackExchange.Redis.RedisType.Set = 3 -> StackExchange.Redis.RedisType +StackExchange.Redis.RedisType.SortedSet = 4 -> StackExchange.Redis.RedisType +StackExchange.Redis.RedisType.Stream = 6 -> StackExchange.Redis.RedisType +StackExchange.Redis.RedisType.String = 1 -> StackExchange.Redis.RedisType +StackExchange.Redis.RedisType.Unknown = 7 -> StackExchange.Redis.RedisType +StackExchange.Redis.RedisValue +StackExchange.Redis.RedisValue.Box() -> object? +StackExchange.Redis.RedisValue.CompareTo(StackExchange.Redis.RedisValue other) -> int +StackExchange.Redis.RedisValue.Equals(StackExchange.Redis.RedisValue other) -> bool +StackExchange.Redis.RedisValue.HasValue.get -> bool +StackExchange.Redis.RedisValue.IsInteger.get -> bool +StackExchange.Redis.RedisValue.IsNull.get -> bool +StackExchange.Redis.RedisValue.IsNullOrEmpty.get -> bool +StackExchange.Redis.RedisValue.Length() -> long +StackExchange.Redis.RedisValue.RedisValue() -> void +StackExchange.Redis.RedisValue.RedisValue(string! 
value) -> void +StackExchange.Redis.RedisValue.StartsWith(StackExchange.Redis.RedisValue value) -> bool +StackExchange.Redis.RedisValue.TryParse(out double val) -> bool +StackExchange.Redis.RedisValue.TryParse(out int val) -> bool +StackExchange.Redis.RedisValue.TryParse(out long val) -> bool +StackExchange.Redis.RedisValueWithExpiry +StackExchange.Redis.RedisValueWithExpiry.Expiry.get -> System.TimeSpan? +StackExchange.Redis.RedisValueWithExpiry.RedisValueWithExpiry() -> void +StackExchange.Redis.RedisValueWithExpiry.RedisValueWithExpiry(StackExchange.Redis.RedisValue value, System.TimeSpan? expiry) -> void +StackExchange.Redis.RedisValueWithExpiry.Value.get -> StackExchange.Redis.RedisValue +StackExchange.Redis.ReplicationChangeOptions +StackExchange.Redis.ReplicationChangeOptions.All = StackExchange.Redis.ReplicationChangeOptions.SetTiebreaker | StackExchange.Redis.ReplicationChangeOptions.Broadcast | StackExchange.Redis.ReplicationChangeOptions.EnslaveSubordinates -> StackExchange.Redis.ReplicationChangeOptions +StackExchange.Redis.ReplicationChangeOptions.Broadcast = 2 -> StackExchange.Redis.ReplicationChangeOptions +StackExchange.Redis.ReplicationChangeOptions.EnslaveSubordinates = 4 -> StackExchange.Redis.ReplicationChangeOptions +StackExchange.Redis.ReplicationChangeOptions.None = 0 -> StackExchange.Redis.ReplicationChangeOptions +StackExchange.Redis.ReplicationChangeOptions.ReplicateToOtherEndpoints = 4 -> StackExchange.Redis.ReplicationChangeOptions +StackExchange.Redis.ReplicationChangeOptions.SetTiebreaker = 1 -> StackExchange.Redis.ReplicationChangeOptions +StackExchange.Redis.ResultType +StackExchange.Redis.ResultType.BulkString = 4 -> StackExchange.Redis.ResultType +StackExchange.Redis.ResultType.Error = 2 -> StackExchange.Redis.ResultType +StackExchange.Redis.ResultType.Integer = 3 -> StackExchange.Redis.ResultType +StackExchange.Redis.ResultType.MultiBulk = 5 -> StackExchange.Redis.ResultType +StackExchange.Redis.ResultType.None = 0 -> 
StackExchange.Redis.ResultType +StackExchange.Redis.ResultType.SimpleString = 1 -> StackExchange.Redis.ResultType +StackExchange.Redis.RetransmissionReasonType +StackExchange.Redis.RetransmissionReasonType.Ask = 1 -> StackExchange.Redis.RetransmissionReasonType +StackExchange.Redis.RetransmissionReasonType.Moved = 2 -> StackExchange.Redis.RetransmissionReasonType +StackExchange.Redis.RetransmissionReasonType.None = 0 -> StackExchange.Redis.RetransmissionReasonType +StackExchange.Redis.Role +StackExchange.Redis.Role.Master +StackExchange.Redis.Role.Master.Replica +StackExchange.Redis.Role.Master.Replica.Ip.get -> string! +StackExchange.Redis.Role.Master.Replica.Port.get -> int +StackExchange.Redis.Role.Master.Replica.Replica() -> void +StackExchange.Redis.Role.Master.Replica.ReplicationOffset.get -> long +StackExchange.Redis.Role.Master.Replicas.get -> System.Collections.Generic.ICollection! +StackExchange.Redis.Role.Master.ReplicationOffset.get -> long +StackExchange.Redis.Role.Replica +StackExchange.Redis.Role.Replica.MasterIp.get -> string! +StackExchange.Redis.Role.Replica.MasterPort.get -> int +StackExchange.Redis.Role.Replica.ReplicationOffset.get -> long +StackExchange.Redis.Role.Replica.State.get -> string! +StackExchange.Redis.Role.Sentinel +StackExchange.Redis.Role.Sentinel.MonitoredMasters.get -> System.Collections.Generic.ICollection! +StackExchange.Redis.Role.Unknown +StackExchange.Redis.Role.Value.get -> string! +StackExchange.Redis.SaveType +StackExchange.Redis.SaveType.BackgroundRewriteAppendOnlyFile = 0 -> StackExchange.Redis.SaveType +StackExchange.Redis.SaveType.BackgroundSave = 1 -> StackExchange.Redis.SaveType +StackExchange.Redis.SaveType.ForegroundSave = 2 -> StackExchange.Redis.SaveType +StackExchange.Redis.ServerCounters +StackExchange.Redis.ServerCounters.EndPoint.get -> System.Net.EndPoint? +StackExchange.Redis.ServerCounters.Interactive.get -> StackExchange.Redis.ConnectionCounters! 
+StackExchange.Redis.ServerCounters.Other.get -> StackExchange.Redis.ConnectionCounters! +StackExchange.Redis.ServerCounters.ServerCounters(System.Net.EndPoint? endpoint) -> void +StackExchange.Redis.ServerCounters.Subscription.get -> StackExchange.Redis.ConnectionCounters! +StackExchange.Redis.ServerCounters.TotalOutstanding.get -> long +StackExchange.Redis.ServerType +StackExchange.Redis.ServerType.Cluster = 2 -> StackExchange.Redis.ServerType +StackExchange.Redis.ServerType.Envoyproxy = 4 -> StackExchange.Redis.ServerType +StackExchange.Redis.ServerType.Sentinel = 1 -> StackExchange.Redis.ServerType +StackExchange.Redis.ServerType.Standalone = 0 -> StackExchange.Redis.ServerType +StackExchange.Redis.ServerType.Twemproxy = 3 -> StackExchange.Redis.ServerType +StackExchange.Redis.SetOperation +StackExchange.Redis.SetOperation.Difference = 2 -> StackExchange.Redis.SetOperation +StackExchange.Redis.SetOperation.Intersect = 1 -> StackExchange.Redis.SetOperation +StackExchange.Redis.SetOperation.Union = 0 -> StackExchange.Redis.SetOperation +StackExchange.Redis.ShutdownMode +StackExchange.Redis.ShutdownMode.Always = 2 -> StackExchange.Redis.ShutdownMode +StackExchange.Redis.ShutdownMode.Default = 0 -> StackExchange.Redis.ShutdownMode +StackExchange.Redis.ShutdownMode.Never = 1 -> StackExchange.Redis.ShutdownMode +StackExchange.Redis.SlotRange +StackExchange.Redis.SlotRange.CompareTo(StackExchange.Redis.SlotRange other) -> int +StackExchange.Redis.SlotRange.Equals(StackExchange.Redis.SlotRange other) -> bool +StackExchange.Redis.SlotRange.From.get -> int +StackExchange.Redis.SlotRange.SlotRange() -> void +StackExchange.Redis.SlotRange.SlotRange(int from, int to) -> void +StackExchange.Redis.SlotRange.To.get -> int +StackExchange.Redis.SocketManager +StackExchange.Redis.SocketManager.Dispose() -> void +StackExchange.Redis.SocketManager.Name.get -> string! +StackExchange.Redis.SocketManager.SocketManager(string? 
name = null, int workerCount = 0, StackExchange.Redis.SocketManager.SocketManagerOptions options = StackExchange.Redis.SocketManager.SocketManagerOptions.None) -> void +StackExchange.Redis.SocketManager.SocketManager(string! name) -> void +StackExchange.Redis.SocketManager.SocketManager(string! name, bool useHighPrioritySocketThreads) -> void +StackExchange.Redis.SocketManager.SocketManager(string! name, int workerCount, bool useHighPrioritySocketThreads) -> void +StackExchange.Redis.SocketManager.SocketManagerOptions +StackExchange.Redis.SocketManager.SocketManagerOptions.None = 0 -> StackExchange.Redis.SocketManager.SocketManagerOptions +StackExchange.Redis.SocketManager.SocketManagerOptions.UseHighPrioritySocketThreads = 1 -> StackExchange.Redis.SocketManager.SocketManagerOptions +StackExchange.Redis.SocketManager.SocketManagerOptions.UseThreadPool = 2 -> StackExchange.Redis.SocketManager.SocketManagerOptions +StackExchange.Redis.SortedSetEntry +StackExchange.Redis.SortedSetEntry.CompareTo(object? 
obj) -> int +StackExchange.Redis.SortedSetEntry.CompareTo(StackExchange.Redis.SortedSetEntry other) -> int +StackExchange.Redis.SortedSetEntry.Element.get -> StackExchange.Redis.RedisValue +StackExchange.Redis.SortedSetEntry.Equals(StackExchange.Redis.SortedSetEntry other) -> bool +StackExchange.Redis.SortedSetEntry.Key.get -> StackExchange.Redis.RedisValue +StackExchange.Redis.SortedSetEntry.Score.get -> double +StackExchange.Redis.SortedSetEntry.SortedSetEntry() -> void +StackExchange.Redis.SortedSetEntry.SortedSetEntry(StackExchange.Redis.RedisValue element, double score) -> void +StackExchange.Redis.SortedSetEntry.Value.get -> double +StackExchange.Redis.SortedSetOrder +StackExchange.Redis.SortedSetOrder.ByLex = 2 -> StackExchange.Redis.SortedSetOrder +StackExchange.Redis.SortedSetOrder.ByRank = 0 -> StackExchange.Redis.SortedSetOrder +StackExchange.Redis.SortedSetOrder.ByScore = 1 -> StackExchange.Redis.SortedSetOrder +StackExchange.Redis.SortedSetPopResult +StackExchange.Redis.SortedSetPopResult.Entries.get -> StackExchange.Redis.SortedSetEntry[]! +StackExchange.Redis.SortedSetPopResult.IsNull.get -> bool +StackExchange.Redis.SortedSetPopResult.Key.get -> StackExchange.Redis.RedisKey +StackExchange.Redis.SortedSetPopResult.SortedSetPopResult() -> void +StackExchange.Redis.SortType +StackExchange.Redis.SortType.Alphabetic = 1 -> StackExchange.Redis.SortType +StackExchange.Redis.SortType.Numeric = 0 -> StackExchange.Redis.SortType +StackExchange.Redis.StreamAutoClaimIdsOnlyResult +StackExchange.Redis.StreamAutoClaimIdsOnlyResult.ClaimedIds.get -> StackExchange.Redis.RedisValue[]! +StackExchange.Redis.StreamAutoClaimIdsOnlyResult.DeletedIds.get -> StackExchange.Redis.RedisValue[]! 
+StackExchange.Redis.StreamAutoClaimIdsOnlyResult.IsNull.get -> bool +StackExchange.Redis.StreamAutoClaimIdsOnlyResult.NextStartId.get -> StackExchange.Redis.RedisValue +StackExchange.Redis.StreamAutoClaimIdsOnlyResult.StreamAutoClaimIdsOnlyResult() -> void +StackExchange.Redis.StreamAutoClaimResult +StackExchange.Redis.StreamAutoClaimResult.ClaimedEntries.get -> StackExchange.Redis.StreamEntry[]! +StackExchange.Redis.StreamAutoClaimResult.DeletedIds.get -> StackExchange.Redis.RedisValue[]! +StackExchange.Redis.StreamAutoClaimResult.IsNull.get -> bool +StackExchange.Redis.StreamAutoClaimResult.NextStartId.get -> StackExchange.Redis.RedisValue +StackExchange.Redis.StreamAutoClaimResult.StreamAutoClaimResult() -> void +StackExchange.Redis.StreamConsumer +StackExchange.Redis.StreamConsumer.Name.get -> StackExchange.Redis.RedisValue +StackExchange.Redis.StreamConsumer.PendingMessageCount.get -> int +StackExchange.Redis.StreamConsumer.StreamConsumer() -> void +StackExchange.Redis.StreamConsumerInfo +StackExchange.Redis.StreamConsumerInfo.IdleTimeInMilliseconds.get -> long +StackExchange.Redis.StreamConsumerInfo.Name.get -> string! +StackExchange.Redis.StreamConsumerInfo.PendingMessageCount.get -> int +StackExchange.Redis.StreamConsumerInfo.StreamConsumerInfo() -> void +StackExchange.Redis.StreamEntry +StackExchange.Redis.StreamEntry.Id.get -> StackExchange.Redis.RedisValue +StackExchange.Redis.StreamEntry.IsNull.get -> bool +StackExchange.Redis.StreamEntry.StreamEntry() -> void +StackExchange.Redis.StreamEntry.StreamEntry(StackExchange.Redis.RedisValue id, StackExchange.Redis.NameValueEntry[]! values) -> void +StackExchange.Redis.StreamEntry.this[StackExchange.Redis.RedisValue fieldName].get -> StackExchange.Redis.RedisValue +StackExchange.Redis.StreamEntry.Values.get -> StackExchange.Redis.NameValueEntry[]! +StackExchange.Redis.StreamGroupInfo +StackExchange.Redis.StreamGroupInfo.ConsumerCount.get -> int +StackExchange.Redis.StreamGroupInfo.EntriesRead.get -> long? 
+StackExchange.Redis.StreamGroupInfo.Lag.get -> long? +StackExchange.Redis.StreamGroupInfo.LastDeliveredId.get -> string? +StackExchange.Redis.StreamGroupInfo.Name.get -> string! +StackExchange.Redis.StreamGroupInfo.PendingMessageCount.get -> int +StackExchange.Redis.StreamGroupInfo.StreamGroupInfo() -> void +StackExchange.Redis.StreamInfo +StackExchange.Redis.StreamInfo.ConsumerGroupCount.get -> int +StackExchange.Redis.StreamInfo.FirstEntry.get -> StackExchange.Redis.StreamEntry +StackExchange.Redis.StreamInfo.LastEntry.get -> StackExchange.Redis.StreamEntry +StackExchange.Redis.StreamInfo.LastGeneratedId.get -> StackExchange.Redis.RedisValue +StackExchange.Redis.StreamInfo.Length.get -> int +StackExchange.Redis.StreamInfo.RadixTreeKeys.get -> int +StackExchange.Redis.StreamInfo.RadixTreeNodes.get -> int +StackExchange.Redis.StreamInfo.StreamInfo() -> void +StackExchange.Redis.StreamPendingInfo +StackExchange.Redis.StreamPendingInfo.Consumers.get -> StackExchange.Redis.StreamConsumer[]! 
+StackExchange.Redis.StreamPendingInfo.HighestPendingMessageId.get -> StackExchange.Redis.RedisValue +StackExchange.Redis.StreamPendingInfo.LowestPendingMessageId.get -> StackExchange.Redis.RedisValue +StackExchange.Redis.StreamPendingInfo.PendingMessageCount.get -> int +StackExchange.Redis.StreamPendingInfo.StreamPendingInfo() -> void +StackExchange.Redis.StreamPendingMessageInfo +StackExchange.Redis.StreamPendingMessageInfo.ConsumerName.get -> StackExchange.Redis.RedisValue +StackExchange.Redis.StreamPendingMessageInfo.DeliveryCount.get -> int +StackExchange.Redis.StreamPendingMessageInfo.IdleTimeInMilliseconds.get -> long +StackExchange.Redis.StreamPendingMessageInfo.MessageId.get -> StackExchange.Redis.RedisValue +StackExchange.Redis.StreamPendingMessageInfo.StreamPendingMessageInfo() -> void +StackExchange.Redis.StreamPosition +StackExchange.Redis.StreamPosition.Key.get -> StackExchange.Redis.RedisKey +StackExchange.Redis.StreamPosition.Position.get -> StackExchange.Redis.RedisValue +StackExchange.Redis.StreamPosition.StreamPosition() -> void +StackExchange.Redis.StreamPosition.StreamPosition(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue position) -> void +StackExchange.Redis.StringIndexType +StackExchange.Redis.StringIndexType.Byte = 0 -> StackExchange.Redis.StringIndexType +StackExchange.Redis.StringIndexType.Bit = 1 -> StackExchange.Redis.StringIndexType +StackExchange.Redis.SortedSetWhen +StackExchange.Redis.SortedSetWhen.Always = 0 -> StackExchange.Redis.SortedSetWhen +StackExchange.Redis.SortedSetWhen.Exists = 1 -> StackExchange.Redis.SortedSetWhen +StackExchange.Redis.SortedSetWhen.GreaterThan = 2 -> StackExchange.Redis.SortedSetWhen +StackExchange.Redis.SortedSetWhen.LessThan = 4 -> StackExchange.Redis.SortedSetWhen +StackExchange.Redis.SortedSetWhen.NotExists = 8 -> StackExchange.Redis.SortedSetWhen +StackExchange.Redis.When +StackExchange.Redis.When.Always = 0 -> StackExchange.Redis.When +StackExchange.Redis.When.Exists = 1 -> 
StackExchange.Redis.When +StackExchange.Redis.When.NotExists = 2 -> StackExchange.Redis.When +static StackExchange.Redis.BacklogPolicy.Default.get -> StackExchange.Redis.BacklogPolicy! +static StackExchange.Redis.BacklogPolicy.FailFast.get -> StackExchange.Redis.BacklogPolicy! +static StackExchange.Redis.ChannelMessage.operator !=(StackExchange.Redis.ChannelMessage left, StackExchange.Redis.ChannelMessage right) -> bool +static StackExchange.Redis.ChannelMessage.operator ==(StackExchange.Redis.ChannelMessage left, StackExchange.Redis.ChannelMessage right) -> bool +static StackExchange.Redis.CommandMap.Create(System.Collections.Generic.Dictionary? overrides) -> StackExchange.Redis.CommandMap! +static StackExchange.Redis.CommandMap.Create(System.Collections.Generic.HashSet! commands, bool available = true) -> StackExchange.Redis.CommandMap! +static StackExchange.Redis.CommandMap.Default.get -> StackExchange.Redis.CommandMap! +static StackExchange.Redis.CommandMap.Envoyproxy.get -> StackExchange.Redis.CommandMap! +static StackExchange.Redis.CommandMap.Sentinel.get -> StackExchange.Redis.CommandMap! +static StackExchange.Redis.CommandMap.SSDB.get -> StackExchange.Redis.CommandMap! +static StackExchange.Redis.CommandMap.Twemproxy.get -> StackExchange.Redis.CommandMap! +static StackExchange.Redis.Condition.HashEqual(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue hashField, StackExchange.Redis.RedisValue value) -> StackExchange.Redis.Condition! +static StackExchange.Redis.Condition.HashExists(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue hashField) -> StackExchange.Redis.Condition! +static StackExchange.Redis.Condition.HashLengthEqual(StackExchange.Redis.RedisKey key, long length) -> StackExchange.Redis.Condition! +static StackExchange.Redis.Condition.HashLengthGreaterThan(StackExchange.Redis.RedisKey key, long length) -> StackExchange.Redis.Condition! 
+static StackExchange.Redis.Condition.HashLengthLessThan(StackExchange.Redis.RedisKey key, long length) -> StackExchange.Redis.Condition! +static StackExchange.Redis.Condition.HashNotEqual(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue hashField, StackExchange.Redis.RedisValue value) -> StackExchange.Redis.Condition! +static StackExchange.Redis.Condition.HashNotExists(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue hashField) -> StackExchange.Redis.Condition! +static StackExchange.Redis.Condition.KeyExists(StackExchange.Redis.RedisKey key) -> StackExchange.Redis.Condition! +static StackExchange.Redis.Condition.KeyNotExists(StackExchange.Redis.RedisKey key) -> StackExchange.Redis.Condition! +static StackExchange.Redis.Condition.ListIndexEqual(StackExchange.Redis.RedisKey key, long index, StackExchange.Redis.RedisValue value) -> StackExchange.Redis.Condition! +static StackExchange.Redis.Condition.ListIndexExists(StackExchange.Redis.RedisKey key, long index) -> StackExchange.Redis.Condition! +static StackExchange.Redis.Condition.ListIndexNotEqual(StackExchange.Redis.RedisKey key, long index, StackExchange.Redis.RedisValue value) -> StackExchange.Redis.Condition! +static StackExchange.Redis.Condition.ListIndexNotExists(StackExchange.Redis.RedisKey key, long index) -> StackExchange.Redis.Condition! +static StackExchange.Redis.Condition.ListLengthEqual(StackExchange.Redis.RedisKey key, long length) -> StackExchange.Redis.Condition! +static StackExchange.Redis.Condition.ListLengthGreaterThan(StackExchange.Redis.RedisKey key, long length) -> StackExchange.Redis.Condition! +static StackExchange.Redis.Condition.ListLengthLessThan(StackExchange.Redis.RedisKey key, long length) -> StackExchange.Redis.Condition! +static StackExchange.Redis.Condition.SetContains(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member) -> StackExchange.Redis.Condition! 
+static StackExchange.Redis.Condition.SetLengthEqual(StackExchange.Redis.RedisKey key, long length) -> StackExchange.Redis.Condition! +static StackExchange.Redis.Condition.SetLengthGreaterThan(StackExchange.Redis.RedisKey key, long length) -> StackExchange.Redis.Condition! +static StackExchange.Redis.Condition.SetLengthLessThan(StackExchange.Redis.RedisKey key, long length) -> StackExchange.Redis.Condition! +static StackExchange.Redis.Condition.SetNotContains(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member) -> StackExchange.Redis.Condition! +static StackExchange.Redis.Condition.SortedSetContains(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member) -> StackExchange.Redis.Condition! +static StackExchange.Redis.Condition.SortedSetEqual(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member, StackExchange.Redis.RedisValue score) -> StackExchange.Redis.Condition! +static StackExchange.Redis.Condition.SortedSetLengthEqual(StackExchange.Redis.RedisKey key, long length) -> StackExchange.Redis.Condition! +static StackExchange.Redis.Condition.SortedSetLengthEqual(StackExchange.Redis.RedisKey key, long length, double min = -Infinity, double max = Infinity) -> StackExchange.Redis.Condition! +static StackExchange.Redis.Condition.SortedSetLengthGreaterThan(StackExchange.Redis.RedisKey key, long length) -> StackExchange.Redis.Condition! +static StackExchange.Redis.Condition.SortedSetLengthGreaterThan(StackExchange.Redis.RedisKey key, long length, double min = -Infinity, double max = Infinity) -> StackExchange.Redis.Condition! +static StackExchange.Redis.Condition.SortedSetLengthLessThan(StackExchange.Redis.RedisKey key, long length) -> StackExchange.Redis.Condition! +static StackExchange.Redis.Condition.SortedSetLengthLessThan(StackExchange.Redis.RedisKey key, long length, double min = -Infinity, double max = Infinity) -> StackExchange.Redis.Condition! 
+static StackExchange.Redis.Condition.SortedSetNotContains(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member) -> StackExchange.Redis.Condition! +static StackExchange.Redis.Condition.SortedSetNotEqual(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member, StackExchange.Redis.RedisValue score) -> StackExchange.Redis.Condition! +static StackExchange.Redis.Condition.SortedSetScoreExists(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue score) -> StackExchange.Redis.Condition! +static StackExchange.Redis.Condition.SortedSetScoreExists(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue score, StackExchange.Redis.RedisValue count) -> StackExchange.Redis.Condition! +static StackExchange.Redis.Condition.SortedSetScoreNotExists(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue score) -> StackExchange.Redis.Condition! +static StackExchange.Redis.Condition.SortedSetScoreNotExists(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue score, StackExchange.Redis.RedisValue count) -> StackExchange.Redis.Condition! +static StackExchange.Redis.Condition.StreamLengthEqual(StackExchange.Redis.RedisKey key, long length) -> StackExchange.Redis.Condition! +static StackExchange.Redis.Condition.StreamLengthGreaterThan(StackExchange.Redis.RedisKey key, long length) -> StackExchange.Redis.Condition! +static StackExchange.Redis.Condition.StreamLengthLessThan(StackExchange.Redis.RedisKey key, long length) -> StackExchange.Redis.Condition! +static StackExchange.Redis.Condition.StringEqual(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue value) -> StackExchange.Redis.Condition! +static StackExchange.Redis.Condition.StringLengthEqual(StackExchange.Redis.RedisKey key, long length) -> StackExchange.Redis.Condition! +static StackExchange.Redis.Condition.StringLengthGreaterThan(StackExchange.Redis.RedisKey key, long length) -> StackExchange.Redis.Condition! 
+static StackExchange.Redis.Condition.StringLengthLessThan(StackExchange.Redis.RedisKey key, long length) -> StackExchange.Redis.Condition! +static StackExchange.Redis.Condition.StringNotEqual(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue value) -> StackExchange.Redis.Condition! +static StackExchange.Redis.Configuration.DefaultOptionsProvider.AddProvider(StackExchange.Redis.Configuration.DefaultOptionsProvider! provider) -> void +static StackExchange.Redis.Configuration.DefaultOptionsProvider.ComputerName.get -> string! +static StackExchange.Redis.Configuration.DefaultOptionsProvider.GetProvider(StackExchange.Redis.EndPointCollection! endpoints) -> StackExchange.Redis.Configuration.DefaultOptionsProvider! +static StackExchange.Redis.Configuration.DefaultOptionsProvider.GetProvider(System.Net.EndPoint! endpoint) -> StackExchange.Redis.Configuration.DefaultOptionsProvider! +static StackExchange.Redis.Configuration.DefaultOptionsProvider.LibraryVersion.get -> string! +static StackExchange.Redis.ConfigurationOptions.Parse(string! configuration) -> StackExchange.Redis.ConfigurationOptions! +static StackExchange.Redis.ConfigurationOptions.Parse(string! configuration, bool ignoreUnknown) -> StackExchange.Redis.ConfigurationOptions! +static StackExchange.Redis.ConnectionMultiplexer.Connect(StackExchange.Redis.ConfigurationOptions! configuration, System.IO.TextWriter? log = null) -> StackExchange.Redis.ConnectionMultiplexer! +static StackExchange.Redis.ConnectionMultiplexer.Connect(string! configuration, System.Action! configure, System.IO.TextWriter? log = null) -> StackExchange.Redis.ConnectionMultiplexer! +static StackExchange.Redis.ConnectionMultiplexer.Connect(string! configuration, System.IO.TextWriter? log = null) -> StackExchange.Redis.ConnectionMultiplexer! +static StackExchange.Redis.ConnectionMultiplexer.ConnectAsync(StackExchange.Redis.ConfigurationOptions! configuration, System.IO.TextWriter? log = null) -> System.Threading.Tasks.Task! 
+static StackExchange.Redis.ConnectionMultiplexer.ConnectAsync(string! configuration, System.Action! configure, System.IO.TextWriter? log = null) -> System.Threading.Tasks.Task! +static StackExchange.Redis.ConnectionMultiplexer.ConnectAsync(string! configuration, System.IO.TextWriter? log = null) -> System.Threading.Tasks.Task! +static StackExchange.Redis.ConnectionMultiplexer.Factory.get -> System.Threading.Tasks.TaskFactory! +static StackExchange.Redis.ConnectionMultiplexer.Factory.set -> void +static StackExchange.Redis.ConnectionMultiplexer.GetFeatureFlag(string! flag) -> bool +static StackExchange.Redis.ConnectionMultiplexer.SentinelConnect(StackExchange.Redis.ConfigurationOptions! configuration, System.IO.TextWriter? log = null) -> StackExchange.Redis.ConnectionMultiplexer! +static StackExchange.Redis.ConnectionMultiplexer.SentinelConnect(string! configuration, System.IO.TextWriter? log = null) -> StackExchange.Redis.ConnectionMultiplexer! +static StackExchange.Redis.ConnectionMultiplexer.SentinelConnectAsync(StackExchange.Redis.ConfigurationOptions! configuration, System.IO.TextWriter? log = null) -> System.Threading.Tasks.Task! +static StackExchange.Redis.ConnectionMultiplexer.SentinelConnectAsync(string! configuration, System.IO.TextWriter? log = null) -> System.Threading.Tasks.Task! +static StackExchange.Redis.ConnectionMultiplexer.SetFeatureFlag(string! flag, bool enabled) -> void +static StackExchange.Redis.EndPointCollection.ToString(System.Net.EndPoint? endpoint) -> string! +static StackExchange.Redis.EndPointCollection.TryParse(string! endpoint) -> System.Net.EndPoint? +static StackExchange.Redis.ExtensionMethods.AsStream(this StackExchange.Redis.Lease? bytes, bool ownsLease = true) -> System.IO.Stream? +static StackExchange.Redis.ExtensionMethods.DecodeLease(this StackExchange.Redis.Lease? bytes, System.Text.Encoding? encoding = null) -> StackExchange.Redis.Lease? 
+static StackExchange.Redis.ExtensionMethods.DecodeString(this StackExchange.Redis.Lease! bytes, System.Text.Encoding? encoding = null) -> string? +static StackExchange.Redis.ExtensionMethods.ToDictionary(this StackExchange.Redis.HashEntry[]? hash) -> System.Collections.Generic.Dictionary? +static StackExchange.Redis.ExtensionMethods.ToDictionary(this StackExchange.Redis.SortedSetEntry[]? sortedSet) -> System.Collections.Generic.Dictionary? +static StackExchange.Redis.ExtensionMethods.ToDictionary(this System.Collections.Generic.KeyValuePair[]? pairs) -> System.Collections.Generic.Dictionary? +static StackExchange.Redis.ExtensionMethods.ToDictionary(this System.Collections.Generic.KeyValuePair[]? pairs) -> System.Collections.Generic.Dictionary? +static StackExchange.Redis.ExtensionMethods.ToRedisValueArray(this string![]? values) -> StackExchange.Redis.RedisValue[]? +static StackExchange.Redis.ExtensionMethods.ToStringArray(this StackExchange.Redis.RedisValue[]? values) -> string?[]? +static StackExchange.Redis.ExtensionMethods.ToStringDictionary(this StackExchange.Redis.HashEntry[]? hash) -> System.Collections.Generic.Dictionary? +static StackExchange.Redis.ExtensionMethods.ToStringDictionary(this StackExchange.Redis.SortedSetEntry[]? sortedSet) -> System.Collections.Generic.Dictionary? +static StackExchange.Redis.ExtensionMethods.ToStringDictionary(this System.Collections.Generic.KeyValuePair[]? pairs) -> System.Collections.Generic.Dictionary? 
+static StackExchange.Redis.GeoEntry.operator !=(StackExchange.Redis.GeoEntry x, StackExchange.Redis.GeoEntry y) -> bool +static StackExchange.Redis.GeoEntry.operator ==(StackExchange.Redis.GeoEntry x, StackExchange.Redis.GeoEntry y) -> bool +static StackExchange.Redis.GeoPosition.operator !=(StackExchange.Redis.GeoPosition x, StackExchange.Redis.GeoPosition y) -> bool +static StackExchange.Redis.GeoPosition.operator ==(StackExchange.Redis.GeoPosition x, StackExchange.Redis.GeoPosition y) -> bool +static StackExchange.Redis.HashEntry.implicit operator StackExchange.Redis.HashEntry(System.Collections.Generic.KeyValuePair value) -> StackExchange.Redis.HashEntry +static StackExchange.Redis.HashEntry.implicit operator System.Collections.Generic.KeyValuePair(StackExchange.Redis.HashEntry value) -> System.Collections.Generic.KeyValuePair +static StackExchange.Redis.HashEntry.operator !=(StackExchange.Redis.HashEntry x, StackExchange.Redis.HashEntry y) -> bool +static StackExchange.Redis.HashEntry.operator ==(StackExchange.Redis.HashEntry x, StackExchange.Redis.HashEntry y) -> bool +static StackExchange.Redis.KeyspaceIsolation.DatabaseExtensions.WithKeyPrefix(this StackExchange.Redis.IDatabase! database, StackExchange.Redis.RedisKey keyPrefix) -> StackExchange.Redis.IDatabase! +static StackExchange.Redis.Lease.Create(int length, bool clear = true) -> StackExchange.Redis.Lease! +static StackExchange.Redis.Lease.Empty.get -> StackExchange.Redis.Lease! +static StackExchange.Redis.ListPopResult.Null.get -> StackExchange.Redis.ListPopResult +static StackExchange.Redis.LuaScript.GetCachedScriptCount() -> int +static StackExchange.Redis.LuaScript.Prepare(string! script) -> StackExchange.Redis.LuaScript! 
+static StackExchange.Redis.LuaScript.PurgeCache() -> void +static StackExchange.Redis.NameValueEntry.implicit operator StackExchange.Redis.NameValueEntry(System.Collections.Generic.KeyValuePair value) -> StackExchange.Redis.NameValueEntry +static StackExchange.Redis.NameValueEntry.implicit operator System.Collections.Generic.KeyValuePair(StackExchange.Redis.NameValueEntry value) -> System.Collections.Generic.KeyValuePair +static StackExchange.Redis.NameValueEntry.operator !=(StackExchange.Redis.NameValueEntry x, StackExchange.Redis.NameValueEntry y) -> bool +static StackExchange.Redis.NameValueEntry.operator ==(StackExchange.Redis.NameValueEntry x, StackExchange.Redis.NameValueEntry y) -> bool +static StackExchange.Redis.RedisChannel.implicit operator byte[]?(StackExchange.Redis.RedisChannel key) -> byte[]? +static StackExchange.Redis.RedisChannel.implicit operator StackExchange.Redis.RedisChannel(byte[]? key) -> StackExchange.Redis.RedisChannel +static StackExchange.Redis.RedisChannel.implicit operator StackExchange.Redis.RedisChannel(string! key) -> StackExchange.Redis.RedisChannel +static StackExchange.Redis.RedisChannel.implicit operator string?(StackExchange.Redis.RedisChannel key) -> string? +static StackExchange.Redis.RedisChannel.Literal(byte[]! value) -> StackExchange.Redis.RedisChannel +static StackExchange.Redis.RedisChannel.Literal(string! value) -> StackExchange.Redis.RedisChannel +static StackExchange.Redis.RedisChannel.operator !=(byte[]! x, StackExchange.Redis.RedisChannel y) -> bool +static StackExchange.Redis.RedisChannel.operator !=(StackExchange.Redis.RedisChannel x, byte[]! y) -> bool +static StackExchange.Redis.RedisChannel.operator !=(StackExchange.Redis.RedisChannel x, StackExchange.Redis.RedisChannel y) -> bool +static StackExchange.Redis.RedisChannel.operator !=(StackExchange.Redis.RedisChannel x, string! y) -> bool +static StackExchange.Redis.RedisChannel.operator !=(string! 
x, StackExchange.Redis.RedisChannel y) -> bool +static StackExchange.Redis.RedisChannel.operator ==(byte[]! x, StackExchange.Redis.RedisChannel y) -> bool +static StackExchange.Redis.RedisChannel.operator ==(StackExchange.Redis.RedisChannel x, byte[]! y) -> bool +static StackExchange.Redis.RedisChannel.operator ==(StackExchange.Redis.RedisChannel x, StackExchange.Redis.RedisChannel y) -> bool +static StackExchange.Redis.RedisChannel.operator ==(StackExchange.Redis.RedisChannel x, string! y) -> bool +static StackExchange.Redis.RedisChannel.operator ==(string! x, StackExchange.Redis.RedisChannel y) -> bool +static StackExchange.Redis.RedisChannel.Pattern(byte[]! value) -> StackExchange.Redis.RedisChannel +static StackExchange.Redis.RedisChannel.Pattern(string! value) -> StackExchange.Redis.RedisChannel +static StackExchange.Redis.RedisChannel.UseImplicitAutoPattern.get -> bool +static StackExchange.Redis.RedisChannel.UseImplicitAutoPattern.set -> void +static StackExchange.Redis.RedisFeatures.operator !=(StackExchange.Redis.RedisFeatures left, StackExchange.Redis.RedisFeatures right) -> bool +static StackExchange.Redis.RedisFeatures.operator ==(StackExchange.Redis.RedisFeatures left, StackExchange.Redis.RedisFeatures right) -> bool +static StackExchange.Redis.RedisKey.implicit operator byte[]?(StackExchange.Redis.RedisKey key) -> byte[]? +static StackExchange.Redis.RedisKey.implicit operator StackExchange.Redis.RedisKey(byte[]? key) -> StackExchange.Redis.RedisKey +static StackExchange.Redis.RedisKey.implicit operator StackExchange.Redis.RedisKey(string? key) -> StackExchange.Redis.RedisKey +static StackExchange.Redis.RedisKey.implicit operator string?(StackExchange.Redis.RedisKey key) -> string? +static StackExchange.Redis.RedisKey.operator !=(byte[]! x, StackExchange.Redis.RedisKey y) -> bool +static StackExchange.Redis.RedisKey.operator !=(StackExchange.Redis.RedisKey x, byte[]! 
y) -> bool +static StackExchange.Redis.RedisKey.operator !=(StackExchange.Redis.RedisKey x, StackExchange.Redis.RedisKey y) -> bool +static StackExchange.Redis.RedisKey.operator !=(StackExchange.Redis.RedisKey x, string! y) -> bool +static StackExchange.Redis.RedisKey.operator !=(string! x, StackExchange.Redis.RedisKey y) -> bool +static StackExchange.Redis.RedisKey.operator +(StackExchange.Redis.RedisKey x, StackExchange.Redis.RedisKey y) -> StackExchange.Redis.RedisKey +static StackExchange.Redis.RedisKey.operator ==(byte[]! x, StackExchange.Redis.RedisKey y) -> bool +static StackExchange.Redis.RedisKey.operator ==(StackExchange.Redis.RedisKey x, byte[]! y) -> bool +static StackExchange.Redis.RedisKey.operator ==(StackExchange.Redis.RedisKey x, StackExchange.Redis.RedisKey y) -> bool +static StackExchange.Redis.RedisKey.operator ==(StackExchange.Redis.RedisKey x, string! y) -> bool +static StackExchange.Redis.RedisKey.operator ==(string! x, StackExchange.Redis.RedisKey y) -> bool +static StackExchange.Redis.RedisResult.Create(StackExchange.Redis.RedisChannel channel) -> StackExchange.Redis.RedisResult! +static StackExchange.Redis.RedisResult.Create(StackExchange.Redis.RedisKey key) -> StackExchange.Redis.RedisResult! +static StackExchange.Redis.RedisResult.Create(StackExchange.Redis.RedisResult![]! values) -> StackExchange.Redis.RedisResult! +static StackExchange.Redis.RedisResult.Create(StackExchange.Redis.RedisValue value, StackExchange.Redis.ResultType? resultType = null) -> StackExchange.Redis.RedisResult! +static StackExchange.Redis.RedisResult.Create(StackExchange.Redis.RedisValue[]! values) -> StackExchange.Redis.RedisResult! +static StackExchange.Redis.RedisResult.explicit operator bool(StackExchange.Redis.RedisResult! result) -> bool +static StackExchange.Redis.RedisResult.explicit operator bool?(StackExchange.Redis.RedisResult? result) -> bool? +static StackExchange.Redis.RedisResult.explicit operator bool[]?(StackExchange.Redis.RedisResult? 
result) -> bool[]? +static StackExchange.Redis.RedisResult.explicit operator byte[]?(StackExchange.Redis.RedisResult? result) -> byte[]? +static StackExchange.Redis.RedisResult.explicit operator byte[]?[]?(StackExchange.Redis.RedisResult? result) -> byte[]?[]? +static StackExchange.Redis.RedisResult.explicit operator double(StackExchange.Redis.RedisResult! result) -> double +static StackExchange.Redis.RedisResult.explicit operator double?(StackExchange.Redis.RedisResult? result) -> double? +static StackExchange.Redis.RedisResult.explicit operator double[]?(StackExchange.Redis.RedisResult? result) -> double[]? +static StackExchange.Redis.RedisResult.explicit operator int(StackExchange.Redis.RedisResult! result) -> int +static StackExchange.Redis.RedisResult.explicit operator int?(StackExchange.Redis.RedisResult? result) -> int? +static StackExchange.Redis.RedisResult.explicit operator int[]?(StackExchange.Redis.RedisResult? result) -> int[]? +static StackExchange.Redis.RedisResult.explicit operator long(StackExchange.Redis.RedisResult! result) -> long +static StackExchange.Redis.RedisResult.explicit operator long?(StackExchange.Redis.RedisResult? result) -> long? +static StackExchange.Redis.RedisResult.explicit operator long[]?(StackExchange.Redis.RedisResult? result) -> long[]? +static StackExchange.Redis.RedisResult.explicit operator StackExchange.Redis.RedisKey(StackExchange.Redis.RedisResult? result) -> StackExchange.Redis.RedisKey +static StackExchange.Redis.RedisResult.explicit operator StackExchange.Redis.RedisKey[]?(StackExchange.Redis.RedisResult? result) -> StackExchange.Redis.RedisKey[]? +static StackExchange.Redis.RedisResult.explicit operator StackExchange.Redis.RedisResult![]?(StackExchange.Redis.RedisResult? result) -> StackExchange.Redis.RedisResult![]? +static StackExchange.Redis.RedisResult.explicit operator StackExchange.Redis.RedisValue(StackExchange.Redis.RedisResult? 
result) -> StackExchange.Redis.RedisValue +static StackExchange.Redis.RedisResult.explicit operator StackExchange.Redis.RedisValue[]?(StackExchange.Redis.RedisResult? result) -> StackExchange.Redis.RedisValue[]? +static StackExchange.Redis.RedisResult.explicit operator string?(StackExchange.Redis.RedisResult? result) -> string? +static StackExchange.Redis.RedisResult.explicit operator string?[]?(StackExchange.Redis.RedisResult? result) -> string?[]? +static StackExchange.Redis.RedisResult.explicit operator ulong(StackExchange.Redis.RedisResult! result) -> ulong +static StackExchange.Redis.RedisResult.explicit operator ulong?(StackExchange.Redis.RedisResult? result) -> ulong? +static StackExchange.Redis.RedisResult.explicit operator ulong[]?(StackExchange.Redis.RedisResult? result) -> ulong[]? +static StackExchange.Redis.RedisValue.CreateFrom(System.IO.MemoryStream! stream) -> StackExchange.Redis.RedisValue +static StackExchange.Redis.RedisValue.EmptyString.get -> StackExchange.Redis.RedisValue +static StackExchange.Redis.RedisValue.explicit operator bool(StackExchange.Redis.RedisValue value) -> bool +static StackExchange.Redis.RedisValue.explicit operator bool?(StackExchange.Redis.RedisValue value) -> bool? +static StackExchange.Redis.RedisValue.explicit operator decimal(StackExchange.Redis.RedisValue value) -> decimal +static StackExchange.Redis.RedisValue.explicit operator decimal?(StackExchange.Redis.RedisValue value) -> decimal? +static StackExchange.Redis.RedisValue.explicit operator double(StackExchange.Redis.RedisValue value) -> double +static StackExchange.Redis.RedisValue.explicit operator double?(StackExchange.Redis.RedisValue value) -> double? +static StackExchange.Redis.RedisValue.explicit operator float(StackExchange.Redis.RedisValue value) -> float +static StackExchange.Redis.RedisValue.explicit operator float?(StackExchange.Redis.RedisValue value) -> float? 
+static StackExchange.Redis.RedisValue.explicit operator int(StackExchange.Redis.RedisValue value) -> int +static StackExchange.Redis.RedisValue.explicit operator int?(StackExchange.Redis.RedisValue value) -> int? +static StackExchange.Redis.RedisValue.explicit operator long(StackExchange.Redis.RedisValue value) -> long +static StackExchange.Redis.RedisValue.explicit operator long?(StackExchange.Redis.RedisValue value) -> long? +static StackExchange.Redis.RedisValue.explicit operator uint(StackExchange.Redis.RedisValue value) -> uint +static StackExchange.Redis.RedisValue.explicit operator uint?(StackExchange.Redis.RedisValue value) -> uint? +static StackExchange.Redis.RedisValue.explicit operator ulong(StackExchange.Redis.RedisValue value) -> ulong +static StackExchange.Redis.RedisValue.explicit operator ulong?(StackExchange.Redis.RedisValue value) -> ulong? +static StackExchange.Redis.RedisValue.implicit operator byte[]?(StackExchange.Redis.RedisValue value) -> byte[]? +static StackExchange.Redis.RedisValue.implicit operator StackExchange.Redis.RedisValue(bool value) -> StackExchange.Redis.RedisValue +static StackExchange.Redis.RedisValue.implicit operator StackExchange.Redis.RedisValue(bool? value) -> StackExchange.Redis.RedisValue +static StackExchange.Redis.RedisValue.implicit operator StackExchange.Redis.RedisValue(byte[]? value) -> StackExchange.Redis.RedisValue +static StackExchange.Redis.RedisValue.implicit operator StackExchange.Redis.RedisValue(double value) -> StackExchange.Redis.RedisValue +static StackExchange.Redis.RedisValue.implicit operator StackExchange.Redis.RedisValue(double? value) -> StackExchange.Redis.RedisValue +static StackExchange.Redis.RedisValue.implicit operator StackExchange.Redis.RedisValue(int value) -> StackExchange.Redis.RedisValue +static StackExchange.Redis.RedisValue.implicit operator StackExchange.Redis.RedisValue(int? 
value) -> StackExchange.Redis.RedisValue +static StackExchange.Redis.RedisValue.implicit operator StackExchange.Redis.RedisValue(long value) -> StackExchange.Redis.RedisValue +static StackExchange.Redis.RedisValue.implicit operator StackExchange.Redis.RedisValue(long? value) -> StackExchange.Redis.RedisValue +static StackExchange.Redis.RedisValue.implicit operator StackExchange.Redis.RedisValue(string? value) -> StackExchange.Redis.RedisValue +static StackExchange.Redis.RedisValue.implicit operator StackExchange.Redis.RedisValue(System.Memory value) -> StackExchange.Redis.RedisValue +static StackExchange.Redis.RedisValue.implicit operator StackExchange.Redis.RedisValue(System.ReadOnlyMemory value) -> StackExchange.Redis.RedisValue +static StackExchange.Redis.RedisValue.implicit operator StackExchange.Redis.RedisValue(uint value) -> StackExchange.Redis.RedisValue +static StackExchange.Redis.RedisValue.implicit operator StackExchange.Redis.RedisValue(uint? value) -> StackExchange.Redis.RedisValue +static StackExchange.Redis.RedisValue.implicit operator StackExchange.Redis.RedisValue(ulong value) -> StackExchange.Redis.RedisValue +static StackExchange.Redis.RedisValue.implicit operator StackExchange.Redis.RedisValue(ulong? value) -> StackExchange.Redis.RedisValue +static StackExchange.Redis.RedisValue.implicit operator string?(StackExchange.Redis.RedisValue value) -> string? +static StackExchange.Redis.RedisValue.implicit operator System.ReadOnlyMemory(StackExchange.Redis.RedisValue value) -> System.ReadOnlyMemory +static StackExchange.Redis.RedisValue.Null.get -> StackExchange.Redis.RedisValue +static StackExchange.Redis.RedisValue.operator !=(StackExchange.Redis.RedisValue x, StackExchange.Redis.RedisValue y) -> bool +static StackExchange.Redis.RedisValue.operator ==(StackExchange.Redis.RedisValue x, StackExchange.Redis.RedisValue y) -> bool +static StackExchange.Redis.RedisValue.Unbox(object? 
value) -> StackExchange.Redis.RedisValue +static StackExchange.Redis.SlotRange.operator !=(StackExchange.Redis.SlotRange x, StackExchange.Redis.SlotRange y) -> bool +static StackExchange.Redis.SlotRange.operator ==(StackExchange.Redis.SlotRange x, StackExchange.Redis.SlotRange y) -> bool +static StackExchange.Redis.SlotRange.TryParse(string! range, out StackExchange.Redis.SlotRange value) -> bool +static StackExchange.Redis.SocketManager.Shared.get -> StackExchange.Redis.SocketManager! +static StackExchange.Redis.SocketManager.ThreadPool.get -> StackExchange.Redis.SocketManager! +static StackExchange.Redis.SortedSetEntry.implicit operator StackExchange.Redis.SortedSetEntry(System.Collections.Generic.KeyValuePair value) -> StackExchange.Redis.SortedSetEntry +static StackExchange.Redis.SortedSetEntry.implicit operator System.Collections.Generic.KeyValuePair(StackExchange.Redis.SortedSetEntry value) -> System.Collections.Generic.KeyValuePair +static StackExchange.Redis.SortedSetEntry.operator !=(StackExchange.Redis.SortedSetEntry x, StackExchange.Redis.SortedSetEntry y) -> bool +static StackExchange.Redis.SortedSetEntry.operator ==(StackExchange.Redis.SortedSetEntry x, StackExchange.Redis.SortedSetEntry y) -> bool +static StackExchange.Redis.SortedSetPopResult.Null.get -> StackExchange.Redis.SortedSetPopResult +static StackExchange.Redis.StreamAutoClaimIdsOnlyResult.Null.get -> StackExchange.Redis.StreamAutoClaimIdsOnlyResult +static StackExchange.Redis.StreamAutoClaimResult.Null.get -> StackExchange.Redis.StreamAutoClaimResult +static StackExchange.Redis.StreamEntry.Null.get -> StackExchange.Redis.StreamEntry +static StackExchange.Redis.StreamPosition.Beginning.get -> StackExchange.Redis.RedisValue +static StackExchange.Redis.StreamPosition.NewMessages.get -> StackExchange.Redis.RedisValue +virtual StackExchange.Redis.Configuration.DefaultOptionsProvider.AbortOnConnectFail.get -> bool +virtual 
StackExchange.Redis.Configuration.DefaultOptionsProvider.AfterConnectAsync(StackExchange.Redis.ConnectionMultiplexer! multiplexer, System.Action! log) -> System.Threading.Tasks.Task! +virtual StackExchange.Redis.Configuration.DefaultOptionsProvider.AllowAdmin.get -> bool +virtual StackExchange.Redis.Configuration.DefaultOptionsProvider.BacklogPolicy.get -> StackExchange.Redis.BacklogPolicy! +virtual StackExchange.Redis.Configuration.DefaultOptionsProvider.CheckCertificateRevocation.get -> bool +virtual StackExchange.Redis.Configuration.DefaultOptionsProvider.CommandMap.get -> StackExchange.Redis.CommandMap? +virtual StackExchange.Redis.Configuration.DefaultOptionsProvider.ConfigCheckInterval.get -> System.TimeSpan +virtual StackExchange.Redis.Configuration.DefaultOptionsProvider.ConfigurationChannel.get -> string! +virtual StackExchange.Redis.Configuration.DefaultOptionsProvider.ConnectRetry.get -> int +virtual StackExchange.Redis.Configuration.DefaultOptionsProvider.ConnectTimeout.get -> System.TimeSpan? +virtual StackExchange.Redis.Configuration.DefaultOptionsProvider.DefaultVersion.get -> System.Version! +virtual StackExchange.Redis.Configuration.DefaultOptionsProvider.GetDefaultClientName() -> string! +virtual StackExchange.Redis.Configuration.DefaultOptionsProvider.GetDefaultSsl(StackExchange.Redis.EndPointCollection! endPoints) -> bool +virtual StackExchange.Redis.Configuration.DefaultOptionsProvider.GetSslHostFromEndpoints(StackExchange.Redis.EndPointCollection! endPoints) -> string? 
+virtual StackExchange.Redis.Configuration.DefaultOptionsProvider.HeartbeatConsistencyChecks.get -> bool +virtual StackExchange.Redis.Configuration.DefaultOptionsProvider.HeartbeatInterval.get -> System.TimeSpan +virtual StackExchange.Redis.Configuration.DefaultOptionsProvider.HighIntegrity.get -> bool +virtual StackExchange.Redis.Configuration.DefaultOptionsProvider.IncludeDetailInExceptions.get -> bool +virtual StackExchange.Redis.Configuration.DefaultOptionsProvider.IncludePerformanceCountersInExceptions.get -> bool +virtual StackExchange.Redis.Configuration.DefaultOptionsProvider.IsMatch(System.Net.EndPoint! endpoint) -> bool +virtual StackExchange.Redis.Configuration.DefaultOptionsProvider.KeepAliveInterval.get -> System.TimeSpan +virtual StackExchange.Redis.Configuration.DefaultOptionsProvider.LibraryName.get -> string! +virtual StackExchange.Redis.Configuration.DefaultOptionsProvider.LoggerFactory.get -> Microsoft.Extensions.Logging.ILoggerFactory? +virtual StackExchange.Redis.Configuration.DefaultOptionsProvider.Password.get -> string? +virtual StackExchange.Redis.Configuration.DefaultOptionsProvider.Proxy.get -> StackExchange.Redis.Proxy +virtual StackExchange.Redis.Configuration.DefaultOptionsProvider.ReconnectRetryPolicy.get -> StackExchange.Redis.IReconnectRetryPolicy? +virtual StackExchange.Redis.Configuration.DefaultOptionsProvider.ResolveDns.get -> bool +virtual StackExchange.Redis.Configuration.DefaultOptionsProvider.SetClientLibrary.get -> bool +virtual StackExchange.Redis.Configuration.DefaultOptionsProvider.SyncTimeout.get -> System.TimeSpan +virtual StackExchange.Redis.Configuration.DefaultOptionsProvider.TieBreaker.get -> string! +virtual StackExchange.Redis.Configuration.DefaultOptionsProvider.User.get -> string? +abstract StackExchange.Redis.RedisResult.ToString(out string? type) -> string? +override sealed StackExchange.Redis.RedisResult.ToString() -> string! +override StackExchange.Redis.Role.Master.Replica.ToString() -> string! 
+StackExchange.Redis.ClientInfo.Protocol.get -> StackExchange.Redis.RedisProtocol? +StackExchange.Redis.ConfigurationOptions.Protocol.get -> StackExchange.Redis.RedisProtocol? +StackExchange.Redis.ConfigurationOptions.Protocol.set -> void +StackExchange.Redis.IServer.Protocol.get -> StackExchange.Redis.RedisProtocol +StackExchange.Redis.RedisFeatures.ClientId.get -> bool +StackExchange.Redis.RedisFeatures.Equals(StackExchange.Redis.RedisFeatures other) -> bool +StackExchange.Redis.RedisFeatures.Resp3.get -> bool +StackExchange.Redis.RedisProtocol +StackExchange.Redis.RedisProtocol.Resp2 = 20000 -> StackExchange.Redis.RedisProtocol +StackExchange.Redis.RedisProtocol.Resp3 = 30000 -> StackExchange.Redis.RedisProtocol +StackExchange.Redis.RedisResult.Resp2Type.get -> StackExchange.Redis.ResultType +StackExchange.Redis.RedisResult.Resp3Type.get -> StackExchange.Redis.ResultType +StackExchange.Redis.RedisResult.Type.get -> StackExchange.Redis.ResultType +StackExchange.Redis.ResultType.Array = 5 -> StackExchange.Redis.ResultType +StackExchange.Redis.ResultType.Attribute = 29 -> StackExchange.Redis.ResultType +StackExchange.Redis.ResultType.BigInteger = 17 -> StackExchange.Redis.ResultType +StackExchange.Redis.ResultType.BlobError = 10 -> StackExchange.Redis.ResultType +StackExchange.Redis.ResultType.Boolean = 11 -> StackExchange.Redis.ResultType +StackExchange.Redis.ResultType.Double = 9 -> StackExchange.Redis.ResultType +StackExchange.Redis.ResultType.Map = 13 -> StackExchange.Redis.ResultType +StackExchange.Redis.ResultType.Null = 8 -> StackExchange.Redis.ResultType +StackExchange.Redis.ResultType.Push = 37 -> StackExchange.Redis.ResultType +StackExchange.Redis.ResultType.Set = 21 -> StackExchange.Redis.ResultType +StackExchange.Redis.ResultType.VerbatimString = 12 -> StackExchange.Redis.ResultType +static StackExchange.Redis.RedisResult.Create(StackExchange.Redis.RedisResult![]! values, StackExchange.Redis.ResultType resultType) -> StackExchange.Redis.RedisResult! 
+static StackExchange.Redis.RedisResult.Create(StackExchange.Redis.RedisValue[]! values, StackExchange.Redis.ResultType resultType) -> StackExchange.Redis.RedisResult! +virtual StackExchange.Redis.RedisResult.Length.get -> int +virtual StackExchange.Redis.RedisResult.this[int index].get -> StackExchange.Redis.RedisResult! +StackExchange.Redis.ConnectionMultiplexer.AddLibraryNameSuffix(string! suffix) -> void +StackExchange.Redis.IConnectionMultiplexer.AddLibraryNameSuffix(string! suffix) -> void +StackExchange.Redis.RedisFeatures.ShardedPubSub.get -> bool +static StackExchange.Redis.RedisChannel.Sharded(byte[]? value) -> StackExchange.Redis.RedisChannel +static StackExchange.Redis.RedisChannel.Sharded(string! value) -> StackExchange.Redis.RedisChannel +StackExchange.Redis.ClientInfo.ShardedSubscriptionCount.get -> int +StackExchange.Redis.ConfigurationOptions.SetUserPfxCertificate(string! userCertificatePath, string? password = null) -> void +StackExchange.Redis.Bitwise.AndOr = 6 -> StackExchange.Redis.Bitwise +StackExchange.Redis.Bitwise.Diff = 4 -> StackExchange.Redis.Bitwise +StackExchange.Redis.Bitwise.Diff1 = 5 -> StackExchange.Redis.Bitwise +StackExchange.Redis.Bitwise.One = 7 -> StackExchange.Redis.Bitwise +StackExchange.Redis.StreamTrimMode +StackExchange.Redis.StreamTrimMode.Acknowledged = 2 -> StackExchange.Redis.StreamTrimMode +StackExchange.Redis.StreamTrimMode.DeleteReferences = 1 -> StackExchange.Redis.StreamTrimMode +StackExchange.Redis.StreamTrimMode.KeepReferences = 0 -> StackExchange.Redis.StreamTrimMode +StackExchange.Redis.StreamTrimResult +StackExchange.Redis.StreamTrimResult.Deleted = 1 -> StackExchange.Redis.StreamTrimResult +StackExchange.Redis.StreamTrimResult.NotDeleted = 2 -> StackExchange.Redis.StreamTrimResult +StackExchange.Redis.StreamTrimResult.NotFound = -1 -> StackExchange.Redis.StreamTrimResult +StackExchange.Redis.IDatabase.HashFieldGetAndDelete(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue hashField, 
StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue +StackExchange.Redis.IDatabase.HashFieldGetAndDelete(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! hashFields, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue[]! +StackExchange.Redis.IDatabase.HashFieldGetAndSetExpiry(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue hashField, System.DateTime expiry, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue +StackExchange.Redis.IDatabase.HashFieldGetAndSetExpiry(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue hashField, System.TimeSpan? expiry = null, bool persist = false, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue +StackExchange.Redis.IDatabase.HashFieldGetAndSetExpiry(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! hashFields, System.DateTime expiry, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue[]! +StackExchange.Redis.IDatabase.HashFieldGetAndSetExpiry(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! hashFields, System.TimeSpan? expiry = null, bool persist = false, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue[]! +StackExchange.Redis.IDatabase.HashFieldGetLeaseAndDelete(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue hashField, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.Lease? 
+StackExchange.Redis.IDatabase.HashFieldGetLeaseAndSetExpiry(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue hashField, System.DateTime expiry, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.Lease? +StackExchange.Redis.IDatabase.HashFieldGetLeaseAndSetExpiry(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue hashField, System.TimeSpan? expiry = null, bool persist = false, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.Lease? +StackExchange.Redis.IDatabase.HashFieldSetAndSetExpiry(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue field, StackExchange.Redis.RedisValue value, System.DateTime expiry, StackExchange.Redis.When when = StackExchange.Redis.When.Always, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue +StackExchange.Redis.IDatabase.HashFieldSetAndSetExpiry(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue field, StackExchange.Redis.RedisValue value, System.TimeSpan? expiry = null, bool keepTtl = false, StackExchange.Redis.When when = StackExchange.Redis.When.Always, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue +StackExchange.Redis.IDatabase.HashFieldSetAndSetExpiry(StackExchange.Redis.RedisKey key, StackExchange.Redis.HashEntry[]! hashFields, System.DateTime expiry, StackExchange.Redis.When when = StackExchange.Redis.When.Always, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue +StackExchange.Redis.IDatabase.HashFieldSetAndSetExpiry(StackExchange.Redis.RedisKey key, StackExchange.Redis.HashEntry[]! hashFields, System.TimeSpan? 
expiry = null, bool keepTtl = false, StackExchange.Redis.When when = StackExchange.Redis.When.Always, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue +StackExchange.Redis.IDatabaseAsync.HashFieldGetAndDeleteAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue hashField, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.HashFieldGetAndDeleteAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! hashFields, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.HashFieldGetAndSetExpiryAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue hashField, System.DateTime expiry, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.HashFieldGetAndSetExpiryAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue hashField, System.TimeSpan? expiry = null, bool persist = false, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.HashFieldGetAndSetExpiryAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! hashFields, System.DateTime expiry, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.HashFieldGetAndSetExpiryAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue[]! hashFields, System.TimeSpan? expiry = null, bool persist = false, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! 
+StackExchange.Redis.IDatabaseAsync.HashFieldGetLeaseAndDeleteAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue hashField, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task?>! +StackExchange.Redis.IDatabaseAsync.HashFieldGetLeaseAndSetExpiryAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue hashField, System.DateTime expiry, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task?>! +StackExchange.Redis.IDatabaseAsync.HashFieldGetLeaseAndSetExpiryAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue hashField, System.TimeSpan? expiry = null, bool persist = false, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task?>! +StackExchange.Redis.IDatabaseAsync.HashFieldSetAndSetExpiryAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue field, StackExchange.Redis.RedisValue value, System.DateTime expiry, StackExchange.Redis.When when = StackExchange.Redis.When.Always, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.HashFieldSetAndSetExpiryAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue field, StackExchange.Redis.RedisValue value, System.TimeSpan? expiry = null, bool keepTtl = false, StackExchange.Redis.When when = StackExchange.Redis.When.Always, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.HashFieldSetAndSetExpiryAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.HashEntry[]! hashFields, System.DateTime expiry, StackExchange.Redis.When when = StackExchange.Redis.When.Always, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! 
+StackExchange.Redis.IDatabaseAsync.HashFieldSetAndSetExpiryAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.HashEntry[]! hashFields, System.TimeSpan? expiry = null, bool keepTtl = false, StackExchange.Redis.When when = StackExchange.Redis.When.Always, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.RedisValue.CopyTo(System.Span destination) -> int +StackExchange.Redis.RedisValue.GetByteCount() -> int +StackExchange.Redis.RedisValue.GetLongByteCount() -> long +static StackExchange.Redis.Condition.SortedSetContainsStarting(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue prefix) -> StackExchange.Redis.Condition! +static StackExchange.Redis.Condition.SortedSetNotContainsStarting(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue prefix) -> StackExchange.Redis.Condition! +StackExchange.Redis.ConnectionMultiplexer.GetServer(StackExchange.Redis.RedisKey key, object? asyncState = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.IServer! +StackExchange.Redis.IConnectionMultiplexer.GetServer(StackExchange.Redis.RedisKey key, object? asyncState = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.IServer! +StackExchange.Redis.IServer.Execute(int? database, string! command, System.Collections.Generic.ICollection! args, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisResult! +StackExchange.Redis.IServer.ExecuteAsync(int? database, string! command, System.Collections.Generic.ICollection! args, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +[SER001]override StackExchange.Redis.VectorSetLink.ToString() -> string! +[SER001]override StackExchange.Redis.VectorSetSimilaritySearchResult.ToString() -> string! 
+[SER001]StackExchange.Redis.IDatabase.VectorSetAdd(StackExchange.Redis.RedisKey key, StackExchange.Redis.VectorSetAddRequest! request, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +[SER001]StackExchange.Redis.IDatabaseAsync.VectorSetAddAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.VectorSetAddRequest! request, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +[SER001]StackExchange.Redis.VectorSetAddRequest +[SER001]StackExchange.Redis.VectorSetAddRequest.BuildExplorationFactor.get -> int? +[SER001]StackExchange.Redis.VectorSetAddRequest.BuildExplorationFactor.set -> void +[SER001]StackExchange.Redis.VectorSetAddRequest.MaxConnections.get -> int? +[SER001]StackExchange.Redis.VectorSetAddRequest.MaxConnections.set -> void +[SER001]StackExchange.Redis.VectorSetAddRequest.Quantization.get -> StackExchange.Redis.VectorSetQuantization +[SER001]StackExchange.Redis.VectorSetAddRequest.Quantization.set -> void +[SER001]StackExchange.Redis.VectorSetAddRequest.ReducedDimensions.get -> int? +[SER001]StackExchange.Redis.VectorSetAddRequest.ReducedDimensions.set -> void +[SER001]StackExchange.Redis.VectorSetAddRequest.UseCheckAndSet.get -> bool +[SER001]StackExchange.Redis.VectorSetAddRequest.UseCheckAndSet.set -> void +[SER001]StackExchange.Redis.VectorSetSimilaritySearchRequest +[SER001]StackExchange.Redis.VectorSetSimilaritySearchRequest.Count.get -> int? +[SER001]StackExchange.Redis.VectorSetSimilaritySearchRequest.Count.set -> void +[SER001]StackExchange.Redis.VectorSetSimilaritySearchRequest.DisableThreading.get -> bool +[SER001]StackExchange.Redis.VectorSetSimilaritySearchRequest.DisableThreading.set -> void +[SER001]StackExchange.Redis.VectorSetSimilaritySearchRequest.Epsilon.get -> double? 
+[SER001]StackExchange.Redis.VectorSetSimilaritySearchRequest.Epsilon.set -> void +[SER001]StackExchange.Redis.VectorSetSimilaritySearchRequest.FilterExpression.get -> string? +[SER001]StackExchange.Redis.VectorSetSimilaritySearchRequest.FilterExpression.set -> void +[SER001]StackExchange.Redis.VectorSetSimilaritySearchRequest.MaxFilteringEffort.get -> int? +[SER001]StackExchange.Redis.VectorSetSimilaritySearchRequest.MaxFilteringEffort.set -> void +[SER001]StackExchange.Redis.VectorSetSimilaritySearchRequest.SearchExplorationFactor.get -> int? +[SER001]StackExchange.Redis.VectorSetSimilaritySearchRequest.SearchExplorationFactor.set -> void +[SER001]StackExchange.Redis.VectorSetSimilaritySearchRequest.UseExactSearch.get -> bool +[SER001]StackExchange.Redis.VectorSetSimilaritySearchRequest.UseExactSearch.set -> void +[SER001]StackExchange.Redis.VectorSetSimilaritySearchRequest.WithAttributes.get -> bool +[SER001]StackExchange.Redis.VectorSetSimilaritySearchRequest.WithAttributes.set -> void +[SER001]StackExchange.Redis.VectorSetSimilaritySearchRequest.WithScores.get -> bool +[SER001]StackExchange.Redis.VectorSetSimilaritySearchRequest.WithScores.set -> void +[SER001]StackExchange.Redis.IDatabase.VectorSetContains(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +[SER001]StackExchange.Redis.IDatabase.VectorSetDimension(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> int +[SER001]StackExchange.Redis.IDatabase.VectorSetGetApproximateVector(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.Lease? 
+[SER001]StackExchange.Redis.IDatabase.VectorSetGetAttributesJson(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> string? +[SER001]StackExchange.Redis.IDatabase.VectorSetGetLinks(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.Lease? +[SER001]StackExchange.Redis.IDatabase.VectorSetGetLinksWithScores(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.Lease? +[SER001]StackExchange.Redis.IDatabase.VectorSetInfo(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.VectorSetInfo? +[SER001]StackExchange.Redis.IDatabase.VectorSetLength(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> long +[SER001]StackExchange.Redis.IDatabase.VectorSetRandomMember(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue +[SER001]StackExchange.Redis.IDatabase.VectorSetRandomMembers(StackExchange.Redis.RedisKey key, long count, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue[]! +[SER001]StackExchange.Redis.IDatabase.VectorSetRemove(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +[SER001]StackExchange.Redis.IDatabase.VectorSetSetAttributesJson(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member, string! 
attributesJson, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +[SER001]StackExchange.Redis.IDatabase.VectorSetSimilaritySearch(StackExchange.Redis.RedisKey key, StackExchange.Redis.VectorSetSimilaritySearchRequest! query, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.Lease? +[SER001]StackExchange.Redis.IDatabaseAsync.VectorSetContainsAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +[SER001]StackExchange.Redis.IDatabaseAsync.VectorSetDimensionAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +[SER001]StackExchange.Redis.IDatabaseAsync.VectorSetGetApproximateVectorAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task?>! +[SER001]StackExchange.Redis.IDatabaseAsync.VectorSetGetAttributesJsonAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +[SER001]StackExchange.Redis.IDatabaseAsync.VectorSetGetLinksAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task?>! +[SER001]StackExchange.Redis.IDatabaseAsync.VectorSetGetLinksWithScoresAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task?>! 
+[SER001]StackExchange.Redis.IDatabaseAsync.VectorSetInfoAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +[SER001]StackExchange.Redis.IDatabaseAsync.VectorSetLengthAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +[SER001]StackExchange.Redis.IDatabaseAsync.VectorSetRandomMemberAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +[SER001]StackExchange.Redis.IDatabaseAsync.VectorSetRandomMembersAsync(StackExchange.Redis.RedisKey key, long count, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +[SER001]StackExchange.Redis.IDatabaseAsync.VectorSetRemoveAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +[SER001]StackExchange.Redis.IDatabaseAsync.VectorSetSetAttributesJsonAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue member, string! attributesJson, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +[SER001]StackExchange.Redis.IDatabaseAsync.VectorSetSimilaritySearchAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.VectorSetSimilaritySearchRequest! query, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task?>! 
+[SER001]StackExchange.Redis.VectorSetInfo +[SER001]StackExchange.Redis.VectorSetInfo.Dimension.get -> int +[SER001]StackExchange.Redis.VectorSetInfo.HnswMaxNodeUid.get -> long +[SER001]StackExchange.Redis.VectorSetInfo.Length.get -> long +[SER001]StackExchange.Redis.VectorSetInfo.MaxLevel.get -> int +[SER001]StackExchange.Redis.VectorSetInfo.Quantization.get -> StackExchange.Redis.VectorSetQuantization +[SER001]StackExchange.Redis.VectorSetInfo.QuantizationRaw.get -> string? +[SER001]StackExchange.Redis.VectorSetInfo.VectorSetInfo() -> void +[SER001]StackExchange.Redis.VectorSetInfo.VectorSetInfo(StackExchange.Redis.VectorSetQuantization quantization, string? quantizationRaw, int dimension, long length, int maxLevel, long vectorSetUid, long hnswMaxNodeUid) -> void +[SER001]StackExchange.Redis.VectorSetInfo.VectorSetUid.get -> long +[SER001]StackExchange.Redis.VectorSetLink +[SER001]StackExchange.Redis.VectorSetLink.Member.get -> StackExchange.Redis.RedisValue +[SER001]StackExchange.Redis.VectorSetLink.Score.get -> double +[SER001]StackExchange.Redis.VectorSetLink.VectorSetLink() -> void +[SER001]StackExchange.Redis.VectorSetLink.VectorSetLink(StackExchange.Redis.RedisValue member, double score) -> void +[SER001]StackExchange.Redis.VectorSetQuantization +[SER001]StackExchange.Redis.VectorSetQuantization.Binary = 3 -> StackExchange.Redis.VectorSetQuantization +[SER001]StackExchange.Redis.VectorSetQuantization.Int8 = 2 -> StackExchange.Redis.VectorSetQuantization +[SER001]StackExchange.Redis.VectorSetQuantization.None = 1 -> StackExchange.Redis.VectorSetQuantization +[SER001]StackExchange.Redis.VectorSetQuantization.Unknown = 0 -> StackExchange.Redis.VectorSetQuantization +[SER001]StackExchange.Redis.VectorSetSimilaritySearchResult +[SER001]StackExchange.Redis.VectorSetSimilaritySearchResult.AttributesJson.get -> string? 
+[SER001]StackExchange.Redis.VectorSetSimilaritySearchResult.Member.get -> StackExchange.Redis.RedisValue +[SER001]StackExchange.Redis.VectorSetSimilaritySearchResult.Score.get -> double +[SER001]StackExchange.Redis.VectorSetSimilaritySearchResult.VectorSetSimilaritySearchResult() -> void +[SER001]StackExchange.Redis.VectorSetSimilaritySearchResult.VectorSetSimilaritySearchResult(StackExchange.Redis.RedisValue member, double score = NaN, string? attributesJson = null) -> void +[SER001]static StackExchange.Redis.VectorSetAddRequest.Member(StackExchange.Redis.RedisValue element, System.ReadOnlyMemory values, string? attributesJson = null) -> StackExchange.Redis.VectorSetAddRequest! +[SER001]static StackExchange.Redis.VectorSetSimilaritySearchRequest.ByMember(StackExchange.Redis.RedisValue member) -> StackExchange.Redis.VectorSetSimilaritySearchRequest! +[SER001]static StackExchange.Redis.VectorSetSimilaritySearchRequest.ByVector(System.ReadOnlyMemory vector) -> StackExchange.Redis.VectorSetSimilaritySearchRequest! +StackExchange.Redis.RedisChannel.WithKeyRouting() -> StackExchange.Redis.RedisChannel +StackExchange.Redis.IDatabase.StreamReadGroup(StackExchange.Redis.StreamPosition[]! streamPositions, StackExchange.Redis.RedisValue groupName, StackExchange.Redis.RedisValue consumerName, int? countPerStream = null, bool noAck = false, System.TimeSpan? claimMinIdleTime = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisStream[]! +StackExchange.Redis.IDatabase.StreamReadGroup(StackExchange.Redis.StreamPosition[]! streamPositions, StackExchange.Redis.RedisValue groupName, StackExchange.Redis.RedisValue consumerName, int? countPerStream, bool noAck, StackExchange.Redis.CommandFlags flags) -> StackExchange.Redis.RedisStream[]! +StackExchange.Redis.IDatabaseAsync.StreamReadGroupAsync(StackExchange.Redis.StreamPosition[]! 
streamPositions, StackExchange.Redis.RedisValue groupName, StackExchange.Redis.RedisValue consumerName, int? countPerStream = null, bool noAck = false, System.TimeSpan? claimMinIdleTime = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StreamReadGroupAsync(StackExchange.Redis.StreamPosition[]! streamPositions, StackExchange.Redis.RedisValue groupName, StackExchange.Redis.RedisValue consumerName, int? countPerStream, bool noAck, StackExchange.Redis.CommandFlags flags) -> System.Threading.Tasks.Task! +StackExchange.Redis.StreamEntry.DeliveryCount.get -> int +StackExchange.Redis.StreamEntry.IdleTime.get -> System.TimeSpan? +StackExchange.Redis.StreamEntry.StreamEntry(StackExchange.Redis.RedisValue id, StackExchange.Redis.NameValueEntry[]! values, System.TimeSpan? idleTime, int deliveryCount) -> void +StackExchange.Redis.IDatabase.StringSet(System.Collections.Generic.KeyValuePair[]! values, StackExchange.Redis.When when = StackExchange.Redis.When.Always, StackExchange.Redis.Expiration expiry = default(StackExchange.Redis.Expiration), StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +StackExchange.Redis.IDatabaseAsync.StringSetAsync(System.Collections.Generic.KeyValuePair[]! values, StackExchange.Redis.When when = StackExchange.Redis.When.Always, StackExchange.Redis.Expiration expiry = default(StackExchange.Redis.Expiration), StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.Expiration +StackExchange.Redis.Expiration.Expiration() -> void +StackExchange.Redis.Expiration.Expiration(System.DateTime when) -> void +StackExchange.Redis.Expiration.Expiration(System.TimeSpan ttl) -> void +override StackExchange.Redis.Expiration.Equals(object? 
obj) -> bool +override StackExchange.Redis.Expiration.GetHashCode() -> int +override StackExchange.Redis.Expiration.ToString() -> string! +static StackExchange.Redis.Expiration.Default.get -> StackExchange.Redis.Expiration +static StackExchange.Redis.Expiration.KeepTtl.get -> StackExchange.Redis.Expiration +static StackExchange.Redis.Expiration.Persist.get -> StackExchange.Redis.Expiration +static StackExchange.Redis.Expiration.implicit operator StackExchange.Redis.Expiration(System.DateTime when) -> StackExchange.Redis.Expiration +static StackExchange.Redis.Expiration.implicit operator StackExchange.Redis.Expiration(System.TimeSpan ttl) -> StackExchange.Redis.Expiration +override StackExchange.Redis.ValueCondition.Equals(object? obj) -> bool +override StackExchange.Redis.ValueCondition.GetHashCode() -> int +override StackExchange.Redis.ValueCondition.ToString() -> string! +StackExchange.Redis.RedisFeatures.DeleteWithValueCheck.get -> bool +StackExchange.Redis.RedisFeatures.SetWithValueCheck.get -> bool +StackExchange.Redis.ValueCondition +StackExchange.Redis.ValueCondition.ValueCondition() -> void +static StackExchange.Redis.ValueCondition.Always.get -> StackExchange.Redis.ValueCondition +static StackExchange.Redis.ValueCondition.Exists.get -> StackExchange.Redis.ValueCondition +static StackExchange.Redis.ValueCondition.implicit operator StackExchange.Redis.ValueCondition(StackExchange.Redis.When when) -> StackExchange.Redis.ValueCondition +static StackExchange.Redis.ValueCondition.NotExists.get -> StackExchange.Redis.ValueCondition +static StackExchange.Redis.ValueCondition.operator !(in StackExchange.Redis.ValueCondition value) -> StackExchange.Redis.ValueCondition +StackExchange.Redis.IDatabase.StringDelete(StackExchange.Redis.RedisKey key, StackExchange.Redis.ValueCondition when, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +StackExchange.Redis.IDatabaseAsync.StringDeleteAsync(StackExchange.Redis.RedisKey key, 
StackExchange.Redis.ValueCondition when, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabase.StringSet(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue value, StackExchange.Redis.Expiration expiry = default(StackExchange.Redis.Expiration), StackExchange.Redis.ValueCondition when = default(StackExchange.Redis.ValueCondition), StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +StackExchange.Redis.IDatabase.StringSet(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue value, System.TimeSpan? expiry, bool keepTtl, StackExchange.Redis.When when = StackExchange.Redis.When.Always, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +StackExchange.Redis.IDatabaseAsync.StringSetAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue value, StackExchange.Redis.Expiration expiry = default(StackExchange.Redis.Expiration), StackExchange.Redis.ValueCondition when = default(StackExchange.Redis.ValueCondition), StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +StackExchange.Redis.IDatabaseAsync.StringSetAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue value, System.TimeSpan? expiry, bool keepTtl, StackExchange.Redis.When when = StackExchange.Redis.When.Always, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +[SER002]StackExchange.Redis.IDatabase.StringDigest(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.ValueCondition? +[SER002]StackExchange.Redis.IDatabaseAsync.StringDigestAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! 
+[SER002]StackExchange.Redis.ValueCondition.AsDigest() -> StackExchange.Redis.ValueCondition +[SER002]StackExchange.Redis.ValueCondition.Value.get -> StackExchange.Redis.RedisValue +[SER002]static StackExchange.Redis.ValueCondition.CalculateDigest(System.ReadOnlySpan value) -> StackExchange.Redis.ValueCondition +[SER002]static StackExchange.Redis.ValueCondition.DigestEqual(in StackExchange.Redis.RedisValue value) -> StackExchange.Redis.ValueCondition +[SER002]static StackExchange.Redis.ValueCondition.DigestNotEqual(in StackExchange.Redis.RedisValue value) -> StackExchange.Redis.ValueCondition +[SER002]static StackExchange.Redis.ValueCondition.Equal(in StackExchange.Redis.RedisValue value) -> StackExchange.Redis.ValueCondition +[SER002]static StackExchange.Redis.ValueCondition.NotEqual(in StackExchange.Redis.RedisValue value) -> StackExchange.Redis.ValueCondition +[SER002]static StackExchange.Redis.ValueCondition.ParseDigest(System.ReadOnlySpan digest) -> StackExchange.Redis.ValueCondition +[SER002]static StackExchange.Redis.ValueCondition.ParseDigest(System.ReadOnlySpan digest) -> StackExchange.Redis.ValueCondition +StackExchange.Redis.ChannelMessage.TryParseKeyNotification(System.ReadOnlySpan keyPrefix, out StackExchange.Redis.KeyNotification notification) -> bool +StackExchange.Redis.KeyNotification +StackExchange.Redis.KeyNotification.GetChannel() -> StackExchange.Redis.RedisChannel +StackExchange.Redis.KeyNotification.GetKeyByteCount() -> int +StackExchange.Redis.KeyNotification.GetKeyCharCount() -> int +StackExchange.Redis.KeyNotification.GetKeyMaxByteCount() -> int +StackExchange.Redis.KeyNotification.GetKeyMaxCharCount() -> int +StackExchange.Redis.KeyNotification.GetValue() -> StackExchange.Redis.RedisValue +StackExchange.Redis.KeyNotification.IsType(System.ReadOnlySpan type) -> bool +StackExchange.Redis.KeyNotification.KeyStartsWith(System.ReadOnlySpan prefix) -> bool +StackExchange.Redis.KeyNotification.TryCopyKey(System.Span destination, out int 
charsWritten) -> bool +StackExchange.Redis.KeyNotification.Database.get -> int +StackExchange.Redis.KeyNotification.GetKey() -> StackExchange.Redis.RedisKey +StackExchange.Redis.KeyNotification.IsKeyEvent.get -> bool +StackExchange.Redis.KeyNotification.IsKeySpace.get -> bool +StackExchange.Redis.KeyNotification.KeyNotification() -> void +StackExchange.Redis.KeyNotification.TryCopyKey(System.Span destination, out int bytesWritten) -> bool +StackExchange.Redis.KeyNotification.Type.get -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.RedisValue.StartsWith(System.ReadOnlySpan value) -> bool +static StackExchange.Redis.KeyNotification.TryParse(scoped in StackExchange.Redis.RedisChannel channel, scoped in StackExchange.Redis.RedisValue value, out StackExchange.Redis.KeyNotification notification) -> bool +StackExchange.Redis.ChannelMessage.TryParseKeyNotification(out StackExchange.Redis.KeyNotification notification) -> bool +static StackExchange.Redis.KeyNotification.TryParse(scoped in System.ReadOnlySpan keyPrefix, scoped in StackExchange.Redis.RedisChannel channel, scoped in StackExchange.Redis.RedisValue value, out StackExchange.Redis.KeyNotification notification) -> bool +static StackExchange.Redis.RedisChannel.KeyEvent(StackExchange.Redis.KeyNotificationType type, int? database = null) -> StackExchange.Redis.RedisChannel +static StackExchange.Redis.RedisChannel.KeyEvent(System.ReadOnlySpan type, int? database) -> StackExchange.Redis.RedisChannel +static StackExchange.Redis.RedisChannel.KeySpacePattern(in StackExchange.Redis.RedisKey pattern, int? 
database = null) -> StackExchange.Redis.RedisChannel +StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.Append = 1 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.Copy = 2 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.Del = 3 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.Evicted = 1001 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.Expire = 4 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.Expired = 1000 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.HDel = 5 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.HExpired = 6 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.HIncrBy = 8 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.HIncrByFloat = 7 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.HPersist = 9 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.HSet = 10 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.IncrBy = 12 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.IncrByFloat = 11 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.LInsert = 13 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.LPop = 14 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.LPush = 15 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.LRem = 16 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.LSet = 17 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.LTrim = 18 -> 
StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.MoveFrom = 19 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.MoveTo = 20 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.New = 1002 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.Overwritten = 1003 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.Persist = 21 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.RenameFrom = 22 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.RenameTo = 23 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.Restore = 24 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.RPop = 25 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.RPush = 26 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.SAdd = 27 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.Set = 28 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.SetRange = 29 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.SortStore = 30 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.SPop = 32 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.SRem = 31 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.TypeChanged = 1004 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.Unknown = 0 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.XAdd = 33 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.XDel = 34 -> StackExchange.Redis.KeyNotificationType 
+StackExchange.Redis.KeyNotificationType.XGroupCreate = 36 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.XGroupCreateConsumer = 35 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.XGroupDelConsumer = 37 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.XGroupDestroy = 38 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.XGroupSetId = 39 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.XSetId = 40 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.XTrim = 41 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.ZAdd = 42 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.ZDiffStore = 43 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.ZIncr = 46 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.ZInterStore = 44 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.ZRem = 49 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.ZRemByRank = 47 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.ZRemByScore = 48 -> StackExchange.Redis.KeyNotificationType +StackExchange.Redis.KeyNotificationType.ZUnionStore = 45 -> StackExchange.Redis.KeyNotificationType +static StackExchange.Redis.RedisChannel.KeySpacePrefix(in StackExchange.Redis.RedisKey prefix, int? database = null) -> StackExchange.Redis.RedisChannel +static StackExchange.Redis.RedisChannel.KeySpacePrefix(System.ReadOnlySpan prefix, int? 
database = null) -> StackExchange.Redis.RedisChannel +static StackExchange.Redis.RedisChannel.KeySpaceSingleKey(in StackExchange.Redis.RedisKey key, int database) -> StackExchange.Redis.RedisChannel +[SER003]StackExchange.Redis.HotKeysMetrics +[SER003]StackExchange.Redis.HotKeysMetrics.Cpu = 1 -> StackExchange.Redis.HotKeysMetrics +[SER003]StackExchange.Redis.HotKeysMetrics.Network = 2 -> StackExchange.Redis.HotKeysMetrics +[SER003]StackExchange.Redis.HotKeysMetrics.None = 0 -> StackExchange.Redis.HotKeysMetrics +[SER003]StackExchange.Redis.HotKeysResult +[SER003]StackExchange.Redis.HotKeysResult.AllCommandsAllSlotsNetworkBytes.get -> long +[SER003]StackExchange.Redis.HotKeysResult.AllCommandsAllSlotsTime.get -> System.TimeSpan +[SER003]StackExchange.Redis.HotKeysResult.AllCommandsSelectedSlotsNetworkBytes.get -> long? +[SER003]StackExchange.Redis.HotKeysResult.AllCommandsSelectedSlotsTime.get -> System.TimeSpan? +[SER003]StackExchange.Redis.HotKeysResult.CollectionDuration.get -> System.TimeSpan +[SER003]StackExchange.Redis.HotKeysResult.CollectionStartTime.get -> System.DateTime +[SER003]StackExchange.Redis.HotKeysResult.CpuByKey.get -> System.ReadOnlySpan +[SER003]StackExchange.Redis.HotKeysResult.IsSampled.get -> bool +[SER003]StackExchange.Redis.HotKeysResult.IsSlotFiltered.get -> bool +[SER003]StackExchange.Redis.HotKeysResult.MetricKeyBytes +[SER003]StackExchange.Redis.HotKeysResult.MetricKeyBytes.Bytes.get -> long +[SER003]StackExchange.Redis.HotKeysResult.MetricKeyBytes.Key.get -> StackExchange.Redis.RedisKey +[SER003]StackExchange.Redis.HotKeysResult.MetricKeyBytes.MetricKeyBytes() -> void +[SER003]StackExchange.Redis.HotKeysResult.MetricKeyBytes.MetricKeyBytes(in StackExchange.Redis.RedisKey key, long bytes) -> void +[SER003]StackExchange.Redis.HotKeysResult.MetricKeyCpu +[SER003]StackExchange.Redis.HotKeysResult.MetricKeyCpu.Duration.get -> System.TimeSpan +[SER003]StackExchange.Redis.HotKeysResult.MetricKeyCpu.Key.get -> StackExchange.Redis.RedisKey 
+[SER003]StackExchange.Redis.HotKeysResult.MetricKeyCpu.MetricKeyCpu() -> void +[SER003]StackExchange.Redis.HotKeysResult.MetricKeyCpu.MetricKeyCpu(in StackExchange.Redis.RedisKey key, long durationMicroseconds) -> void +[SER003]StackExchange.Redis.HotKeysResult.Metrics.get -> StackExchange.Redis.HotKeysMetrics +[SER003]StackExchange.Redis.HotKeysResult.NetworkBytesByKey.get -> System.ReadOnlySpan +[SER003]StackExchange.Redis.HotKeysResult.SampledCommandsSelectedSlotsNetworkBytes.get -> long? +[SER003]StackExchange.Redis.HotKeysResult.SampledCommandsSelectedSlotsTime.get -> System.TimeSpan? +[SER003]StackExchange.Redis.HotKeysResult.SampleRatio.get -> long +[SER003]StackExchange.Redis.HotKeysResult.SelectedSlots.get -> System.ReadOnlySpan +[SER003]StackExchange.Redis.HotKeysResult.TotalCpuTime.get -> System.TimeSpan? +[SER003]StackExchange.Redis.HotKeysResult.TotalCpuTimeSystem.get -> System.TimeSpan? +[SER003]StackExchange.Redis.HotKeysResult.TotalCpuTimeUser.get -> System.TimeSpan? +[SER003]StackExchange.Redis.HotKeysResult.TotalNetworkBytes.get -> long? +[SER003]StackExchange.Redis.HotKeysResult.TrackingActive.get -> bool +[SER003]StackExchange.Redis.IServer.HotKeysGet(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.HotKeysResult? +[SER003]StackExchange.Redis.IServer.HotKeysGetAsync(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +[SER003]StackExchange.Redis.IServer.HotKeysReset(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> void +[SER003]StackExchange.Redis.IServer.HotKeysResetAsync(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! 
+[SER003]StackExchange.Redis.IServer.HotKeysStart(StackExchange.Redis.HotKeysMetrics metrics = (StackExchange.Redis.HotKeysMetrics)-1, long count = 0, System.TimeSpan duration = default(System.TimeSpan), long sampleRatio = 1, int[]? slots = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> void +[SER003]StackExchange.Redis.IServer.HotKeysStartAsync(StackExchange.Redis.HotKeysMetrics metrics = (StackExchange.Redis.HotKeysMetrics)-1, long count = 0, System.TimeSpan duration = default(System.TimeSpan), long sampleRatio = 1, int[]? slots = null, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +[SER003]StackExchange.Redis.IServer.HotKeysStop(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> bool +[SER003]StackExchange.Redis.IServer.HotKeysStopAsync(StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +[SER003]override StackExchange.Redis.HotKeysResult.MetricKeyBytes.Equals(object? obj) -> bool +[SER003]override StackExchange.Redis.HotKeysResult.MetricKeyBytes.GetHashCode() -> int +[SER003]override StackExchange.Redis.HotKeysResult.MetricKeyBytes.ToString() -> string! +[SER003]override StackExchange.Redis.HotKeysResult.MetricKeyCpu.Equals(object? obj) -> bool +[SER003]override StackExchange.Redis.HotKeysResult.MetricKeyCpu.GetHashCode() -> int +[SER003]override StackExchange.Redis.HotKeysResult.MetricKeyCpu.ToString() -> string! +[SER003]override StackExchange.Redis.StreamIdempotentId.Equals(object? obj) -> bool +[SER003]override StackExchange.Redis.StreamIdempotentId.GetHashCode() -> int +[SER003]override StackExchange.Redis.StreamIdempotentId.ToString() -> string! +[SER003]StackExchange.Redis.IDatabase.StreamAdd(StackExchange.Redis.RedisKey key, StackExchange.Redis.NameValueEntry[]! streamPairs, StackExchange.Redis.StreamIdempotentId idempotentId, long? 
maxLength = null, bool useApproximateMaxLength = false, long? limit = null, StackExchange.Redis.StreamTrimMode trimMode = StackExchange.Redis.StreamTrimMode.KeepReferences, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue +[SER003]StackExchange.Redis.IDatabase.StreamAdd(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue streamField, StackExchange.Redis.RedisValue streamValue, StackExchange.Redis.StreamIdempotentId idempotentId, long? maxLength = null, bool useApproximateMaxLength = false, long? limit = null, StackExchange.Redis.StreamTrimMode trimMode = StackExchange.Redis.StreamTrimMode.KeepReferences, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.RedisValue +[SER003]StackExchange.Redis.IDatabase.StreamConfigure(StackExchange.Redis.RedisKey key, StackExchange.Redis.StreamConfiguration! configuration, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> void +[SER003]StackExchange.Redis.IDatabaseAsync.StreamAddAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.NameValueEntry[]! streamPairs, StackExchange.Redis.StreamIdempotentId idempotentId, long? maxLength = null, bool useApproximateMaxLength = false, long? limit = null, StackExchange.Redis.StreamTrimMode trimMode = StackExchange.Redis.StreamTrimMode.KeepReferences, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +[SER003]StackExchange.Redis.IDatabaseAsync.StreamAddAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue streamField, StackExchange.Redis.RedisValue streamValue, StackExchange.Redis.StreamIdempotentId idempotentId, long? maxLength = null, bool useApproximateMaxLength = false, long? 
limit = null, StackExchange.Redis.StreamTrimMode trimMode = StackExchange.Redis.StreamTrimMode.KeepReferences, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +[SER003]StackExchange.Redis.IDatabaseAsync.StreamConfigureAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.StreamConfiguration! configuration, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task! +[SER003]StackExchange.Redis.StreamConfiguration +[SER003]StackExchange.Redis.StreamConfiguration.IdmpDuration.get -> long? +[SER003]StackExchange.Redis.StreamConfiguration.IdmpDuration.set -> void +[SER003]StackExchange.Redis.StreamConfiguration.IdmpMaxSize.get -> long? +[SER003]StackExchange.Redis.StreamConfiguration.IdmpMaxSize.set -> void +[SER003]StackExchange.Redis.StreamConfiguration.StreamConfiguration() -> void +[SER003]StackExchange.Redis.StreamIdempotentId +[SER003]StackExchange.Redis.StreamIdempotentId.IdempotentId.get -> StackExchange.Redis.RedisValue +[SER003]StackExchange.Redis.StreamIdempotentId.ProducerId.get -> StackExchange.Redis.RedisValue +[SER003]StackExchange.Redis.StreamIdempotentId.StreamIdempotentId() -> void +[SER003]StackExchange.Redis.StreamIdempotentId.StreamIdempotentId(StackExchange.Redis.RedisValue producerId) -> void +[SER003]StackExchange.Redis.StreamIdempotentId.StreamIdempotentId(StackExchange.Redis.RedisValue producerId, StackExchange.Redis.RedisValue idempotentId) -> void +[SER003]StackExchange.Redis.StreamInfo.IdmpDuration.get -> long +[SER003]StackExchange.Redis.StreamInfo.IdmpMaxSize.get -> long +[SER003]StackExchange.Redis.StreamInfo.IidsAdded.get -> long +[SER003]StackExchange.Redis.StreamInfo.IidsDuplicates.get -> long +[SER003]StackExchange.Redis.StreamInfo.IidsTracked.get -> long +[SER003]StackExchange.Redis.StreamInfo.PidsTracked.get -> long +StackExchange.Redis.StreamInfo.EntriesAdded.get -> long 
+StackExchange.Redis.StreamInfo.MaxDeletedEntryId.get -> StackExchange.Redis.RedisValue +StackExchange.Redis.StreamInfo.RecordedFirstEntryId.get -> StackExchange.Redis.RedisValue +StackExchange.Redis.Lease.IsEmpty.get -> bool +[SER001]StackExchange.Redis.IDatabase.VectorSetRange(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue start = default(StackExchange.Redis.RedisValue), StackExchange.Redis.RedisValue end = default(StackExchange.Redis.RedisValue), long count = -1, StackExchange.Redis.Exclude exclude = StackExchange.Redis.Exclude.None, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> StackExchange.Redis.Lease! +[SER001]StackExchange.Redis.IDatabase.VectorSetRangeEnumerate(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue start = default(StackExchange.Redis.RedisValue), StackExchange.Redis.RedisValue end = default(StackExchange.Redis.RedisValue), long count = 100, StackExchange.Redis.Exclude exclude = StackExchange.Redis.Exclude.None, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Collections.Generic.IEnumerable! +[SER001]StackExchange.Redis.IDatabaseAsync.VectorSetRangeAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue start = default(StackExchange.Redis.RedisValue), StackExchange.Redis.RedisValue end = default(StackExchange.Redis.RedisValue), long count = -1, StackExchange.Redis.Exclude exclude = StackExchange.Redis.Exclude.None, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Threading.Tasks.Task?>! 
+[SER001]StackExchange.Redis.IDatabaseAsync.VectorSetRangeEnumerateAsync(StackExchange.Redis.RedisKey key, StackExchange.Redis.RedisValue start = default(StackExchange.Redis.RedisValue), StackExchange.Redis.RedisValue end = default(StackExchange.Redis.RedisValue), long count = 100, StackExchange.Redis.Exclude exclude = StackExchange.Redis.Exclude.None, StackExchange.Redis.CommandFlags flags = StackExchange.Redis.CommandFlags.None) -> System.Collections.Generic.IAsyncEnumerable! +override StackExchange.Redis.LCSMatchResult.LCSMatch.Equals(object? obj) -> bool +override StackExchange.Redis.LCSMatchResult.LCSMatch.GetHashCode() -> int +override StackExchange.Redis.LCSMatchResult.LCSMatch.ToString() -> string! +override StackExchange.Redis.LCSMatchResult.LCSPosition.Equals(object? obj) -> bool +override StackExchange.Redis.LCSMatchResult.LCSPosition.GetHashCode() -> int +override StackExchange.Redis.LCSMatchResult.LCSPosition.ToString() -> string! +StackExchange.Redis.LCSMatchResult.LCSMatch.Equals(in StackExchange.Redis.LCSMatchResult.LCSMatch other) -> bool +StackExchange.Redis.LCSMatchResult.LCSMatch.First.get -> StackExchange.Redis.LCSMatchResult.LCSPosition +StackExchange.Redis.LCSMatchResult.LCSMatch.Second.get -> StackExchange.Redis.LCSMatchResult.LCSPosition +StackExchange.Redis.LCSMatchResult.LCSPosition +StackExchange.Redis.LCSMatchResult.LCSPosition.End.get -> long +StackExchange.Redis.LCSMatchResult.LCSPosition.Equals(in StackExchange.Redis.LCSMatchResult.LCSPosition other) -> bool +StackExchange.Redis.LCSMatchResult.LCSPosition.LCSPosition() -> void +StackExchange.Redis.LCSMatchResult.LCSPosition.LCSPosition(long start, long end) -> void +StackExchange.Redis.LCSMatchResult.LCSPosition.Start.get -> long +StackExchange.Redis.RedisType.VectorSet = 8 -> StackExchange.Redis.RedisType diff --git a/src/StackExchange.Redis/PublicAPI/PublicAPI.Unshipped.txt b/src/StackExchange.Redis/PublicAPI/PublicAPI.Unshipped.txt new file mode 100644 index 000000000..ab058de62 
--- /dev/null +++ b/src/StackExchange.Redis/PublicAPI/PublicAPI.Unshipped.txt @@ -0,0 +1 @@ +#nullable enable diff --git a/src/StackExchange.Redis/PublicAPI/net6.0/PublicAPI.Shipped.txt b/src/StackExchange.Redis/PublicAPI/net6.0/PublicAPI.Shipped.txt new file mode 100644 index 000000000..fae4f65ce --- /dev/null +++ b/src/StackExchange.Redis/PublicAPI/net6.0/PublicAPI.Shipped.txt @@ -0,0 +1,4 @@ +StackExchange.Redis.ConfigurationOptions.SslClientAuthenticationOptions.get -> System.Func? +StackExchange.Redis.ConfigurationOptions.SslClientAuthenticationOptions.set -> void +System.Runtime.CompilerServices.IsExternalInit (forwarded, contained in System.Runtime) +StackExchange.Redis.ConfigurationOptions.SetUserPemCertificate(string! userCertificatePath, string? userKeyPath = null) -> void \ No newline at end of file diff --git a/src/StackExchange.Redis/README.md b/src/StackExchange.Redis/README.md new file mode 100644 index 000000000..9cc0fe157 --- /dev/null +++ b/src/StackExchange.Redis/README.md @@ -0,0 +1,6 @@ +StackExchange.Redis is a high-performance RESP (Redis, etc) client for .NET, available under the MIT license. 
+ +- Release notes: [https://stackexchange.github.io/StackExchange.Redis/ReleaseNotes](https://stackexchange.github.io/StackExchange.Redis/ReleaseNotes) +- NuGet package: [https://www.nuget.org/packages/StackExchange.Redis/](https://www.nuget.org/packages/StackExchange.Redis/) +- General docs: [https://stackexchange.github.io/StackExchange.Redis/](https://stackexchange.github.io/StackExchange.Redis/) +- Code: [https://github.com/StackExchange/StackExchange.Redis/](https://github.com/StackExchange/StackExchange.Redis/) \ No newline at end of file diff --git a/src/StackExchange.Redis/RawResult.cs b/src/StackExchange.Redis/RawResult.cs index e2f9e44ef..2b2b3989a 100644 --- a/src/StackExchange.Redis/RawResult.cs +++ b/src/StackExchange.Redis/RawResult.cs @@ -1,7 +1,6 @@ using System; using System.Buffers; using System.Runtime.CompilerServices; -using System.Runtime.InteropServices; using System.Text; using Pipelines.Sockets.Unofficial.Arenas; @@ -12,18 +11,51 @@ internal readonly struct RawResult internal ref RawResult this[int index] => ref GetItems()[index]; internal int ItemsCount => (int)_items.Length; - internal ReadOnlySequence Payload { get; } - internal static readonly RawResult NullMultiBulk = new RawResult(default(Sequence), isNull: true); - internal static readonly RawResult EmptyMultiBulk = new RawResult(default(Sequence), isNull: false); + public delegate bool ScalarParser(scoped ReadOnlySpan span, out T value); + + internal bool TryParse(ScalarParser parser, out T value) + => _payload.IsSingleSegment ? parser(_payload.First.Span, out value) : TryParseSlow(parser, out value); + + private bool TryParseSlow(ScalarParser parser, out T value) + { + // linearize a multi-segment payload into a single span for parsing + const int MAX_STACK = 64; + var len = checked((int)_payload.Length); + byte[]? lease = null; + try + { + Span span = + (len <= MAX_STACK ? 
stackalloc byte[MAX_STACK] : (lease = ArrayPool.Shared.Rent(len))) + .Slice(0, len); + _payload.CopyTo(span); + return parser(span, out value); + } + finally + { + if (lease is not null) ArrayPool.Shared.Return(lease); + } + } + + private readonly ReadOnlySequence _payload; + internal ReadOnlySequence Payload => _payload; + internal static readonly RawResult Nil = default; - // note: can't use Memory here - struct recursion breaks runtimr + // Note: can't use Memory here - struct recursion breaks runtime private readonly Sequence _items; - private readonly ResultType _type; + private readonly ResultType _resultType; + private readonly ResultFlags _flags; - private const ResultType NonNullFlag = (ResultType)128; + [Flags] + internal enum ResultFlags + { + None = 0, + HasValue = 1 << 0, // simply indicates "not the default" (always set in .ctor) + NonNull = 1 << 1, // defines explicit null; isn't "IsNull" because we want default to be null + Resp3 = 1 << 2, // was the connection in RESP3 mode? 
+ } - public RawResult(ResultType resultType, in ReadOnlySequence payload, bool isNull) + public RawResult(ResultType resultType, in ReadOnlySequence payload, ResultFlags flags) { switch (resultType) { @@ -31,47 +63,77 @@ public RawResult(ResultType resultType, in ReadOnlySequence payload, bool case ResultType.Error: case ResultType.Integer: case ResultType.BulkString: + case ResultType.Double: + case ResultType.Boolean: + case ResultType.BlobError: + case ResultType.VerbatimString: + case ResultType.BigInteger: + break; + case ResultType.Null: + flags &= ~ResultFlags.NonNull; break; default: - throw new ArgumentOutOfRangeException(nameof(resultType)); + ThrowInvalidType(resultType); + break; } - if (!isNull) resultType |= NonNullFlag; - _type = resultType; - Payload = payload; + _resultType = resultType; + _flags = flags | ResultFlags.HasValue; + _payload = payload; _items = default; } - public RawResult(Sequence items, bool isNull) + public RawResult(ResultType resultType, Sequence items, ResultFlags flags) { - _type = isNull ? 
ResultType.MultiBulk : (ResultType.MultiBulk | NonNullFlag); - Payload = default; + switch (resultType) + { + case ResultType.Array: + case ResultType.Map: + case ResultType.Set: + case ResultType.Attribute: + case ResultType.Push: + break; + case ResultType.Null: + flags &= ~ResultFlags.NonNull; + break; + default: + ThrowInvalidType(resultType); + break; + } + _resultType = resultType; + _flags = flags | ResultFlags.HasValue; + _payload = default; _items = items.Untyped(); } - public bool IsError => Type == ResultType.Error; + private static void ThrowInvalidType(ResultType resultType) + => throw new ArgumentOutOfRangeException(nameof(resultType), $"Invalid result-type: {resultType}"); - public ResultType Type => _type & ~NonNullFlag; + public bool IsError => _resultType.IsError(); - internal bool IsNull => (_type & NonNullFlag) == 0; - public bool HasValue => Type != ResultType.None; + public ResultType Resp3Type => _resultType; + + // if null, assume string + public ResultType Resp2TypeBulkString => _resultType == ResultType.Null ? ResultType.BulkString : _resultType.ToResp2(); + // if null, assume array + public ResultType Resp2TypeArray => _resultType == ResultType.Null ? 
ResultType.Array : _resultType.ToResp2(); + + internal bool IsNull => (_flags & ResultFlags.NonNull) == 0; + + public bool HasValue => (_flags & ResultFlags.HasValue) != 0; + + public bool IsResp3 => (_flags & ResultFlags.Resp3) != 0; public override string ToString() { if (IsNull) return "(null)"; - switch (Type) + return _resultType.ToResp2() switch { - case ResultType.SimpleString: - case ResultType.Integer: - case ResultType.Error: - return $"{Type}: {GetString()}"; - case ResultType.BulkString: - return $"{Type}: {Payload.Length} bytes"; - case ResultType.MultiBulk: - return $"{Type}: {ItemsCount} items"; - default: - return $"(unknown: {Type})"; - } + ResultType.SimpleString or ResultType.Integer or ResultType.Error => $"{Resp3Type}: {GetString()}", + ResultType.BulkString => $"{Resp3Type}: {Payload.Length} bytes", + ResultType.Array => $"{Resp3Type}: {ItemsCount} items", + _ => $"(unknown: {Resp3Type})", + }; } public Tokenizer GetInlineTokenizer() => new Tokenizer(Payload); @@ -84,7 +146,7 @@ internal ref struct Tokenizer public Tokenizer GetEnumerator() => this; private BufferReader _value; - public Tokenizer(in ReadOnlySequence value) + public Tokenizer(scoped in ReadOnlySequence value) { _value = new BufferReader(value); Current = default; @@ -124,43 +186,61 @@ public bool MoveNext() } public ReadOnlySequence Current { get; private set; } } - internal RedisChannel AsRedisChannel(byte[] channelPrefix, RedisChannel.PatternMode mode) + + internal RedisChannel AsRedisChannel(byte[]? 
channelPrefix, RedisChannel.RedisChannelOptions options) { - switch (Type) + switch (Resp2TypeBulkString) { case ResultType.SimpleString: case ResultType.BulkString: - if (channelPrefix == null) + if (channelPrefix is null) { - return new RedisChannel(GetBlob(), mode); + // no channel-prefix enabled, just use as-is + return new RedisChannel(GetBlob(), options); } if (StartsWith(channelPrefix)) { + // we have a channel-prefix, and it matches; strip it byte[] copy = Payload.Slice(channelPrefix.Length).ToArray(); - return new RedisChannel(copy, mode); + + return new RedisChannel(copy, options); + } + + // we shouldn't get unexpected events, so to get here: we've received a notification + // on a channel that doesn't match our prefix; this *should* be limited to + // key notifications (see: IgnoreChannelPrefix), but: we need to be sure + if (StartsWith("__keyspace@"u8) || StartsWith("__keyevent@"u8)) + { + // use as-is + return new RedisChannel(GetBlob(), options); } - return default(RedisChannel); + return default; default: - throw new InvalidCastException("Cannot convert to RedisChannel: " + Type); + throw new InvalidCastException("Cannot convert to RedisChannel: " + Resp3Type); } } internal RedisKey AsRedisKey() { - switch (Type) + return Resp2TypeBulkString switch { - case ResultType.SimpleString: - case ResultType.BulkString: - return (RedisKey)GetBlob(); - default: - throw new InvalidCastException("Cannot convert to RedisKey: " + Type); - } + ResultType.SimpleString or ResultType.BulkString => (RedisKey)GetBlob(), + _ => throw new InvalidCastException("Cannot convert to RedisKey: " + Resp3Type), + }; } internal RedisValue AsRedisValue() { if (IsNull) return RedisValue.Null; - switch (Type) + if (Resp3Type == ResultType.Boolean && Payload.Length == 1) + { + switch (Payload.First.Span[0]) + { + case (byte)'t': return (RedisValue)true; + case (byte)'f': return (RedisValue)false; + } + } + switch (Resp2TypeBulkString) { case ResultType.Integer: long i64; @@ -170,13 
+250,13 @@ internal RedisValue AsRedisValue() case ResultType.BulkString: return (RedisValue)GetBlob(); } - throw new InvalidCastException("Cannot convert to RedisValue: " + Type); + throw new InvalidCastException("Cannot convert to RedisValue: " + Resp3Type); } - internal Lease AsLease() + internal Lease? AsLease() { if (IsNull) return null; - switch (Type) + switch (Resp2TypeBulkString) { case ResultType.SimpleString: case ResultType.BulkString: @@ -185,7 +265,7 @@ internal Lease AsLease() payload.CopyTo(lease.Span); return lease; } - throw new InvalidCastException("Cannot convert to Lease: " + Type); + throw new InvalidCastException("Cannot convert to Lease: " + Resp3Type); } internal bool IsEqual(in CommandBytes expected) @@ -194,10 +274,14 @@ internal bool IsEqual(in CommandBytes expected) return new CommandBytes(Payload).Equals(expected); } - internal unsafe bool IsEqual(byte[] expected) + internal bool IsEqual(byte[]? expected) { if (expected == null) throw new ArgumentNullException(nameof(expected)); + return IsEqual(new ReadOnlySpan(expected)); + } + internal bool IsEqual(ReadOnlySpan expected) + { var rangeToCheck = Payload; if (expected.Length != rangeToCheck.Length) return false; @@ -207,7 +291,7 @@ internal unsafe bool IsEqual(byte[] expected) foreach (var segment in rangeToCheck) { var from = segment.Span; - var to = new Span(expected, offset, from.Length); + var to = expected.Slice(offset, from.Length); if (!from.SequenceEqual(to)) return false; offset += from.Length; @@ -223,19 +307,18 @@ internal bool StartsWith(in CommandBytes expected) var rangeToCheck = Payload.Slice(0, len); return new CommandBytes(rangeToCheck).Equals(expected); } - internal bool StartsWith(byte[] expected) + internal bool StartsWith(ReadOnlySpan expected) { - if (expected == null) throw new ArgumentNullException(nameof(expected)); if (expected.Length > Payload.Length) return false; var rangeToCheck = Payload.Slice(0, expected.Length); if (rangeToCheck.IsSingleSegment) return 
rangeToCheck.First.Span.SequenceEqual(expected); int offset = 0; - foreach(var segment in rangeToCheck) + foreach (var segment in rangeToCheck) { var from = segment.Span; - var to = new Span(expected, offset, from.Length); + var to = expected.Slice(offset, from.Length); if (!from.SequenceEqual(to)) return false; offset += from.Length; @@ -243,7 +326,7 @@ internal bool StartsWith(byte[] expected) return true; } - internal byte[] GetBlob() + internal byte[]? GetBlob() { if (IsNull) return null; @@ -255,25 +338,43 @@ internal byte[] GetBlob() internal bool GetBoolean() { if (Payload.Length != 1) throw new InvalidCastException(); - switch (Payload.First.Span[0]) + if (Resp3Type == ResultType.Boolean) { - case (byte)'1': return true; - case (byte)'0': return false; - default: throw new InvalidCastException(); + return Payload.First.Span[0] switch + { + (byte)'t' => true, + (byte)'f' => false, + _ => throw new InvalidCastException(), + }; } + return Payload.First.Span[0] switch + { + (byte)'1' => true, + (byte)'0' => false, + _ => throw new InvalidCastException(), + }; } [MethodImpl(MethodImplOptions.AggressiveInlining)] internal Sequence GetItems() => _items.Cast(); [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal RedisKey[] GetItemsAsKeys() => this.ToArray((in RawResult x) => x.AsRedisKey()); + internal double?[]? GetItemsAsDoubles() => this.ToArray((in RawResult x) => x.TryGetDouble(out double val) ? val : null); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal RedisKey[]? GetItemsAsKeys() => this.ToArray((in RawResult x) => x.AsRedisKey()); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal RedisValue[]? GetItemsAsValues() => this.ToArray((in RawResult x) => x.AsRedisValue()); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal string?[]? 
GetItemsAsStrings() => this.ToArray((in RawResult x) => (string?)x.AsRedisValue()); [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal RedisValue[] GetItemsAsValues() => this.ToArray((in RawResult x) => x.AsRedisValue()); + internal string[]? GetItemsAsStringsNotNullable() => this.ToArray((in RawResult x) => (string)x.AsRedisValue()!); [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal string[] GetItemsAsStrings() => this.ToArray((in RawResult x) => (string)x.AsRedisValue()); + internal bool[]? GetItemsAsBooleans() => this.ToArray((in RawResult x) => (bool)x.AsRedisValue()); internal GeoPosition? GetItemsAsGeoPosition() { @@ -291,6 +392,21 @@ internal bool GetBoolean() return AsGeoPosition(root.GetItems()); } + internal SortedSetEntry[]? GetItemsAsSortedSetEntryArray() => this.ToArray((in RawResult item) => AsSortedSetEntry(item.GetItems())); + + private static SortedSetEntry AsSortedSetEntry(in Sequence elements) + { + if (elements.IsSingleSegment) + { + var span = elements.FirstSpan; + return new SortedSetEntry(span[0].AsRedisValue(), span[1].TryGetDouble(out double val) ? val : double.NaN); + } + else + { + return new SortedSetEntry(elements[0].AsRedisValue(), elements[1].TryGetDouble(out double val) ? val : double.NaN); + } + } + private static GeoPosition AsGeoPosition(in Sequence coords) { double longitude, latitude; @@ -309,26 +425,34 @@ private static GeoPosition AsGeoPosition(in Sequence coords) return new GeoPosition(longitude, latitude); } - internal GeoPosition?[] GetItemsAsGeoPositionArray() - => this.ToArray((in RawResult item) => item.IsNull ? (GeoPosition?)null : AsGeoPosition(item.GetItems())); + internal GeoPosition?[]? GetItemsAsGeoPositionArray() + => this.ToArray((in RawResult item) => item.IsNull ? default : AsGeoPosition(item.GetItems())); - internal unsafe string GetString() + internal unsafe string? GetString() => GetString(out _); + internal unsafe string? 
GetString(out ReadOnlySpan verbatimPrefix) { + verbatimPrefix = default; if (IsNull) return null; if (Payload.IsEmpty) return ""; + string s; if (Payload.IsSingleSegment) { - return Format.GetString(Payload.First.Span); + s = Format.GetString(Payload.First.Span); + return Resp3Type == ResultType.VerbatimString ? GetVerbatimString(s, out verbatimPrefix) : s; } +#if NET + // use system-provided sequence decoder + return Encoding.UTF8.GetString(in _payload); +#else var decoder = Encoding.UTF8.GetDecoder(); int charCount = 0; - foreach(var segment in Payload) + foreach (var segment in Payload) { var span = segment.Span; if (span.IsEmpty) continue; - fixed(byte* bPtr = span) + fixed (byte* bPtr = span) { charCount += decoder.GetCharCount(bPtr, span.Length, false); } @@ -336,7 +460,7 @@ internal unsafe string GetString() decoder.Reset(); - string s = new string((char)0, charCount); + s = new string((char)0, charCount); fixed (char* sPtr = s) { char* cPtr = sPtr; @@ -348,17 +472,39 @@ internal unsafe string GetString() fixed (byte* bPtr = span) { var written = decoder.GetChars(bPtr, span.Length, cPtr, charCount, false); + if (written < 0 || written > charCount) Throw(); // protect against hypothetical cPtr weirdness cPtr += written; charCount -= written; } } } - return s; + + return Resp3Type == ResultType.VerbatimString ? GetVerbatimString(s, out verbatimPrefix) : s; + + static void Throw() => throw new InvalidOperationException("Invalid result from GetChars"); +#endif + static string? GetVerbatimString(string? value, out ReadOnlySpan type) + { + // The first three bytes provide information about the format of the following string, which + // can be txt for plain text, or mkd for markdown. The fourth byte is always `:`. + // Then the real string follows. 
+ if (value is not null + && value.Length >= 4 && value[3] == ':') + { + type = value.AsSpan().Slice(0, 3); + value = value.Substring(4); + } + else + { + type = default; + } + return value; + } } internal bool TryGetDouble(out double val) { - if (IsNull) + if (IsNull || Payload.IsEmpty) { val = 0; return false; @@ -368,12 +514,20 @@ internal bool TryGetDouble(out double val) val = i64; return true; } + + if (Payload.IsSingleSegment) return Format.TryParseDouble(Payload.First.Span, out val); + if (Payload.Length < 64) + { + Span span = stackalloc byte[(int)Payload.Length]; + Payload.CopyTo(span); + return Format.TryParseDouble(span, out val); + } return Format.TryParseDouble(GetString(), out val); } internal bool TryGetInt64(out long value) { - if (IsNull || Payload.IsEmpty || Payload.Length > PhysicalConnection.MaxInt64TextLen) + if (IsNull || Payload.IsEmpty || Payload.Length > Format.MaxInt64TextLen) { value = 0; return false; @@ -385,6 +539,11 @@ internal bool TryGetInt64(out long value) Payload.CopyTo(span); return Format.TryParseInt64(span, out value); } + + internal bool Is(char value) + { + var span = Payload.First.Span; + return span.Length == 1 && (char)span[0] == value && Payload.IsSingleSegment; + } } } - diff --git a/src/StackExchange.Redis/RedisBase.cs b/src/StackExchange.Redis/RedisBase.cs index c2aad6300..095835efd 100644 --- a/src/StackExchange.Redis/RedisBase.cs +++ b/src/StackExchange.Redis/RedisBase.cs @@ -1,5 +1,5 @@ using System; -using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; using System.Threading.Tasks; namespace StackExchange.Redis @@ -8,9 +8,9 @@ internal abstract partial class RedisBase : IRedis { internal static readonly DateTime UnixEpoch = new DateTime(1970, 1, 1, 0, 0, 0, DateTimeKind.Utc); internal readonly ConnectionMultiplexer multiplexer; - protected readonly object asyncState; + protected readonly object? 
asyncState; - internal RedisBase(ConnectionMultiplexer multiplexer, object asyncState) + internal RedisBase(ConnectionMultiplexer multiplexer, object? asyncState) { this.multiplexer = multiplexer; this.asyncState = asyncState; @@ -40,28 +40,36 @@ public virtual Task PingAsync(CommandFlags flags = CommandFlags.None) public void WaitAll(params Task[] tasks) => multiplexer.WaitAll(tasks); - internal virtual Task ExecuteAsync(Message message, ResultProcessor processor, ServerEndPoint server = null) + internal virtual Task ExecuteAsync(Message? message, ResultProcessor? processor, T defaultValue, ServerEndPoint? server = null) { - if (message == null) return CompletedTask.Default(asyncState); + if (message is null) return CompletedTask.FromDefault(defaultValue, asyncState); + multiplexer.CheckMessage(message); + return multiplexer.ExecuteAsyncImpl(message, processor, asyncState, server, defaultValue); + } + + internal virtual Task ExecuteAsync(Message? message, ResultProcessor? processor, ServerEndPoint? server = null) + { + if (message is null) return CompletedTask.Default(asyncState); multiplexer.CheckMessage(message); return multiplexer.ExecuteAsyncImpl(message, processor, asyncState, server); } - internal virtual T ExecuteSync(Message message, ResultProcessor processor, ServerEndPoint server = null) + [return: NotNullIfNotNull("defaultValue")] + internal virtual T? ExecuteSync(Message? message, ResultProcessor? processor, ServerEndPoint? server = null, T? 
defaultValue = default) { - if (message == null) return default(T); // no-op + if (message is null) return defaultValue; // no-op multiplexer.CheckMessage(message); - return multiplexer.ExecuteSyncImpl(message, processor, server); + return multiplexer.ExecuteSyncImpl(message, processor, server, defaultValue); } - internal virtual RedisFeatures GetFeatures(in RedisKey key, CommandFlags flags, out ServerEndPoint server) + internal virtual RedisFeatures GetFeatures(in RedisKey key, CommandFlags flags, RedisCommand command, out ServerEndPoint? server) { - server = multiplexer.SelectServer(RedisCommand.PING, flags, key); + server = multiplexer.SelectServer(command, flags, key); var version = server == null ? multiplexer.RawConfig.DefaultVersion : server.Version; return new RedisFeatures(version); } - protected void WhenAlwaysOrExists(When when) + protected static void WhenAlwaysOrExists(When when) { switch (when) { @@ -73,7 +81,7 @@ protected void WhenAlwaysOrExists(When when) } } - protected void WhenAlwaysOrExistsOrNotExists(When when) + protected static void WhenAlwaysOrExistsOrNotExists(When when) { switch (when) { @@ -86,7 +94,7 @@ protected void WhenAlwaysOrExistsOrNotExists(When when) } } - protected void WhenAlwaysOrNotExists(When when) + protected static void WhenAlwaysOrNotExists(When when) { switch (when) { @@ -109,7 +117,7 @@ private ResultProcessor.TimingProcessor.TimerMessage GetTimerMessage(CommandFlag if (map.IsAvailable(RedisCommand.ECHO)) return ResultProcessor.TimingProcessor.CreateMessage(-1, flags, RedisCommand.ECHO, RedisLiterals.PING); // as our fallback, we'll do something odd... 
we'll treat a key like a value, out of sheer desperation - // note: this usually means: twemproxy - in which case we're fine anyway, since the proxy does the routing + // note: this usually means: twemproxy/envoyproxy - in which case we're fine anyway, since the proxy does the routing return ResultProcessor.TimingProcessor.CreateMessage(0, flags, RedisCommand.EXISTS, (RedisValue)multiplexer.UniqueId); } @@ -123,7 +131,7 @@ internal static bool IsNil(in RedisValue pattern) { if (pattern.IsNullOrEmpty) return true; if (pattern.IsInteger) return false; - byte[] rawValue = pattern; + byte[] rawValue = pattern!; return rawValue.Length == 1 && rawValue[0] == '*'; } } diff --git a/src/StackExchange.Redis/RedisBatch.cs b/src/StackExchange.Redis/RedisBatch.cs index d364dfa18..0ef97f365 100644 --- a/src/StackExchange.Redis/RedisBatch.cs +++ b/src/StackExchange.Redis/RedisBatch.cs @@ -4,11 +4,11 @@ namespace StackExchange.Redis { - internal class RedisBatch : RedisDatabase, IBatch + internal sealed class RedisBatch : RedisDatabase, IBatch { - private List pending; + private List? pending; - public RedisBatch(RedisDatabase wrapped, object asyncState) : base(wrapped.multiplexer, wrapped.Database, asyncState ?? wrapped.AsyncState) {} + public RedisBatch(RedisDatabase wrapped, object? asyncState) : base(wrapped.multiplexer, wrapped.Database, asyncState ?? wrapped.AsyncState) { } public void Execute() { @@ -20,28 +20,28 @@ public void Execute() var byBridge = new Dictionary>(); // optimisation: assume most things are in a single bridge - PhysicalBridge lastBridge = null; - List lastList = null; + PhysicalBridge? lastBridge = null; + List? 
lastList = null; foreach (var message in snapshot) { var server = multiplexer.SelectServer(message); if (server == null) { - FailNoServer(snapshot); + FailNoServer(multiplexer, snapshot); throw ExceptionFactory.NoConnectionAvailable(multiplexer, message, server); } - var bridge = server.GetBridge(message.Command); + var bridge = server.GetBridge(message); if (bridge == null) { - FailNoServer(snapshot); + FailNoServer(multiplexer, snapshot); throw ExceptionFactory.NoConnectionAvailable(multiplexer, message, server); } // identity a list - List list; + List? list; if (bridge == lastBridge) { - list = lastList; + list = lastList!; } else if (!byBridge.TryGetValue(bridge, out list)) { @@ -58,21 +58,21 @@ public void Execute() { if (!pair.Key.TryEnqueue(pair.Value, pair.Key.ServerEndPoint.IsReplica)) { - FailNoServer(pair.Value); + FailNoServer(multiplexer, pair.Value); } } } - internal override Task ExecuteAsync(Message message, ResultProcessor processor, ServerEndPoint server = null) + internal override Task ExecuteAsync(Message? message, ResultProcessor? processor, T defaultValue, ServerEndPoint? server = null) { - if (message == null) return CompletedTask.Default(asyncState); + if (message == null) return CompletedTask.FromDefault(defaultValue, asyncState); multiplexer.CheckMessage(message); // prepare the inner command as a task Task task; if (message.IsFireAndForget) { - task = CompletedTask.Default(null); // F+F explicitly does not get async-state + task = CompletedTask.FromDefault(defaultValue, null); // F+F explicitly does not get async-state } else { @@ -82,21 +82,42 @@ internal override Task ExecuteAsync(Message message, ResultProcessor pr } // store it - (pending ?? (pending = new List())).Add(message); - return task; + (pending ??= new List()).Add(message); + return task!; } - internal override T ExecuteSync(Message message, ResultProcessor processor, ServerEndPoint server = null) + internal override Task ExecuteAsync(Message? message, ResultProcessor? 
processor, ServerEndPoint? server = null) where T : default { - throw new NotSupportedException("ExecuteSync cannot be used inside a batch"); + if (message == null) return CompletedTask.Default(asyncState); + multiplexer.CheckMessage(message); + + // prepare the inner command as a task + Task task; + if (message.IsFireAndForget) + { + task = CompletedTask.Default(null); // F+F explicitly does not get async-state + } + else + { + var source = TaskResultBox.Create(out var tcs, asyncState); + task = tcs.Task; + message.SetSource(source!, processor); + } + + // store it + (pending ??= new List()).Add(message); + return task; } - private void FailNoServer(List messages) + internal override T ExecuteSync(Message? message, ResultProcessor? processor, ServerEndPoint? server = null, T? defaultValue = default) where T : default + => throw new NotSupportedException("ExecuteSync cannot be used inside a batch"); + + private static void FailNoServer(ConnectionMultiplexer muxer, List messages) { if (messages == null) return; - foreach(var msg in messages) + foreach (var msg in messages) { - msg.Fail(ConnectionFailureType.UnableToResolvePhysicalConnection, null, "unable to write batch"); + msg.Fail(ConnectionFailureType.UnableToResolvePhysicalConnection, null, "unable to write batch", muxer); msg.Complete(); } } diff --git a/src/StackExchange.Redis/RedisChannel.cs b/src/StackExchange.Redis/RedisChannel.cs index b830e6f27..c3acf1493 100644 --- a/src/StackExchange.Redis/RedisChannel.cs +++ b/src/StackExchange.Redis/RedisChannel.cs @@ -1,188 +1,462 @@ using System; +using System.Buffers; +using System.Diagnostics; +using System.Runtime.CompilerServices; using System.Text; namespace StackExchange.Redis { /// - /// Represents a pub/sub channel name + /// Represents a pub/sub channel name. /// public readonly struct RedisChannel : IEquatable { - internal readonly byte[] Value; - internal readonly bool IsPatternBased; + internal readonly byte[]? 
Value; + + internal ReadOnlySpan Span => Value is null ? default : Value.AsSpan(); + + internal ReadOnlySpan RoutingSpan + { + get + { + var span = Span; + if ((Options & (RedisChannelOptions.KeyRouted | RedisChannelOptions.IgnoreChannelPrefix | + RedisChannelOptions.Sharded | RedisChannelOptions.MultiNode | RedisChannelOptions.Pattern)) + == (RedisChannelOptions.KeyRouted | RedisChannelOptions.IgnoreChannelPrefix)) + { + // this *could* be a single-key __keyspace@{db}__:{key} subscription, in which case we want to use the key + // part for routing, but to avoid overhead we'll only even look if the channel starts with an underscore + if (span.Length >= 16 && span[0] == (byte)'_') span = StripKeySpacePrefix(span); + } + return span; + } + } + + internal static ReadOnlySpan StripKeySpacePrefix(ReadOnlySpan span) + { + if (span.Length >= 16 && span.StartsWith("__keyspace@"u8)) + { + var subspan = span.Slice(12); + int end = subspan.IndexOf("__:"u8); + if (end >= 0) return subspan.Slice(end + 3); + } + return span; + } + + internal readonly RedisChannelOptions Options; + + [Flags] + internal enum RedisChannelOptions + { + None = 0, + Pattern = 1 << 0, + Sharded = 1 << 1, + KeyRouted = 1 << 2, + MultiNode = 1 << 3, + IgnoreChannelPrefix = 1 << 4, + } + + // we don't consider Routed for equality - it's an implementation detail, not a fundamental feature + private const RedisChannelOptions EqualityMask = + ~(RedisChannelOptions.KeyRouted | RedisChannelOptions.MultiNode | RedisChannelOptions.IgnoreChannelPrefix); + + internal RedisCommand GetPublishCommand() + { + return (Options & (RedisChannelOptions.Sharded | RedisChannelOptions.MultiNode)) switch + { + RedisChannelOptions.None => RedisCommand.PUBLISH, + RedisChannelOptions.Sharded => RedisCommand.SPUBLISH, + _ => ThrowKeyRouted(), + }; + + static RedisCommand ThrowKeyRouted() => throw new InvalidOperationException("Publishing is not supported for multi-node channels"); + } + + /// + /// Should we use cluster routing 
for this channel? This applies *either* to sharded (SPUBLISH) scenarios, + /// or to scenarios using . + /// + internal bool IsKeyRouted => (Options & RedisChannelOptions.KeyRouted) != 0; + + /// + /// Should this channel be subscribed to on all nodes? This is only relevant for cluster scenarios and keyspace notifications. + /// + internal bool IsMultiNode => (Options & RedisChannelOptions.MultiNode) != 0; + + /// + /// Should the channel prefix be ignored when writing this channel. + /// + internal bool IgnoreChannelPrefix => (Options & RedisChannelOptions.IgnoreChannelPrefix) != 0; /// - /// Indicates whether the channel-name is either null or a zero-length value + /// Indicates whether the channel-name is either null or a zero-length value. /// public bool IsNullOrEmpty => Value == null || Value.Length == 0; + /// + /// Indicates whether this channel represents a wildcard pattern (see PSUBSCRIBE). + /// + public bool IsPattern => (Options & RedisChannelOptions.Pattern) != 0; + + /// + /// Indicates whether this channel represents a shard channel (see SSUBSCRIBE). + /// + public bool IsSharded => (Options & RedisChannelOptions.Sharded) != 0; + internal bool IsNull => Value == null; /// - /// Create a new redis channel from a buffer, explicitly controlling the pattern mode + /// Indicates whether channels should use when no + /// is specified; this is enabled by default, but can be disabled to avoid unexpected wildcard scenarios. + /// + public static bool UseImplicitAutoPattern + { + get => s_DefaultPatternMode == PatternMode.Auto; + set => s_DefaultPatternMode = value ? PatternMode.Auto : PatternMode.Literal; + } + + private static PatternMode s_DefaultPatternMode = PatternMode.Auto; + + /// + /// Creates a new that does not act as a wildcard subscription. 
In cluster + /// environments, this channel will be freely routed to any applicable server - different client nodes + /// will generally connect to different servers; this is suitable for distributing pub/sub in scenarios with + /// very few channels. In non-cluster environments, routing is not a consideration. + /// + public static RedisChannel Literal(string value) => new(value, RedisChannelOptions.None); + + /// + /// Creates a new that does not act as a wildcard subscription. In cluster + /// environments, this channel will be freely routed to any applicable server - different client nodes + /// will generally connect to different servers; this is suitable for distributing pub/sub in scenarios with + /// very few channels. In non-cluster environments, routing is not a consideration. + /// + public static RedisChannel Literal(byte[] value) => new(value, RedisChannelOptions.None); + + /// + /// In cluster environments, this channel will be routed using similar rules to , which is suitable + /// for distributing pub/sub in scenarios with lots of channels. In non-cluster environments, routing is not + /// a consideration. + /// + /// Note that channels from Sharded are always routed. + public RedisChannel WithKeyRouting() + { + if (IsMultiNode) Throw(); + return new(Value, Options | RedisChannelOptions.KeyRouted); + + static void Throw() => throw new InvalidOperationException("Key routing is not supported for multi-node channels"); + } + + /// + /// Creates a new that acts as a wildcard subscription. In cluster + /// environments, this channel will be freely routed to any applicable server - different client nodes + /// will generally connect to different servers; this is suitable for distributing pub/sub in scenarios with + /// very few channels. In non-cluster environments, routing is not a consideration. 
+ /// + public static RedisChannel Pattern(string value) => new(value, RedisChannelOptions.Pattern); + + /// + /// Creates a new that acts as a wildcard subscription. In cluster + /// environments, this channel will be freely routed to any applicable server - different client nodes + /// will generally connect to different servers; this is suitable for distributing pub/sub in scenarios with + /// very few channels. In non-cluster environments, routing is not a consideration. + /// + public static RedisChannel Pattern(byte[] value) => new(value, RedisChannelOptions.Pattern); + + /// + /// Create a new redis channel from a buffer, explicitly controlling the pattern mode. /// /// The name of the channel to create. /// The mode for name matching. - public RedisChannel(byte[] value, PatternMode mode) : this(value, DeterminePatternBased(value, mode)) {} + public RedisChannel(byte[]? value, PatternMode mode) : this( + value, DeterminePatternBased(value, mode) ? RedisChannelOptions.Pattern : RedisChannelOptions.None) + { + } /// - /// Create a new redis channel from a string, explicitly controlling the pattern mode + /// Create a new redis channel from a string, explicitly controlling the pattern mode. /// /// The string name of the channel to create. /// The mode for name matching. - public RedisChannel(string value, PatternMode mode) : this(value == null ? null : Encoding.UTF8.GetBytes(value), mode) {} + // ReSharper disable once ConditionIsAlwaysTrueOrFalseAccordingToNullableAPIContract + public RedisChannel(string value, PatternMode mode) : this( + // ReSharper disable once ConditionIsAlwaysTrueOrFalseAccordingToNullableAPIContract + value is null ? null : Encoding.UTF8.GetBytes(value), mode) + { + } + + /// + /// Create a new redis channel from a buffer, representing a sharded channel. In cluster + /// environments, this channel will be routed using similar rules to , which is suitable + /// for distributing pub/sub in scenarios with lots of channels. 
In non-cluster environments, routing is not + /// a consideration. + /// + /// The name of the channel to create. + /// Note that sharded subscriptions are completely separate to regular subscriptions; subscriptions + /// using sharded channels must also be published with sharded channels (and vice versa). + public static RedisChannel Sharded(byte[]? value) => + new(value, RedisChannelOptions.Sharded | RedisChannelOptions.KeyRouted); + + /// + /// Create a new redis channel from a string, representing a sharded channel. In cluster + /// environments, this channel will be routed using similar rules to , which is suitable + /// for distributing pub/sub in scenarios with lots of channels. In non-cluster environments, routing is not + /// a consideration. + /// + /// The string name of the channel to create. + /// Note that sharded subscriptions are completely separate to regular subscriptions; subscriptions + /// using sharded channels must also be published with sharded channels (and vice versa). + public static RedisChannel Sharded(string value) => + new(value, RedisChannelOptions.Sharded | RedisChannelOptions.KeyRouted); + + /// + /// Create a key-notification channel for a single key in a single database. + /// + public static RedisChannel KeySpaceSingleKey(in RedisKey key, int database) + // note we can allow patterns, because we aren't using PSUBSCRIBE + => BuildKeySpaceChannel(key, database, RedisChannelOptions.KeyRouted, default, false, true); + + /// + /// Create a key-notification channel for a pattern, optionally in a specified database. + /// + public static RedisChannel KeySpacePattern(in RedisKey pattern, int? 
database = null) + => BuildKeySpaceChannel(pattern, database, RedisChannelOptions.Pattern | RedisChannelOptions.MultiNode, default, appendStar: pattern.IsNull, allowKeyPatterns: true); - private RedisChannel(byte[] value, bool isPatternBased) +#pragma warning disable RS0026 // competing overloads - disambiguated via OverloadResolutionPriority + /// + /// Create a key-notification channel using a raw prefix, optionally in a specified database. + /// + public static RedisChannel KeySpacePrefix(in RedisKey prefix, int? database = null) { - Value = value; - IsPatternBased = isPatternBased; + if (prefix.IsEmpty) Throw(); + return BuildKeySpaceChannel(prefix, database, RedisChannelOptions.Pattern | RedisChannelOptions.MultiNode, default, true, false); + static void Throw() => throw new ArgumentNullException(nameof(prefix)); } - private static bool DeterminePatternBased(byte[] value, PatternMode mode) + /// + /// Create a key-notification channel using a raw prefix, optionally in a specified database. + /// + [OverloadResolutionPriority(1)] + public static RedisChannel KeySpacePrefix(ReadOnlySpan prefix, int? database = null) { - switch (mode) + if (prefix.IsEmpty) Throw(); + return BuildKeySpaceChannel(RedisKey.Null, database, RedisChannelOptions.Pattern | RedisChannelOptions.MultiNode, prefix, true, false); + static void Throw() => throw new ArgumentNullException(nameof(prefix)); + } +#pragma warning restore RS0026 // competing overloads - disambiguated via OverloadResolutionPriority + + private const int DatabaseScratchBufferSize = 16; // largest non-negative int32 is 10 digits + + private static ReadOnlySpan AppendDatabase(Span target, int? 
database, RedisChannelOptions options) + { + if (database is null) { - case PatternMode.Auto: - return value != null && Array.IndexOf(value, (byte)'*') >= 0; - case PatternMode.Literal: return false; - case PatternMode.Pattern: return true; - default: - throw new ArgumentOutOfRangeException(nameof(mode)); + if ((options & RedisChannelOptions.Pattern) == 0) throw new ArgumentNullException(nameof(database)); + return "*"u8; // don't worry about the inbound scratch buffer, this is fine + } + else + { + var db32 = database.GetValueOrDefault(); + if (db32 == 0) return "0"u8; // so common, we might as well special case + if (db32 < 0) throw new ArgumentOutOfRangeException(nameof(database)); + return target.Slice(0, Format.FormatInt32(db32, target)); } } /// - /// Indicate whether two channel names are not equal + /// Create an event-notification channel for a given event type, optionally in a specified database. + /// +#pragma warning disable RS0027 + public static RedisChannel KeyEvent(KeyNotificationType type, int? database = null) +#pragma warning restore RS0027 + => KeyEvent(KeyNotificationTypeMetadata.GetRawBytes(type), database); + + /// + /// Create an event-notification channel for a given event type, optionally in a specified database. + /// + /// This API is intended for use with custom/unknown event types; for well-known types, use . + public static RedisChannel KeyEvent(ReadOnlySpan type, int? 
database) + { + if (type.IsEmpty) throw new ArgumentNullException(nameof(type)); + + RedisChannelOptions options = RedisChannelOptions.MultiNode; + if (database is null) options |= RedisChannelOptions.Pattern; + var db = AppendDatabase(stackalloc byte[DatabaseScratchBufferSize], database, options); + + // __keyevent@{db}__:{type} + var arr = new byte[14 + db.Length + type.Length]; + + var target = AppendAndAdvance(arr.AsSpan(), "__keyevent@"u8); + target = AppendAndAdvance(target, db); + target = AppendAndAdvance(target, "__:"u8); + target = AppendAndAdvance(target, type); + Debug.Assert(target.IsEmpty); // should have calculated length correctly + + return new RedisChannel(arr, options | RedisChannelOptions.IgnoreChannelPrefix); + } + + private static Span AppendAndAdvance(Span target, scoped ReadOnlySpan value) + { + value.CopyTo(target); + return target.Slice(value.Length); + } + + private static RedisChannel BuildKeySpaceChannel(in RedisKey key, int? database, RedisChannelOptions options, ReadOnlySpan suffix, bool appendStar, bool allowKeyPatterns) + { + int fullKeyLength = key.TotalLength() + suffix.Length + (appendStar ? 
1 : 0); + if (appendStar & (options & RedisChannelOptions.Pattern) == 0) throw new ArgumentNullException(nameof(key)); + if (fullKeyLength == 0) throw new ArgumentOutOfRangeException(nameof(key)); + + var db = AppendDatabase(stackalloc byte[DatabaseScratchBufferSize], database, options); + + // __keyspace@{db}__:{key}[*] + var arr = new byte[14 + db.Length + fullKeyLength]; + + var target = AppendAndAdvance(arr.AsSpan(), "__keyspace@"u8); + target = AppendAndAdvance(target, db); + target = AppendAndAdvance(target, "__:"u8); + var keySpan = target; // remember this for if we need to check for patterns + var keyLen = key.CopyTo(target); + target = target.Slice(keyLen); + target = AppendAndAdvance(target, suffix); + if (!allowKeyPatterns) + { + keySpan = keySpan.Slice(0, keyLen + suffix.Length); + if (keySpan.IndexOfAny((byte)'*', (byte)'?', (byte)'[') >= 0) ThrowPattern(); + } + if (appendStar) + { + target[0] = (byte)'*'; + target = target.Slice(1); + } + Debug.Assert(target.IsEmpty, "length calculated incorrectly"); + return new RedisChannel(arr, options | RedisChannelOptions.IgnoreChannelPrefix); + + static void ThrowPattern() => throw new ArgumentException("The supplied key contains pattern characters, but patterns are not supported in this context."); + } + + internal RedisChannel(byte[]? value, RedisChannelOptions options) + { + Value = value; + Options = options; + } + + internal RedisChannel(string? value, RedisChannelOptions options) + { + Value = value is null ? null : Encoding.UTF8.GetBytes(value); + Options = options; + } + + private static bool DeterminePatternBased(byte[]? value, PatternMode mode) => mode switch + { + PatternMode.Auto => value != null && Array.IndexOf(value, (byte)'*') >= 0, + PatternMode.Literal => false, + PatternMode.Pattern => true, + _ => throw new ArgumentOutOfRangeException(nameof(mode)), + }; + + /// + /// Indicate whether two channel names are not equal. /// /// The first to compare. /// The second to compare. 
-#pragma warning disable RCS1231 // Make parameter ref read-only. - public API public static bool operator !=(RedisChannel x, RedisChannel y) => !(x == y); -#pragma warning restore RCS1231 // Make parameter ref read-only. /// - /// Indicate whether two channel names are not equal + /// Indicate whether two channel names are not equal. /// /// The first to compare. /// The second to compare. -#pragma warning disable RCS1231 // Make parameter ref read-only. - public API public static bool operator !=(string x, RedisChannel y) => !(x == y); -#pragma warning restore RCS1231 // Make parameter ref read-only. /// - /// Indicate whether two channel names are not equal + /// Indicate whether two channel names are not equal. /// /// The first to compare. /// The second to compare. -#pragma warning disable RCS1231 // Make parameter ref read-only. - public API public static bool operator !=(byte[] x, RedisChannel y) => !(x == y); -#pragma warning restore RCS1231 // Make parameter ref read-only. /// - /// Indicate whether two channel names are not equal + /// Indicate whether two channel names are not equal. /// /// The first to compare. /// The second to compare. -#pragma warning disable RCS1231 // Make parameter ref read-only. - public API public static bool operator !=(RedisChannel x, string y) => !(x == y); -#pragma warning restore RCS1231 // Make parameter ref read-only. /// - /// Indicate whether two channel names are not equal + /// Indicate whether two channel names are not equal. /// /// The first to compare. /// The second to compare. -#pragma warning disable RCS1231 // Make parameter ref read-only. - public API public static bool operator !=(RedisChannel x, byte[] y) => !(x == y); -#pragma warning restore RCS1231 // Make parameter ref read-only. /// - /// Indicate whether two channel names are equal + /// Indicate whether two channel names are equal. /// /// The first to compare. /// The second to compare. 
-#pragma warning disable RCS1231 // Make parameter ref read-only. - public API public static bool operator ==(RedisChannel x, RedisChannel y) => - x.IsPatternBased == y.IsPatternBased && RedisValue.Equals(x.Value, y.Value); -#pragma warning restore RCS1231 // Make parameter ref read-only. + (x.Options & EqualityMask) == (y.Options & EqualityMask) + && RedisValue.Equals(x.Value, y.Value); /// - /// Indicate whether two channel names are equal + /// Indicate whether two channel names are equal. /// /// The first to compare. /// The second to compare. -#pragma warning disable RCS1231 // Make parameter ref read-only. - public API public static bool operator ==(string x, RedisChannel y) => - RedisValue.Equals(x == null ? null : Encoding.UTF8.GetBytes(x), y.Value); -#pragma warning restore RCS1231 // Make parameter ref read-only. + // ReSharper disable once ConditionIsAlwaysTrueOrFalseAccordingToNullableAPIContract + RedisValue.Equals(x is null ? null : Encoding.UTF8.GetBytes(x), y.Value); /// - /// Indicate whether two channel names are equal + /// Indicate whether two channel names are equal. /// /// The first to compare. /// The second to compare. -#pragma warning disable RCS1231 // Make parameter ref read-only. - public API public static bool operator ==(byte[] x, RedisChannel y) => RedisValue.Equals(x, y.Value); -#pragma warning restore RCS1231 // Make parameter ref read-only. /// - /// Indicate whether two channel names are equal + /// Indicate whether two channel names are equal. /// /// The first to compare. /// The second to compare. -#pragma warning disable RCS1231 // Make parameter ref read-only. - public API public static bool operator ==(RedisChannel x, string y) => - RedisValue.Equals(x.Value, y == null ? null : Encoding.UTF8.GetBytes(y)); -#pragma warning restore RCS1231 // Make parameter ref read-only. + // ReSharper disable once ConditionIsAlwaysTrueOrFalseAccordingToNullableAPIContract + RedisValue.Equals(x.Value, y is null ? 
null : Encoding.UTF8.GetBytes(y)); /// - /// Indicate whether two channel names are equal + /// Indicate whether two channel names are equal. /// /// The first to compare. /// The second to compare. -#pragma warning disable RCS1231 // Make parameter ref read-only. - public API public static bool operator ==(RedisChannel x, byte[] y) => RedisValue.Equals(x.Value, y); -#pragma warning restore RCS1231 // Make parameter ref read-only. /// - /// See Object.Equals + /// See . /// /// The to compare to. - public override bool Equals(object obj) + public override bool Equals(object? obj) => obj switch { - if (obj is RedisChannel rcObj) - { - return RedisValue.Equals(Value, (rcObj).Value); - } - if (obj is string sObj) - { - return RedisValue.Equals(Value, Encoding.UTF8.GetBytes(sObj)); - } - if (obj is byte[] bObj) - { - return RedisValue.Equals(Value, bObj); - } - return false; - } + RedisChannel rcObj => RedisValue.Equals(Value, rcObj.Value), + string sObj => RedisValue.Equals(Value, Encoding.UTF8.GetBytes(sObj)), + byte[] bObj => RedisValue.Equals(Value, bObj), + _ => false, + }; /// - /// Indicate whether two channel names are equal + /// Indicate whether two channel names are equal. /// /// The to compare to. - public bool Equals(RedisChannel other) => IsPatternBased == other.IsPatternBased && RedisValue.Equals(Value, other.Value); + public bool Equals(RedisChannel other) => (Options & EqualityMask) == (other.Options & EqualityMask) + && RedisValue.Equals(Value, other.Value); - /// - /// See Object.GetHashCode - /// - public override int GetHashCode() => RedisValue.GetHashCode(Value) + (IsPatternBased ? 1 : 0); + /// + public override int GetHashCode() => RedisValue.GetHashCode(Value) ^ (int)(Options & EqualityMask); /// - /// Obtains a string representation of the channel name + /// Obtains a string representation of the channel name. /// - public override string ToString() - { - return ((string)this) ?? 
"(null)"; - } + public override string ToString() => ((string?)this) ?? "(null)"; internal static bool AssertStarts(byte[] value, byte[] expected) { @@ -198,73 +472,98 @@ internal void AssertNotNull() if (IsNull) throw new ArgumentException("A null key is not valid in this context"); } - internal RedisChannel Clone() => (byte[])Value?.Clone(); + internal RedisChannel Clone() + { + if (Value is null || Value.Length == 0) + { + // no need to duplicate anything + return this; + } + var copy = (byte[])Value.Clone(); // defensive array copy + return new RedisChannel(copy, Options); + } /// - /// The matching pattern for this channel + /// The matching pattern for this channel. /// public enum PatternMode { /// - /// Will be treated as a pattern if it includes * + /// Will be treated as a pattern if it includes *. /// Auto = 0, + /// - /// Never a pattern + /// Never a pattern. /// Literal = 1, + /// - /// Always a pattern + /// Always a pattern. /// - Pattern = 2 + Pattern = 2, } /// /// Create a channel name from a . /// /// The string to get a channel from. + [Obsolete("It is preferable to explicitly specify a " + nameof(PatternMode) + ", or use the " + nameof(Literal) + "/" + nameof(Pattern) + " methods", error: false)] public static implicit operator RedisChannel(string key) { - if (key == null) return default(RedisChannel); - return new RedisChannel(Encoding.UTF8.GetBytes(key), PatternMode.Auto); + // ReSharper disable once ConditionIsAlwaysTrueOrFalseAccordingToNullableAPIContract + if (key is null) return default; + return new RedisChannel(Encoding.UTF8.GetBytes(key), s_DefaultPatternMode); } /// - /// Create a channel name from a . + /// Create a channel name from a byte[]. /// /// The byte array to get a channel from. 
- public static implicit operator RedisChannel(byte[] key) - { - if (key == null) return default(RedisChannel); - return new RedisChannel(key, PatternMode.Auto); - } + [Obsolete("It is preferable to explicitly specify a " + nameof(PatternMode) + ", or use the " + nameof(Literal) + "/" + nameof(Pattern) + " methods", error: false)] + public static implicit operator RedisChannel(byte[]? key) + => key is null ? default : new RedisChannel(key, s_DefaultPatternMode); /// - /// Obtain the channel name as a . + /// Obtain the channel name as a byte[]. /// /// The channel to get a byte[] from. -#pragma warning disable RCS1231 // Make parameter ref read-only. - public API - public static implicit operator byte[] (RedisChannel key) => key.Value; -#pragma warning restore RCS1231 // Make parameter ref read-only. + public static implicit operator byte[]?(RedisChannel key) => key.Value; /// /// Obtain the channel name as a . /// /// The channel to get a string from. -#pragma warning disable RCS1231 // Make parameter ref read-only. - public API - public static implicit operator string (RedisChannel key) -#pragma warning restore RCS1231 // Make parameter ref read-only. 
+ public static implicit operator string?(RedisChannel key) { var arr = key.Value; - if (arr == null) return null; + if (arr is null) + { + return null; + } try { return Encoding.UTF8.GetString(arr); } - catch + catch (Exception e) when // Only catch exception thrown by Encoding.UTF8.GetString + (e is DecoderFallbackException or ArgumentException or ArgumentNullException) { - return BitConverter.ToString(arr); + return BitConverter.ToString(arr); } } + +#if DEBUG + // these exist *purely* to ensure that we never add them later *without* + // giving due consideration to the default pattern mode (UseImplicitAutoPattern) + // (since we don't ship them, we don't need them in release) + [Obsolete("Watch for " + nameof(UseImplicitAutoPattern), error: true)] + // ReSharper disable once UnusedMember.Local + // ReSharper disable once UnusedParameter.Local + private RedisChannel(string value) => throw new NotSupportedException(); + [Obsolete("Watch for " + nameof(UseImplicitAutoPattern), error: true)] + // ReSharper disable once UnusedMember.Local + // ReSharper disable once UnusedParameter.Local + private RedisChannel(byte[]? 
value) => throw new NotSupportedException(); +#endif } } diff --git a/src/StackExchange.Redis/RedisDatabase.Strings.cs b/src/StackExchange.Redis/RedisDatabase.Strings.cs new file mode 100644 index 000000000..6fcb7dd3b --- /dev/null +++ b/src/StackExchange.Redis/RedisDatabase.Strings.cs @@ -0,0 +1,81 @@ +using System; +using System.Runtime.CompilerServices; +using System.Threading.Tasks; + +namespace StackExchange.Redis; + +internal partial class RedisDatabase +{ + public bool StringDelete(RedisKey key, ValueCondition when, CommandFlags flags = CommandFlags.None) + { + var msg = GetStringDeleteMessage(key, when, flags); + return ExecuteSync(msg, ResultProcessor.Boolean); + } + + public Task StringDeleteAsync(RedisKey key, ValueCondition when, CommandFlags flags = CommandFlags.None) + { + var msg = GetStringDeleteMessage(key, when, flags); + return ExecuteAsync(msg, ResultProcessor.Boolean); + } + + private Message GetStringDeleteMessage(in RedisKey key, in ValueCondition when, CommandFlags flags, [CallerMemberName] string? operation = null) + { + switch (when.Kind) + { + case ValueCondition.ConditionKind.Always: + case ValueCondition.ConditionKind.Exists: + return Message.Create(Database, flags, RedisCommand.DEL, key); + case ValueCondition.ConditionKind.ValueEquals: + case ValueCondition.ConditionKind.ValueNotEquals: + case ValueCondition.ConditionKind.DigestEquals: + case ValueCondition.ConditionKind.DigestNotEquals: + return Message.Create(Database, flags, RedisCommand.DELEX, key, when); + default: + when.ThrowInvalidOperation(operation); + goto case ValueCondition.ConditionKind.Always; // not reached + } + } + + public ValueCondition? 
StringDigest(RedisKey key, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.DIGEST, key); + return ExecuteSync(msg, ResultProcessor.Digest); + } + + public Task StringDigestAsync(RedisKey key, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.DIGEST, key); + return ExecuteAsync(msg, ResultProcessor.Digest); + } + + public Task StringSetAsync(RedisKey key, RedisValue value, Expiration expiry, ValueCondition when, CommandFlags flags = CommandFlags.None) + { + var msg = GetStringSetMessage(key, value, expiry, when, flags); + return ExecuteAsync(msg, ResultProcessor.Boolean); + } + + public bool StringSet(RedisKey key, RedisValue value, Expiration expiry, ValueCondition when, CommandFlags flags = CommandFlags.None) + { + var msg = GetStringSetMessage(key, value, expiry, when, flags); + return ExecuteSync(msg, ResultProcessor.Boolean); + } + + private Message GetStringSetMessage(in RedisKey key, in RedisValue value, Expiration expiry, in ValueCondition when, CommandFlags flags, [CallerMemberName] string? 
operation = null) + { + switch (when.Kind) + { + case ValueCondition.ConditionKind.Exists: + case ValueCondition.ConditionKind.NotExists: + case ValueCondition.ConditionKind.Always: + return GetStringSetMessage(key, value, expiry: expiry, when: when.AsWhen(), flags: flags); + case ValueCondition.ConditionKind.ValueEquals: + case ValueCondition.ConditionKind.ValueNotEquals: + case ValueCondition.ConditionKind.DigestEquals: + case ValueCondition.ConditionKind.DigestNotEquals: + return Message.Create(Database, flags, RedisCommand.SET, key, value, expiry, when); + default: + when.ThrowInvalidOperation(operation); + goto case ValueCondition.ConditionKind.Always; // not reached + } + } +} diff --git a/src/StackExchange.Redis/RedisDatabase.VectorSets.cs b/src/StackExchange.Redis/RedisDatabase.VectorSets.cs new file mode 100644 index 000000000..f10693dc5 --- /dev/null +++ b/src/StackExchange.Redis/RedisDatabase.VectorSets.cs @@ -0,0 +1,297 @@ +using System; +using System.Collections.Generic; +using System.Runtime.CompilerServices; +using System.Threading; +using System.Threading.Tasks; + +// ReSharper disable once CheckNamespace +namespace StackExchange.Redis; + +internal partial class RedisDatabase +{ + public bool VectorSetAdd( + RedisKey key, + VectorSetAddRequest request, + CommandFlags flags = CommandFlags.None) + { + var msg = request.ToMessage(key, Database, flags); + return ExecuteSync(msg, ResultProcessor.Boolean); + } + + public long VectorSetLength(RedisKey key, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.VCARD, key); + return ExecuteSync(msg, ResultProcessor.Int64); + } + + public int VectorSetDimension(RedisKey key, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.VDIM, key); + return ExecuteSync(msg, ResultProcessor.Int32); + } + + public Lease? 
VectorSetGetApproximateVector(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.VEMB, key, member); + return ExecuteSync(msg, ResultProcessor.LeaseFloat32); + } + + public string? VectorSetGetAttributesJson(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.VGETATTR, key, member); + return ExecuteSync(msg, ResultProcessor.String); + } + + public VectorSetInfo? VectorSetInfo(RedisKey key, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.VINFO, key); + return ExecuteSync(msg, ResultProcessor.VectorSetInfo); + } + + public bool VectorSetContains(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.VISMEMBER, key, member); + return ExecuteSync(msg, ResultProcessor.Boolean); + } + + public Lease? VectorSetGetLinks(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.VLINKS, key, member); + return ExecuteSync(msg, ResultProcessor.VectorSetLinks); + } + + public Lease? 
VectorSetGetLinksWithScores(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.VLINKS, key, member, RedisLiterals.WITHSCORES); + return ExecuteSync(msg, ResultProcessor.VectorSetLinksWithScores); + } + + public RedisValue VectorSetRandomMember(RedisKey key, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.VRANDMEMBER, key); + return ExecuteSync(msg, ResultProcessor.RedisValue); + } + + public RedisValue[] VectorSetRandomMembers(RedisKey key, long count, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.VRANDMEMBER, key, count); + return ExecuteSync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); + } + + public bool VectorSetRemove(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.VREM, key, member); + return ExecuteSync(msg, ResultProcessor.Boolean); + } + + public bool VectorSetSetAttributesJson(RedisKey key, RedisValue member, string attributesJson, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.VSETATTR, key, member, attributesJson); + return ExecuteSync(msg, ResultProcessor.Boolean); + } + + public Lease? 
VectorSetSimilaritySearch( + RedisKey key, + VectorSetSimilaritySearchRequest query, + CommandFlags flags = CommandFlags.None) + { + if (query == null) throw new ArgumentNullException(nameof(query)); + var msg = query.ToMessage(key, Database, flags); + return ExecuteSync(msg, msg.GetResultProcessor()); + } + + // Vector Set async operations + public Task VectorSetAddAsync( + RedisKey key, + VectorSetAddRequest request, + CommandFlags flags = CommandFlags.None) + { + var msg = request.ToMessage(key, Database, flags); + return ExecuteAsync(msg, ResultProcessor.Boolean); + } + + public Task VectorSetLengthAsync(RedisKey key, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.VCARD, key); + return ExecuteAsync(msg, ResultProcessor.Int64); + } + + public Task VectorSetDimensionAsync(RedisKey key, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.VDIM, key); + return ExecuteAsync(msg, ResultProcessor.Int32); + } + + public Task?> VectorSetGetApproximateVectorAsync(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.VEMB, key, member); + return ExecuteAsync(msg, ResultProcessor.LeaseFloat32); + } + + public Task VectorSetGetAttributesJsonAsync(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.VGETATTR, key, member); + return ExecuteAsync(msg, ResultProcessor.String); + } + + public Task VectorSetInfoAsync(RedisKey key, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.VINFO, key); + return ExecuteAsync(msg, ResultProcessor.VectorSetInfo); + } + + public Task VectorSetContainsAsync(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.VISMEMBER, key, member); + return 
ExecuteAsync(msg, ResultProcessor.Boolean); + } + + public Task?> VectorSetGetLinksAsync(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.VLINKS, key, member); + return ExecuteAsync(msg, ResultProcessor.VectorSetLinks); + } + + public Task?> VectorSetGetLinksWithScoresAsync(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.VLINKS, key, member, RedisLiterals.WITHSCORES); + return ExecuteAsync(msg, ResultProcessor.VectorSetLinksWithScores); + } + + public Task VectorSetRandomMemberAsync(RedisKey key, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.VRANDMEMBER, key); + return ExecuteAsync(msg, ResultProcessor.RedisValue); + } + + public Task VectorSetRandomMembersAsync(RedisKey key, long count, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.VRANDMEMBER, key, count); + return ExecuteAsync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); + } + + public Task VectorSetRemoveAsync(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.VREM, key, member); + return ExecuteAsync(msg, ResultProcessor.Boolean); + } + + public Task VectorSetSetAttributesJsonAsync(RedisKey key, RedisValue member, string attributesJson, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.VSETATTR, key, member, attributesJson); + return ExecuteAsync(msg, ResultProcessor.Boolean); + } + + public Task?> VectorSetSimilaritySearchAsync( + RedisKey key, + VectorSetSimilaritySearchRequest query, + CommandFlags flags = CommandFlags.None) + { + if (query == null) throw new ArgumentNullException(nameof(query)); + var msg = query.ToMessage(key, Database, flags); + return ExecuteAsync(msg, 
msg.GetResultProcessor()); + } + + private Message GetVectorSetRangeMessage( + in RedisKey key, + in RedisValue start, + in RedisValue end, + long count, + Exclude exclude, + CommandFlags flags) + { + static RedisValue GetTerminator(RedisValue value, Exclude exclude, bool isStart) + { + if (value.IsNull) return isStart ? RedisLiterals.MinusSymbol : RedisLiterals.PlusSymbol; + var mask = isStart ? Exclude.Start : Exclude.Stop; + var isExclusive = (exclude & mask) != 0; + return (isExclusive ? "(" : "[") + value; + } + + var from = GetTerminator(start, exclude, true); + var to = GetTerminator(end, exclude, false); + return count < 0 + ? Message.Create(Database, flags, RedisCommand.VRANGE, key, from, to) + : Message.Create(Database, flags, RedisCommand.VRANGE, key, from, to, count); + } + + public Lease VectorSetRange( + RedisKey key, + RedisValue start = default, + RedisValue end = default, + long count = -1, + Exclude exclude = Exclude.None, + CommandFlags flags = CommandFlags.None) + { + var msg = GetVectorSetRangeMessage(key, start, end, count, exclude, flags); + return ExecuteSync(msg, ResultProcessor.LeaseRedisValue)!; + } + + public Task?> VectorSetRangeAsync( + RedisKey key, + RedisValue start = default, + RedisValue end = default, + long count = -1, + Exclude exclude = Exclude.None, + CommandFlags flags = CommandFlags.None) + { + var msg = GetVectorSetRangeMessage(key, start, end, count, exclude, flags); + return ExecuteAsync(msg, ResultProcessor.LeaseRedisValue); + } + + public IEnumerable VectorSetRangeEnumerate( + RedisKey key, + RedisValue start = default, + RedisValue end = default, + long count = 100, + Exclude exclude = Exclude.None, + CommandFlags flags = CommandFlags.None) + { + // intentionally not using "scan" naming in case a VSCAN command is added later + while (true) + { + using var batch = VectorSetRange(key, start, end, count, exclude, flags); + exclude |= Exclude.Start; // on subsequent iterations, exclude the start (we've already yielded it) 
+ + if (batch is null || batch.IsEmpty) yield break; + var segment = batch.ArraySegment; + for (int i = 0; i < segment.Count; i++) + { + // note side effect: use the last value as the exclusive start of the next batch + yield return start = segment.Array![segment.Offset + i]; + } + if (batch.Length < count || (!end.IsNull && end == start)) yield break; // no need to issue a final query + } + } + + public IAsyncEnumerable VectorSetRangeEnumerateAsync( + RedisKey key, + RedisValue start = default, + RedisValue end = default, + long count = 100, + Exclude exclude = Exclude.None, + CommandFlags flags = CommandFlags.None) + { + // intentionally not using "scan" naming in case a VSCAN command is added later + return WithCancellationSupport(CancellationToken.None); + + async IAsyncEnumerable WithCancellationSupport([EnumeratorCancellation] CancellationToken cancellationToken) + { + while (true) + { + cancellationToken.ThrowIfCancellationRequested(); + using var batch = await VectorSetRangeAsync(key, start, end, count, exclude, flags); + exclude |= Exclude.Start; // on subsequent iterations, exclude the start (we've already yielded it) + + if (batch is null || batch.IsEmpty) yield break; + var segment = batch.ArraySegment; + for (int i = 0; i < segment.Count; i++) + { + // note side effect: use the last value as the exclusive start of the next batch + yield return start = segment.Array![segment.Offset + i]; + } + if (batch.Length < count || (!end.IsNull && end == start)) yield break; // no need to issue a final query + } + } + } +} diff --git a/src/StackExchange.Redis/RedisDatabase.cs b/src/StackExchange.Redis/RedisDatabase.cs index 223a23313..ac3c14bcc 100644 --- a/src/StackExchange.Redis/RedisDatabase.cs +++ b/src/StackExchange.Redis/RedisDatabase.cs @@ -1,38 +1,39 @@ using System; using System.Buffers; using System.Collections.Generic; +using System.Diagnostics; using System.Net; -using System.Text; +using System.Runtime.CompilerServices; using System.Threading.Tasks; 
using Pipelines.Sockets.Unofficial.Arenas; namespace StackExchange.Redis { - internal class RedisDatabase : RedisBase, IDatabase + internal partial class RedisDatabase : RedisBase, IDatabase { - internal RedisDatabase(ConnectionMultiplexer multiplexer, int db, object asyncState) + internal RedisDatabase(ConnectionMultiplexer multiplexer, int db, object? asyncState) : base(multiplexer, asyncState) { Database = db; } - public object AsyncState => asyncState; + public object? AsyncState => asyncState; public int Database { get; } - public IBatch CreateBatch(object asyncState) + public IBatch CreateBatch(object? asyncState) { if (this is IBatch) throw new NotSupportedException("Nested batches are not supported"); return new RedisBatch(this, asyncState); } - public ITransaction CreateTransaction(object asyncState) + public ITransaction CreateTransaction(object? asyncState) { if (this is IBatch) throw new NotSupportedException("Nested transactions are not supported"); return new RedisTransaction(this, asyncState); } - private ITransaction CreateTransactionIfAvailable(object asyncState) + private ITransaction? CreateTransactionIfAvailable(object? 
asyncState) { var map = multiplexer.CommandMap; if (!map.IsAvailable(RedisCommand.MULTI) || !map.IsAvailable(RedisCommand.EXEC)) @@ -110,31 +111,31 @@ public Task GeoRemoveAsync(RedisKey key, RedisValue member, CommandFlags f return ExecuteAsync(msg, ResultProcessor.NullableDouble); } - public string[] GeoHash(RedisKey key, RedisValue[] members, CommandFlags flags = CommandFlags.None) + public string?[] GeoHash(RedisKey key, RedisValue[] members, CommandFlags flags = CommandFlags.None) { if (members == null) throw new ArgumentNullException(nameof(members)); var redisValues = new RedisValue[members.Length]; for (var i = 0; i < members.Length; i++) redisValues[i] = members[i]; var msg = Message.Create(Database, flags, RedisCommand.GEOHASH, key, redisValues); - return ExecuteSync(msg, ResultProcessor.StringArray); + return ExecuteSync(msg, ResultProcessor.NullableStringArray, defaultValue: Array.Empty()); } - public Task GeoHashAsync(RedisKey key, RedisValue[] members, CommandFlags flags = CommandFlags.None) + public Task GeoHashAsync(RedisKey key, RedisValue[] members, CommandFlags flags = CommandFlags.None) { if (members == null) throw new ArgumentNullException(nameof(members)); var redisValues = new RedisValue[members.Length]; for (var i = 0; i < members.Length; i++) redisValues[i] = members[i]; var msg = Message.Create(Database, flags, RedisCommand.GEOHASH, key, redisValues); - return ExecuteAsync(msg, ResultProcessor.StringArray); + return ExecuteAsync(msg, ResultProcessor.NullableStringArray, defaultValue: Array.Empty()); } - public string GeoHash(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) + public string? 
GeoHash(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(Database, flags, RedisCommand.GEOHASH, key, member); return ExecuteSync(msg, ResultProcessor.String); } - public Task GeoHashAsync(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) + public Task GeoHashAsync(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(Database, flags, RedisCommand.GEOHASH, key, member); return ExecuteAsync(msg, ResultProcessor.String); @@ -146,7 +147,7 @@ public Task GeoHashAsync(RedisKey key, RedisValue member, CommandFlags f var redisValues = new RedisValue[members.Length]; for (var i = 0; i < members.Length; i++) redisValues[i] = members[i]; var msg = Message.Create(Database, flags, RedisCommand.GEOPOS, key, redisValues); - return ExecuteSync(msg, ResultProcessor.RedisGeoPositionArray); + return ExecuteSync(msg, ResultProcessor.RedisGeoPositionArray, defaultValue: Array.Empty()); } public Task GeoPositionAsync(RedisKey key, RedisValue[] members, CommandFlags flags = CommandFlags.None) @@ -155,7 +156,7 @@ public Task GeoHashAsync(RedisKey key, RedisValue member, CommandFlags f var redisValues = new RedisValue[members.Length]; for (var i = 0; i < members.Length; i++) redisValues[i] = members[i]; var msg = Message.Create(Database, flags, RedisCommand.GEOPOS, key, redisValues); - return ExecuteAsync(msg, ResultProcessor.RedisGeoPositionArray); + return ExecuteAsync(msg, ResultProcessor.RedisGeoPositionArray, defaultValue: Array.Empty()); } public GeoPosition? 
GeoPosition(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) @@ -170,16 +171,57 @@ public Task GeoHashAsync(RedisKey key, RedisValue member, CommandFlags f return ExecuteAsync(msg, ResultProcessor.RedisGeoPosition); } - private static readonly RedisValue - WITHCOORD = Encoding.ASCII.GetBytes("WITHCOORD"), - WITHDIST = Encoding.ASCII.GetBytes("WITHDIST"), - WITHHASH = Encoding.ASCII.GetBytes("WITHHASH"), - COUNT = Encoding.ASCII.GetBytes("COUNT"), - ASC = Encoding.ASCII.GetBytes("ASC"), - DESC = Encoding.ASCII.GetBytes("DESC"); + private Message GetGeoSearchMessage(in RedisKey sourceKey, in RedisKey destinationKey, RedisValue? member, double longitude, double latitude, GeoSearchShape shape, int count, bool demandClosest, bool storeDistances, Order? order, GeoRadiusOptions options, CommandFlags flags) + { + var redisValues = new List(15); + if (member != null) + { + redisValues.Add(RedisLiterals.FROMMEMBER); + redisValues.Add(member.Value); + } + else + { + redisValues.Add(RedisLiterals.FROMLONLAT); + redisValues.Add(longitude); + redisValues.Add(latitude); + } + + shape.AddArgs(redisValues); + + if (order != null) + { + redisValues.Add(order.Value.ToLiteral()); + } + if (count >= 0) + { + redisValues.Add(RedisLiterals.COUNT); + redisValues.Add(count); + } + + if (!demandClosest) + { + if (count < 0) + { + throw new ArgumentException($"{nameof(demandClosest)} must be true if you are not limiting the count for a GEOSEARCH"); + } + redisValues.Add(RedisLiterals.ANY); + } + + options.AddArgs(redisValues); + + if (storeDistances) + { + redisValues.Add(RedisLiterals.STOREDIST); + } + + return destinationKey.IsNull + ? Message.Create(Database, flags, RedisCommand.GEOSEARCH, sourceKey, redisValues.ToArray()) + : Message.Create(Database, flags, RedisCommand.GEOSEARCHSTORE, destinationKey, sourceKey, redisValues.ToArray()); + } + private Message GetGeoRadiusMessage(in RedisKey key, RedisValue? 
member, double longitude, double latitude, double radius, GeoUnit unit, int count, Order? order, GeoRadiusOptions options, CommandFlags flags) { - var redisValues = new List(); + var redisValues = new List(10); RedisCommand command; if (member == null) { @@ -192,24 +234,19 @@ private Message GetGeoRadiusMessage(in RedisKey key, RedisValue? member, double redisValues.Add(member.Value); command = RedisCommand.GEORADIUSBYMEMBER; } + redisValues.Add(radius); - redisValues.Add(StackExchange.Redis.GeoPosition.GetRedisUnit(unit)); - if ((options & GeoRadiusOptions.WithCoordinates) != 0) redisValues.Add(WITHCOORD); - if ((options & GeoRadiusOptions.WithDistance) != 0) redisValues.Add(WITHDIST); - if ((options & GeoRadiusOptions.WithGeoHash) != 0) redisValues.Add(WITHHASH); + redisValues.Add(Redis.GeoPosition.GetRedisUnit(unit)); + options.AddArgs(redisValues); + if (count > 0) { - redisValues.Add(COUNT); + redisValues.Add(RedisLiterals.COUNT); redisValues.Add(count); } if (order != null) { - switch (order.Value) - { - case Order.Ascending: redisValues.Add(ASC); break; - case Order.Descending: redisValues.Add(DESC); break; - default: throw new ArgumentOutOfRangeException(nameof(order)); - } + redisValues.Add(order.Value.ToLiteral()); } return Message.Create(Database, flags, command, key, redisValues.ToArray()); @@ -222,7 +259,7 @@ public GeoRadiusResult[] GeoRadius(RedisKey key, RedisValue member, double radiu { throw new ArgumentException("Member should not be a double, you likely want the GeoRadius(RedisKey, double, double, ...) 
overload.", nameof(member)); } - return ExecuteSync(GetGeoRadiusMessage(key, member, double.NaN, double.NaN, radius, unit, count, order, options, flags), ResultProcessor.GeoRadiusArray(options)); + return ExecuteSync(GetGeoRadiusMessage(key, member, double.NaN, double.NaN, radius, unit, count, order, options, flags), ResultProcessor.GeoRadiusArray(options), defaultValue: Array.Empty()); } public Task GeoRadiusAsync(RedisKey key, RedisValue member, double radius, GeoUnit unit, int count, Order? order, GeoRadiusOptions options, CommandFlags flags) @@ -232,17 +269,65 @@ public Task GeoRadiusAsync(RedisKey key, RedisValue member, d { throw new ArgumentException("Member should not be a double, you likely want the GeoRadius(RedisKey, double, double, ...) overload.", nameof(member)); } - return ExecuteAsync(GetGeoRadiusMessage(key, member, double.NaN, double.NaN, radius, unit, count, order, options, flags), ResultProcessor.GeoRadiusArray(options)); + return ExecuteAsync(GetGeoRadiusMessage(key, member, double.NaN, double.NaN, radius, unit, count, order, options, flags), ResultProcessor.GeoRadiusArray(options), defaultValue: Array.Empty()); } public GeoRadiusResult[] GeoRadius(RedisKey key, double longitude, double latitude, double radius, GeoUnit unit, int count, Order? order, GeoRadiusOptions options, CommandFlags flags) { - return ExecuteSync(GetGeoRadiusMessage(key, null, longitude, latitude, radius, unit, count, order, options, flags), ResultProcessor.GeoRadiusArray(options)); + return ExecuteSync(GetGeoRadiusMessage(key, null, longitude, latitude, radius, unit, count, order, options, flags), ResultProcessor.GeoRadiusArray(options), defaultValue: Array.Empty()); } public Task GeoRadiusAsync(RedisKey key, double longitude, double latitude, double radius, GeoUnit unit, int count, Order? 
order, GeoRadiusOptions options, CommandFlags flags) { - return ExecuteAsync(GetGeoRadiusMessage(key, null, longitude, latitude, radius, unit, count, order, options, flags), ResultProcessor.GeoRadiusArray(options)); + return ExecuteAsync(GetGeoRadiusMessage(key, null, longitude, latitude, radius, unit, count, order, options, flags), ResultProcessor.GeoRadiusArray(options), defaultValue: Array.Empty()); + } + + public GeoRadiusResult[] GeoSearch(RedisKey key, RedisValue member, GeoSearchShape shape, int count = -1, bool demandClosest = true, Order? order = null, GeoRadiusOptions options = GeoRadiusOptions.Default, CommandFlags flags = CommandFlags.None) + { + var msg = GetGeoSearchMessage(key, RedisKey.Null, member, double.NaN, double.NaN, shape, count, demandClosest, false, order, options, flags); + return ExecuteSync(msg, ResultProcessor.GeoRadiusArray(options), defaultValue: Array.Empty()); + } + + public GeoRadiusResult[] GeoSearch(RedisKey key, double longitude, double latitude, GeoSearchShape shape, int count = -1, bool demandClosest = true, Order? order = null, GeoRadiusOptions options = GeoRadiusOptions.Default, CommandFlags flags = CommandFlags.None) + { + var msg = GetGeoSearchMessage(key, RedisKey.Null, null, longitude, latitude, shape, count, demandClosest, false, order, options, flags); + return ExecuteSync(msg, ResultProcessor.GeoRadiusArray(options), defaultValue: Array.Empty()); + } + + public Task GeoSearchAsync(RedisKey key, RedisValue member, GeoSearchShape shape, int count = -1, bool demandClosest = true, Order? 
order = null, GeoRadiusOptions options = GeoRadiusOptions.Default, CommandFlags flags = CommandFlags.None) + { + var msg = GetGeoSearchMessage(key, RedisKey.Null, member, double.NaN, double.NaN, shape, count, demandClosest, false, order, options, flags); + return ExecuteAsync(msg, ResultProcessor.GeoRadiusArray(options), defaultValue: Array.Empty()); + } + + public Task GeoSearchAsync(RedisKey key, double longitude, double latitude, GeoSearchShape shape, int count = -1, bool demandClosest = true, Order? order = null, GeoRadiusOptions options = GeoRadiusOptions.Default, CommandFlags flags = CommandFlags.None) + { + var msg = GetGeoSearchMessage(key, RedisKey.Null, null, longitude, latitude, shape, count, demandClosest, false, order, options, flags); + return ExecuteAsync(msg, ResultProcessor.GeoRadiusArray(options), defaultValue: Array.Empty()); + } + + public long GeoSearchAndStore(RedisKey sourceKey, RedisKey destinationKey, RedisValue member, GeoSearchShape shape, int count = -1, bool demandClosest = true, Order? order = null, bool storeDistances = false, CommandFlags flags = CommandFlags.None) + { + var msg = GetGeoSearchMessage(sourceKey, destinationKey, member, double.NaN, double.NaN, shape, count, demandClosest, storeDistances, order, GeoRadiusOptions.None, flags); + return ExecuteSync(msg, ResultProcessor.Int64); + } + + public long GeoSearchAndStore(RedisKey sourceKey, RedisKey destinationKey, double longitude, double latitude, GeoSearchShape shape, int count = -1, bool demandClosest = true, Order? 
order = null, bool storeDistances = false, CommandFlags flags = CommandFlags.None) + { + var msg = GetGeoSearchMessage(sourceKey, destinationKey, null, longitude, latitude, shape, count, demandClosest, storeDistances, order, GeoRadiusOptions.None, flags); + return ExecuteSync(msg, ResultProcessor.Int64); + } + + public Task GeoSearchAndStoreAsync(RedisKey sourceKey, RedisKey destinationKey, RedisValue member, GeoSearchShape shape, int count = -1, bool demandClosest = true, Order? order = null, bool storeDistances = false, CommandFlags flags = CommandFlags.None) + { + var msg = GetGeoSearchMessage(sourceKey, destinationKey, member, double.NaN, double.NaN, shape, count, demandClosest, storeDistances, order, GeoRadiusOptions.None, flags); + return ExecuteAsync(msg, ResultProcessor.Int64); + } + + public Task GeoSearchAndStoreAsync(RedisKey sourceKey, RedisKey destinationKey, double longitude, double latitude, GeoSearchShape shape, int count = -1, bool demandClosest = true, Order? order = null, bool storeDistances = false, CommandFlags flags = CommandFlags.None) + { + var msg = GetGeoSearchMessage(sourceKey, destinationKey, null, longitude, latitude, shape, count, demandClosest, storeDistances, order, GeoRadiusOptions.None, flags); + return ExecuteAsync(msg, ResultProcessor.Int64); } public long HashDecrement(RedisKey key, RedisValue hashField, long value = 1, CommandFlags flags = CommandFlags.None) @@ -304,13 +389,383 @@ public Task HashExistsAsync(RedisKey key, RedisValue hashField, CommandFla return ExecuteAsync(msg, ResultProcessor.Boolean); } + public ExpireResult[] HashFieldExpire(RedisKey key, RedisValue[] hashFields, TimeSpan expiry, ExpireWhen when = ExpireWhen.Always, CommandFlags flags = CommandFlags.None) + { + long milliseconds = expiry.Ticks / TimeSpan.TicksPerMillisecond; + return HashFieldExpireExecute(key, milliseconds, when, PickExpireCommandByPrecision, SyncCustomArrExecutor>, ResultProcessor.ExpireResultArray, flags, hashFields); + } + + public 
ExpireResult[] HashFieldExpire(RedisKey key, RedisValue[] hashFields, DateTime expiry, ExpireWhen when = ExpireWhen.Always, CommandFlags flags = CommandFlags.None) + { + long milliseconds = Expiration.GetUnixTimeMilliseconds(expiry); + return HashFieldExpireExecute(key, milliseconds, when, PickExpireAtCommandByPrecision, SyncCustomArrExecutor>, ResultProcessor.ExpireResultArray, flags, hashFields); + } + + public Task HashFieldExpireAsync(RedisKey key, RedisValue[] hashFields, TimeSpan expiry, ExpireWhen when = ExpireWhen.Always, CommandFlags flags = CommandFlags.None) + { + long milliseconds = expiry.Ticks / TimeSpan.TicksPerMillisecond; + return HashFieldExpireExecute(key, milliseconds, when, PickExpireCommandByPrecision, AsyncCustomArrExecutor>, ResultProcessor.ExpireResultArray, flags, hashFields); + } + + public Task HashFieldExpireAsync(RedisKey key, RedisValue[] hashFields, DateTime expiry, ExpireWhen when = ExpireWhen.Always, CommandFlags flags = CommandFlags.None) + { + long milliseconds = Expiration.GetUnixTimeMilliseconds(expiry); + return HashFieldExpireExecute(key, milliseconds, when, PickExpireAtCommandByPrecision, AsyncCustomArrExecutor>, ResultProcessor.ExpireResultArray, flags, hashFields); + } + + private T HashFieldExpireExecute(RedisKey key, long milliseconds, ExpireWhen when, Func getCmd, CustomExecutor executor, TProcessor processor, CommandFlags flags, params RedisValue[] hashFields) + { + if (hashFields == null) throw new ArgumentNullException(nameof(hashFields)); + var useSeconds = milliseconds % 1000 == 0; + var cmd = getCmd(useSeconds); + long expiry = useSeconds ? 
(milliseconds / 1000) : milliseconds; + + var values = when switch + { + ExpireWhen.Always => new List { expiry, RedisLiterals.FIELDS, hashFields.Length }, + _ => new List { expiry, when.ToLiteral(), RedisLiterals.FIELDS, hashFields.Length }, + }; + values.AddRange(hashFields); + var msg = Message.Create(Database, flags, cmd, key, values.ToArray()); + return executor(msg, processor); + } + + private static RedisCommand PickExpireCommandByPrecision(bool useSeconds) => useSeconds ? RedisCommand.HEXPIRE : RedisCommand.HPEXPIRE; + + private static RedisCommand PickExpireAtCommandByPrecision(bool useSeconds) => useSeconds ? RedisCommand.HEXPIREAT : RedisCommand.HPEXPIREAT; + + private T HashFieldExecute(RedisCommand cmd, RedisKey key, CustomExecutor executor, TProcessor processor, CommandFlags flags = CommandFlags.None, params RedisValue[] hashFields) + { + var values = new List { RedisLiterals.FIELDS, hashFields.Length }; + values.AddRange(hashFields); + var msg = Message.Create(Database, flags, cmd, key, values.ToArray()); + return executor(msg, processor); + } + + private delegate T CustomExecutor(Message msg, TProcessor processor); + + private T[] SyncCustomArrExecutor(Message msg, TProcessor processor) where TProcessor : ResultProcessor => ExecuteSync(msg, processor)!; + + private Task AsyncCustomArrExecutor(Message msg, TProcessor processor) where TProcessor : ResultProcessor => ExecuteAsync(msg, processor)!; + + public RedisValue HashFieldGetAndDelete(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.HGETDEL, key, RedisLiterals.FIELDS, 1, hashField); + return ExecuteSync(msg, ResultProcessor.RedisValueFromArray); + } + + public Lease? 
HashFieldGetLeaseAndDelete(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.HGETDEL, key, RedisLiterals.FIELDS, 1, hashField); + return ExecuteSync(msg, ResultProcessor.LeaseFromArray); + } + + public RedisValue[] HashFieldGetAndDelete(RedisKey key, RedisValue[] hashFields, CommandFlags flags = CommandFlags.None) + { + if (hashFields == null) throw new ArgumentNullException(nameof(hashFields)); + if (hashFields.Length == 0) return Array.Empty(); + var msg = Message.Create(Database, flags, RedisCommand.HGETDEL, key, RedisLiterals.FIELDS, hashFields.Length, hashFields); + return ExecuteSync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); + } + + public Task HashFieldGetAndDeleteAsync(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.HGETDEL, key, RedisLiterals.FIELDS, 1, hashField); + return ExecuteAsync(msg, ResultProcessor.RedisValueFromArray); + } + + public Task?> HashFieldGetLeaseAndDeleteAsync(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.HGETDEL, key, RedisLiterals.FIELDS, 1, hashField); + return ExecuteAsync(msg, ResultProcessor.LeaseFromArray); + } + + public Task HashFieldGetAndDeleteAsync(RedisKey key, RedisValue[] hashFields, CommandFlags flags = CommandFlags.None) + { + if (hashFields == null) throw new ArgumentNullException(nameof(hashFields)); + if (hashFields.Length == 0) return CompletedTask.FromDefault(Array.Empty(), asyncState); + var msg = Message.Create(Database, flags, RedisCommand.HGETDEL, key, RedisLiterals.FIELDS, hashFields.Length, hashFields); + return ExecuteAsync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); + } + + private Message HashFieldGetAndSetExpiryMessage(in RedisKey key, in RedisValue hashField, Expiration expiry, CommandFlags 
flags) => + expiry.TokenCount switch + { + // expiry, for example EX 10 + 2 => Message.Create(Database, flags, RedisCommand.HGETEX, key, expiry.Operand, expiry.Value, RedisLiterals.FIELDS, 1, hashField), + // keyword only, for example PERSIST + 1 => Message.Create(Database, flags, RedisCommand.HGETEX, key, expiry.Operand, RedisLiterals.FIELDS, 1, hashField), + // default case when neither expiry nor persist are set + _ => Message.Create(Database, flags, RedisCommand.HGETEX, key, RedisLiterals.FIELDS, 1, hashField), + }; + + private Message HashFieldGetAndSetExpiryMessage(in RedisKey key, RedisValue[] hashFields, Expiration expiry, CommandFlags flags) + { + if (hashFields is null) throw new ArgumentNullException(nameof(hashFields)); + if (hashFields.Length == 1) + { + return HashFieldGetAndSetExpiryMessage(key, in hashFields[0], expiry, flags); + } + + // precision, time, FIELDS, hashFields.Length, {N x fields} + int extraTokens = expiry.TokenCount + 2; + RedisValue[] values = new RedisValue[extraTokens + hashFields.Length]; + + int index = 0; + // add PERSIST or expiry values + switch (expiry.TokenCount) + { + case 2: + values[index++] = expiry.Operand; + values[index++] = expiry.Value; + break; + case 1: + values[index++] = expiry.Operand; + break; + } + // add the fields + values[index++] = RedisLiterals.FIELDS; + values[index++] = hashFields.Length; + // check we've added everything we expected to + Debug.Assert(index == extraTokens, $"token mismatch: {index} vs {extraTokens}"); + + // Add hash fields to the array + hashFields.AsSpan().CopyTo(values.AsSpan(index)); + + return Message.Create(Database, flags, RedisCommand.HGETEX, key, values); + } + + public RedisValue HashFieldGetAndSetExpiry(RedisKey key, RedisValue hashField, TimeSpan? 
expiry = null, bool persist = false, CommandFlags flags = CommandFlags.None) + { + var msg = HashFieldGetAndSetExpiryMessage(key, hashField, Expiration.CreateOrPersist(expiry, persist), flags); + return ExecuteSync(msg, ResultProcessor.RedisValueFromArray); + } + + public RedisValue HashFieldGetAndSetExpiry(RedisKey key, RedisValue hashField, DateTime expiry, CommandFlags flags = CommandFlags.None) + { + var msg = HashFieldGetAndSetExpiryMessage(key, hashField, new(expiry), flags); + return ExecuteSync(msg, ResultProcessor.RedisValueFromArray); + } + + public Lease? HashFieldGetLeaseAndSetExpiry(RedisKey key, RedisValue hashField, TimeSpan? expiry = null, bool persist = false, CommandFlags flags = CommandFlags.None) + { + var msg = HashFieldGetAndSetExpiryMessage(key, hashField, Expiration.CreateOrPersist(expiry, persist), flags); + return ExecuteSync(msg, ResultProcessor.LeaseFromArray); + } + + public Lease? HashFieldGetLeaseAndSetExpiry(RedisKey key, RedisValue hashField, DateTime expiry, CommandFlags flags = CommandFlags.None) + { + var msg = HashFieldGetAndSetExpiryMessage(key, hashField, new(expiry), flags); + return ExecuteSync(msg, ResultProcessor.LeaseFromArray); + } + + public RedisValue[] HashFieldGetAndSetExpiry(RedisKey key, RedisValue[] hashFields, TimeSpan? 
expiry = null, bool persist = false, CommandFlags flags = CommandFlags.None) + { + if (hashFields == null) throw new ArgumentNullException(nameof(hashFields)); + if (hashFields.Length == 0) return Array.Empty(); + var msg = HashFieldGetAndSetExpiryMessage(key, hashFields, Expiration.CreateOrPersist(expiry, persist), flags); + return ExecuteSync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); + } + + public RedisValue[] HashFieldGetAndSetExpiry(RedisKey key, RedisValue[] hashFields, DateTime expiry, CommandFlags flags = CommandFlags.None) + { + if (hashFields == null) throw new ArgumentNullException(nameof(hashFields)); + if (hashFields.Length == 0) return Array.Empty(); + var msg = HashFieldGetAndSetExpiryMessage(key, hashFields, new(expiry), flags); + return ExecuteSync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); + } + + public Task HashFieldGetAndSetExpiryAsync(RedisKey key, RedisValue hashField, TimeSpan? expiry = null, bool persist = false, CommandFlags flags = CommandFlags.None) + { + var msg = HashFieldGetAndSetExpiryMessage(key, hashField, Expiration.CreateOrPersist(expiry, persist), flags); + return ExecuteAsync(msg, ResultProcessor.RedisValueFromArray); + } + + public Task HashFieldGetAndSetExpiryAsync(RedisKey key, RedisValue hashField, DateTime expiry, CommandFlags flags = CommandFlags.None) + { + var msg = HashFieldGetAndSetExpiryMessage(key, hashField, new(expiry), flags); + return ExecuteAsync(msg, ResultProcessor.RedisValueFromArray); + } + + public Task?> HashFieldGetLeaseAndSetExpiryAsync(RedisKey key, RedisValue hashField, TimeSpan? 
expiry = null, bool persist = false, CommandFlags flags = CommandFlags.None) + { + var msg = HashFieldGetAndSetExpiryMessage(key, hashField, Expiration.CreateOrPersist(expiry, persist), flags); + return ExecuteAsync(msg, ResultProcessor.LeaseFromArray); + } + + public Task?> HashFieldGetLeaseAndSetExpiryAsync(RedisKey key, RedisValue hashField, DateTime expiry, CommandFlags flags = CommandFlags.None) + { + var msg = HashFieldGetAndSetExpiryMessage(key, hashField, new(expiry), flags); + return ExecuteAsync(msg, ResultProcessor.LeaseFromArray); + } + + public Task HashFieldGetAndSetExpiryAsync(RedisKey key, RedisValue[] hashFields, TimeSpan? expiry = null, bool persist = false, CommandFlags flags = CommandFlags.None) + { + if (hashFields == null) throw new ArgumentNullException(nameof(hashFields)); + if (hashFields.Length == 0) return CompletedTask.FromDefault(Array.Empty(), asyncState); + var msg = HashFieldGetAndSetExpiryMessage(key, hashFields, Expiration.CreateOrPersist(expiry, persist), flags); + return ExecuteAsync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); + } + + public Task HashFieldGetAndSetExpiryAsync(RedisKey key, RedisValue[] hashFields, DateTime expiry, CommandFlags flags = CommandFlags.None) + { + if (hashFields == null) throw new ArgumentNullException(nameof(hashFields)); + if (hashFields.Length == 0) return CompletedTask.FromDefault(Array.Empty(), asyncState); + var msg = HashFieldGetAndSetExpiryMessage(key, hashFields, new(expiry), flags); + return ExecuteAsync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); + } + + private Message HashFieldSetAndSetExpiryMessage(in RedisKey key, in RedisValue field, in RedisValue value, Expiration expiry, When when, CommandFlags flags) + { + if (when == When.Always) + { + return expiry.TokenCount switch + { + 2 => Message.Create(Database, flags, RedisCommand.HSETEX, key, expiry.Operand, expiry.Value, RedisLiterals.FIELDS, 1, field, value), + 1 => Message.Create(Database, 
flags, RedisCommand.HSETEX, key, expiry.Operand, RedisLiterals.FIELDS, 1, field, value), + _ => Message.Create(Database, flags, RedisCommand.HSETEX, key, RedisLiterals.FIELDS, 1, field, value), + }; + } + else + { + // we need an extra token + var existance = when switch + { + When.Exists => RedisLiterals.FXX, + When.NotExists => RedisLiterals.FNX, + _ => throw new ArgumentOutOfRangeException(nameof(when)), + }; + + return expiry.TokenCount switch + { + 2 => Message.Create(Database, flags, RedisCommand.HSETEX, key, existance, expiry.Operand, expiry.Value, RedisLiterals.FIELDS, 1, field, value), + 1 => Message.Create(Database, flags, RedisCommand.HSETEX, key, existance, expiry.Operand, RedisLiterals.FIELDS, 1, field, value), + _ => Message.Create(Database, flags, RedisCommand.HSETEX, key, existance, RedisLiterals.FIELDS, 1, field, value), + }; + } + } + + private Message HashFieldSetAndSetExpiryMessage(in RedisKey key, HashEntry[] hashFields, Expiration expiry, When when, CommandFlags flags) + { + if (hashFields.Length == 1) + { + var field = hashFields[0]; + return HashFieldSetAndSetExpiryMessage(key, field.Name, field.Value, expiry, when, flags); + } + // Determine the base array size + var extraTokens = expiry.TokenCount + (when == When.Always ? 
2 : 3); // [FXX|FNX] {expiry} FIELDS {length} + RedisValue[] values = new RedisValue[(hashFields.Length * 2) + extraTokens]; + + int index = 0; + switch (when) + { + case When.Always: + break; + case When.Exists: + values[index++] = RedisLiterals.FXX; + break; + case When.NotExists: + values[index++] = RedisLiterals.FNX; + break; + default: + throw new ArgumentOutOfRangeException(nameof(when)); + } + switch (expiry.TokenCount) + { + case 2: + values[index++] = expiry.Operand; + values[index++] = expiry.Value; + break; + case 1: + values[index++] = expiry.Operand; + break; + } + values[index++] = RedisLiterals.FIELDS; + values[index++] = hashFields.Length; + for (int i = 0; i < hashFields.Length; i++) + { + values[index++] = hashFields[i].name; + values[index++] = hashFields[i].value; + } + Debug.Assert(index == values.Length); + return Message.Create(Database, flags, RedisCommand.HSETEX, key, values); + } + + public RedisValue HashFieldSetAndSetExpiry(RedisKey key, RedisValue field, RedisValue value, TimeSpan? expiry = null, bool keepTtl = false, When when = When.Always, CommandFlags flags = CommandFlags.None) + { + var msg = HashFieldSetAndSetExpiryMessage(key, field, value, Expiration.CreateOrKeepTtl(expiry, keepTtl), when, flags); + return ExecuteSync(msg, ResultProcessor.RedisValue); + } + + public RedisValue HashFieldSetAndSetExpiry(RedisKey key, RedisValue field, RedisValue value, DateTime expiry, When when = When.Always, CommandFlags flags = CommandFlags.None) + { + var msg = HashFieldSetAndSetExpiryMessage(key, field, value, new(expiry), when, flags); + return ExecuteSync(msg, ResultProcessor.RedisValue); + } + + public RedisValue HashFieldSetAndSetExpiry(RedisKey key, HashEntry[] hashFields, TimeSpan? 
expiry = null, bool keepTtl = false, When when = When.Always, CommandFlags flags = CommandFlags.None) + { + if (hashFields == null) throw new ArgumentNullException(nameof(hashFields)); + var msg = HashFieldSetAndSetExpiryMessage(key, hashFields, Expiration.CreateOrKeepTtl(expiry, keepTtl), when, flags); + return ExecuteSync(msg, ResultProcessor.RedisValue); + } + public RedisValue HashFieldSetAndSetExpiry(RedisKey key, HashEntry[] hashFields, DateTime expiry, When when = When.Always, CommandFlags flags = CommandFlags.None) + { + if (hashFields == null) throw new ArgumentNullException(nameof(hashFields)); + var msg = HashFieldSetAndSetExpiryMessage(key, hashFields, new(expiry), when, flags); + return ExecuteSync(msg, ResultProcessor.RedisValue); + } + + public Task HashFieldSetAndSetExpiryAsync(RedisKey key, RedisValue field, RedisValue value, TimeSpan? expiry = null, bool keepTtl = false, When when = When.Always, CommandFlags flags = CommandFlags.None) + { + var msg = HashFieldSetAndSetExpiryMessage(key, field, value, Expiration.CreateOrKeepTtl(expiry, keepTtl), when, flags); + return ExecuteAsync(msg, ResultProcessor.RedisValue); + } + + public Task HashFieldSetAndSetExpiryAsync(RedisKey key, RedisValue field, RedisValue value, DateTime expiry, When when = When.Always, CommandFlags flags = CommandFlags.None) + { + var msg = HashFieldSetAndSetExpiryMessage(key, field, value, new(expiry), when, flags); + return ExecuteAsync(msg, ResultProcessor.RedisValue); + } + + public Task HashFieldSetAndSetExpiryAsync(RedisKey key, HashEntry[] hashFields, TimeSpan? 
expiry = null, bool keepTtl = false, When when = When.Always, CommandFlags flags = CommandFlags.None) + { + if (hashFields == null) throw new ArgumentNullException(nameof(hashFields)); + var msg = HashFieldSetAndSetExpiryMessage(key, hashFields, Expiration.CreateOrKeepTtl(expiry, keepTtl), when, flags); + return ExecuteAsync(msg, ResultProcessor.RedisValue); + } + public Task HashFieldSetAndSetExpiryAsync(RedisKey key, HashEntry[] hashFields, DateTime expiry, When when = When.Always, CommandFlags flags = CommandFlags.None) + { + if (hashFields == null) throw new ArgumentNullException(nameof(hashFields)); + var msg = HashFieldSetAndSetExpiryMessage(key, hashFields, new(expiry), when, flags); + return ExecuteAsync(msg, ResultProcessor.RedisValue); + } + + public long[] HashFieldGetExpireDateTime(RedisKey key, RedisValue[] hashFields, CommandFlags flags = CommandFlags.None) => + HashFieldExecute(RedisCommand.HPEXPIRETIME, key, SyncCustomArrExecutor>, ResultProcessor.Int64Array, flags, hashFields); + + public Task HashFieldGetExpireDateTimeAsync(RedisKey key, RedisValue[] hashFields, CommandFlags flags = CommandFlags.None) => + HashFieldExecute(RedisCommand.HPEXPIRETIME, key, AsyncCustomArrExecutor>, ResultProcessor.Int64Array, flags, hashFields); + + public PersistResult[] HashFieldPersist(RedisKey key, RedisValue[] hashFields, CommandFlags flags = CommandFlags.None) => + HashFieldExecute(RedisCommand.HPERSIST, key, SyncCustomArrExecutor>, ResultProcessor.PersistResultArray, flags, hashFields); + + public Task HashFieldPersistAsync(RedisKey key, RedisValue[] hashFields, CommandFlags flags = CommandFlags.None) => + HashFieldExecute(RedisCommand.HPERSIST, key, AsyncCustomArrExecutor>, ResultProcessor.PersistResultArray, flags, hashFields); + + public long[] HashFieldGetTimeToLive(RedisKey key, RedisValue[] hashFields, CommandFlags flags = CommandFlags.None) => + HashFieldExecute(RedisCommand.HPTTL, key, SyncCustomArrExecutor>, ResultProcessor.Int64Array, flags, 
hashFields); + + public Task HashFieldGetTimeToLiveAsync(RedisKey key, RedisValue[] hashFields, CommandFlags flags = CommandFlags.None) => + HashFieldExecute(RedisCommand.HPTTL, key, AsyncCustomArrExecutor>, ResultProcessor.Int64Array, flags, hashFields); + public RedisValue HashGet(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(Database, flags, RedisCommand.HGET, key, hashField); return ExecuteSync(msg, ResultProcessor.RedisValue); } - public Lease HashGetLease(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None) + public Lease? HashGetLease(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(Database, flags, RedisCommand.HGET, key, hashField); return ExecuteSync(msg, ResultProcessor.Lease); @@ -321,19 +776,19 @@ public RedisValue[] HashGet(RedisKey key, RedisValue[] hashFields, CommandFlags if (hashFields == null) throw new ArgumentNullException(nameof(hashFields)); if (hashFields.Length == 0) return Array.Empty(); var msg = Message.Create(Database, flags, RedisCommand.HMGET, key, hashFields); - return ExecuteSync(msg, ResultProcessor.RedisValueArray); + return ExecuteSync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); } public HashEntry[] HashGetAll(RedisKey key, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(Database, flags, RedisCommand.HGETALL, key); - return ExecuteSync(msg, ResultProcessor.HashEntryArray); + return ExecuteSync(msg, ResultProcessor.HashEntryArray, defaultValue: Array.Empty()); } public Task HashGetAllAsync(RedisKey key, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(Database, flags, RedisCommand.HGETALL, key); - return ExecuteAsync(msg, ResultProcessor.HashEntryArray); + return ExecuteAsync(msg, ResultProcessor.HashEntryArray, defaultValue: Array.Empty()); } public Task HashGetAsync(RedisKey key, RedisValue hashField, CommandFlags flags = 
CommandFlags.None) @@ -342,7 +797,7 @@ public Task HashGetAsync(RedisKey key, RedisValue hashField, Command return ExecuteAsync(msg, ResultProcessor.RedisValue); } - public Task> HashGetLeaseAsync(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None) + public Task?> HashGetLeaseAsync(RedisKey key, RedisValue hashField, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(Database, flags, RedisCommand.HGET, key, hashField); return ExecuteAsync(msg, ResultProcessor.Lease); @@ -351,9 +806,9 @@ public Task> HashGetLeaseAsync(RedisKey key, RedisValue hashField, C public Task HashGetAsync(RedisKey key, RedisValue[] hashFields, CommandFlags flags = CommandFlags.None) { if (hashFields == null) throw new ArgumentNullException(nameof(hashFields)); - if (hashFields.Length == 0) return CompletedTask.FromResult(new RedisValue[0], asyncState); + if (hashFields.Length == 0) return CompletedTask.FromDefault(Array.Empty(), asyncState); var msg = Message.Create(Database, flags, RedisCommand.HMGET, key, hashFields); - return ExecuteAsync(msg, ResultProcessor.RedisValueArray); + return ExecuteAsync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); } public long HashIncrement(RedisKey key, RedisValue hashField, long value = 1, CommandFlags flags = CommandFlags.None) @@ -387,13 +842,13 @@ public Task HashIncrementAsync(RedisKey key, RedisValue hashField, doubl public RedisValue[] HashKeys(RedisKey key, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(Database, flags, RedisCommand.HKEYS, key); - return ExecuteSync(msg, ResultProcessor.RedisValueArray); + return ExecuteSync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); } public Task HashKeysAsync(RedisKey key, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(Database, flags, RedisCommand.HKEYS, key); - return ExecuteAsync(msg, ResultProcessor.RedisValueArray); + return ExecuteAsync(msg, ResultProcessor.RedisValueArray, 
defaultValue: Array.Empty()); } public long HashLength(RedisKey key, CommandFlags flags = CommandFlags.None) @@ -402,12 +857,48 @@ public long HashLength(RedisKey key, CommandFlags flags = CommandFlags.None) return ExecuteSync(msg, ResultProcessor.Int64); } + public RedisValue HashRandomField(RedisKey key, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.HRANDFIELD, key); + return ExecuteSync(msg, ResultProcessor.RedisValue); + } + + public RedisValue[] HashRandomFields(RedisKey key, long count, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.HRANDFIELD, key, count); + return ExecuteSync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); + } + + public HashEntry[] HashRandomFieldsWithValues(RedisKey key, long count, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.HRANDFIELD, key, count, RedisLiterals.WITHVALUES); + return ExecuteSync(msg, ResultProcessor.HashEntryArray, defaultValue: Array.Empty()); + } + public Task HashLengthAsync(RedisKey key, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(Database, flags, RedisCommand.HLEN, key); return ExecuteAsync(msg, ResultProcessor.Int64); } + public Task HashRandomFieldAsync(RedisKey key, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.HRANDFIELD, key); + return ExecuteAsync(msg, ResultProcessor.RedisValue); + } + + public Task HashRandomFieldsAsync(RedisKey key, long count, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.HRANDFIELD, key, count); + return ExecuteAsync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); + } + + public Task HashRandomFieldsWithValuesAsync(RedisKey key, long count, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.HRANDFIELD, key, 
count, RedisLiterals.WITHVALUES); + return ExecuteAsync(msg, ResultProcessor.HashEntryArray, defaultValue: Array.Empty()); + } + IEnumerable IDatabase.HashScan(RedisKey key, RedisValue pattern, int pageSize, CommandFlags flags) => HashScanAsync(key, pattern, pageSize, CursorUtils.Origin, 0, flags); @@ -428,6 +919,23 @@ private CursorEnumerable HashScanAsync(RedisKey key, RedisValue patte throw ExceptionFactory.NotSupported(true, RedisCommand.HSCAN); } + IEnumerable IDatabase.HashScanNoValues(RedisKey key, RedisValue pattern, int pageSize, long cursor, int pageOffset, CommandFlags flags) + => HashScanNoValuesAsync(key, pattern, pageSize, cursor, pageOffset, flags); + + IAsyncEnumerable IDatabaseAsync.HashScanNoValuesAsync(RedisKey key, RedisValue pattern, int pageSize, long cursor, int pageOffset, CommandFlags flags) + => HashScanNoValuesAsync(key, pattern, pageSize, cursor, pageOffset, flags); + + private CursorEnumerable HashScanNoValuesAsync(RedisKey key, RedisValue pattern, int pageSize, long cursor, int pageOffset, CommandFlags flags) + { + var scan = TryScan(key, pattern, pageSize, cursor, pageOffset, flags, RedisCommand.HSCAN, SetScanResultProcessor.Default, out var server, true); + if (scan != null) return scan; + + if (cursor != 0) throw ExceptionFactory.NoCursor(RedisCommand.HKEYS); + + if (pattern.IsNull) return CursorEnumerable.From(this, server, HashKeysAsync(key, flags), pageOffset); + throw ExceptionFactory.NotSupported(true, RedisCommand.HSCAN); + } + public bool HashSet(RedisKey key, RedisValue hashField, RedisValue value, When when = When.Always, CommandFlags flags = CommandFlags.None) { WhenAlwaysOrNotExists(when); @@ -450,7 +958,6 @@ public long HashStringLength(RedisKey key, RedisValue hashField, CommandFlags fl return ExecuteSync(msg, ResultProcessor.Int64); } - public Task HashSetAsync(RedisKey key, RedisValue hashField, RedisValue value, When when = When.Always, CommandFlags flags = CommandFlags.None) { WhenAlwaysOrNotExists(when); @@ -481,13 
+988,13 @@ public Task HashSetIfNotExistsAsync(RedisKey key, RedisValue hashField, Re public RedisValue[] HashValues(RedisKey key, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(Database, flags, RedisCommand.HVALS, key); - return ExecuteSync(msg, ResultProcessor.RedisValueArray); + return ExecuteSync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); } public Task HashValuesAsync(RedisKey key, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(Database, flags, RedisCommand.HVALS, key); - return ExecuteAsync(msg, ResultProcessor.RedisValueArray); + return ExecuteAsync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); } public bool HyperLogLogAdd(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None) @@ -516,46 +1023,46 @@ public Task HyperLogLogAddAsync(RedisKey key, RedisValue[] values, Command public long HyperLogLogLength(RedisKey key, CommandFlags flags = CommandFlags.None) { - var features = GetFeatures(key, flags, out ServerEndPoint server); + var features = GetFeatures(key, flags, RedisCommand.PFCOUNT, out ServerEndPoint? server); var cmd = Message.Create(Database, flags, RedisCommand.PFCOUNT, key); - // technically a write / master-only command until 2.8.18 - if (server != null && !features.HyperLogLogCountReplicaSafe) cmd.SetMasterOnly(); + // technically a write / primary-only command until 2.8.18 + if (server != null && !features.HyperLogLogCountReplicaSafe) cmd.SetPrimaryOnly(); return ExecuteSync(cmd, ResultProcessor.Int64, server); } public long HyperLogLogLength(RedisKey[] keys, CommandFlags flags = CommandFlags.None) { if (keys == null) throw new ArgumentNullException(nameof(keys)); - ServerEndPoint server = null; + ServerEndPoint? 
server = null; var cmd = Message.Create(Database, flags, RedisCommand.PFCOUNT, keys); if (keys.Length != 0) { - var features = GetFeatures(keys[0], flags, out server); - // technically a write / master-only command until 2.8.18 - if (server != null && !features.HyperLogLogCountReplicaSafe) cmd.SetMasterOnly(); + var features = GetFeatures(keys[0], flags, RedisCommand.PFCOUNT, out server); + // technically a write / primary-only command until 2.8.18 + if (server != null && !features.HyperLogLogCountReplicaSafe) cmd.SetPrimaryOnly(); } return ExecuteSync(cmd, ResultProcessor.Int64, server); } public Task HyperLogLogLengthAsync(RedisKey key, CommandFlags flags = CommandFlags.None) { - var features = GetFeatures(key, flags, out ServerEndPoint server); + var features = GetFeatures(key, flags, RedisCommand.PFCOUNT, out ServerEndPoint? server); var cmd = Message.Create(Database, flags, RedisCommand.PFCOUNT, key); - // technically a write / master-only command until 2.8.18 - if (server != null && !features.HyperLogLogCountReplicaSafe) cmd.SetMasterOnly(); + // technically a write / primary-only command until 2.8.18 + if (server != null && !features.HyperLogLogCountReplicaSafe) cmd.SetPrimaryOnly(); return ExecuteAsync(cmd, ResultProcessor.Int64, server); } public Task HyperLogLogLengthAsync(RedisKey[] keys, CommandFlags flags = CommandFlags.None) { if (keys == null) throw new ArgumentNullException(nameof(keys)); - ServerEndPoint server = null; + ServerEndPoint? 
server = null; var cmd = Message.Create(Database, flags, RedisCommand.PFCOUNT, keys); if (keys.Length != 0) { - var features = GetFeatures(keys[0], flags, out server); - // technically a write / master-only command until 2.8.18 - if (server != null && !features.HyperLogLogCountReplicaSafe) cmd.SetMasterOnly(); + var features = GetFeatures(keys[0], flags, RedisCommand.PFCOUNT, out server); + // technically a write / primary-only command until 2.8.18 + if (server != null && !features.HyperLogLogCountReplicaSafe) cmd.SetPrimaryOnly(); } return ExecuteAsync(cmd, ResultProcessor.Int64, server); } @@ -584,13 +1091,13 @@ public Task HyperLogLogMergeAsync(RedisKey destination, RedisKey[] sourceKeys, C return ExecuteAsync(cmd, ResultProcessor.DemandOK); } - public EndPoint IdentifyEndpoint(RedisKey key = default(RedisKey), CommandFlags flags = CommandFlags.None) + public EndPoint? IdentifyEndpoint(RedisKey key = default, CommandFlags flags = CommandFlags.None) { var msg = key.IsNull ? Message.Create(-1, flags, RedisCommand.PING) : Message.Create(Database, flags, RedisCommand.EXISTS, key); return ExecuteSync(msg, ResultProcessor.ConnectionIdentity); } - public Task IdentifyEndpointAsync(RedisKey key = default(RedisKey), CommandFlags flags = CommandFlags.None) + public Task IdentifyEndpointAsync(RedisKey key = default, CommandFlags flags = CommandFlags.None) { var msg = key.IsNull ? 
Message.Create(-1, flags, RedisCommand.PING) : Message.Create(Database, flags, RedisCommand.EXISTS, key); return ExecuteAsync(msg, ResultProcessor.ConnectionIdentity); @@ -602,6 +1109,18 @@ public bool IsConnected(RedisKey key, CommandFlags flags = CommandFlags.None) return server?.IsConnected == true; } + public bool KeyCopy(RedisKey sourceKey, RedisKey destinationKey, int destinationDatabase = -1, bool replace = false, CommandFlags flags = CommandFlags.None) + { + var msg = GetCopyMessage(sourceKey, destinationKey, destinationDatabase, replace, flags); + return ExecuteSync(msg, ResultProcessor.Boolean); + } + + public Task KeyCopyAsync(RedisKey sourceKey, RedisKey destinationKey, int destinationDatabase = -1, bool replace = false, CommandFlags flags = CommandFlags.None) + { + var msg = GetCopyMessage(sourceKey, destinationKey, destinationDatabase, replace, flags); + return ExecuteAsync(msg, ResultProcessor.Boolean); + } + public bool KeyDelete(RedisKey key, CommandFlags flags = CommandFlags.None) { var cmd = GetDeleteCommand(key, flags, out var server); @@ -640,9 +1159,9 @@ public Task KeyDeleteAsync(RedisKey[] keys, CommandFlags flags = CommandFl return CompletedTask.Default(0); } - private RedisCommand GetDeleteCommand(RedisKey key, CommandFlags flags, out ServerEndPoint server) + private RedisCommand GetDeleteCommand(RedisKey key, CommandFlags flags, out ServerEndPoint? server) { - var features = GetFeatures(key, flags, out server); + var features = GetFeatures(key, flags, RedisCommand.UNLINK, out server); if (server != null && features.Unlink && multiplexer.CommandMap.IsAvailable(RedisCommand.UNLINK)) { return RedisCommand.UNLINK; @@ -650,18 +1169,30 @@ private RedisCommand GetDeleteCommand(RedisKey key, CommandFlags flags, out Serv return RedisCommand.DEL; } - public byte[] KeyDump(RedisKey key, CommandFlags flags = CommandFlags.None) + public byte[]? 
KeyDump(RedisKey key, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(Database, flags, RedisCommand.DUMP, key); return ExecuteSync(msg, ResultProcessor.ByteArray); } - public Task KeyDumpAsync(RedisKey key, CommandFlags flags = CommandFlags.None) + public Task KeyDumpAsync(RedisKey key, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(Database, flags, RedisCommand.DUMP, key); return ExecuteAsync(msg, ResultProcessor.ByteArray); } + public string? KeyEncoding(RedisKey key, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.OBJECT, RedisLiterals.ENCODING, key); + return ExecuteSync(msg, ResultProcessor.String); + } + + public Task KeyEncodingAsync(RedisKey key, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.OBJECT, RedisLiterals.ENCODING, key); + return ExecuteAsync(msg, ResultProcessor.String); + } + public bool KeyExists(RedisKey key, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(Database, flags, RedisCommand.EXISTS, key); @@ -686,35 +1217,72 @@ public Task KeyExistsAsync(RedisKey[] keys, CommandFlags flags = CommandFl return ExecuteAsync(msg, ResultProcessor.Int64); } - public bool KeyExpire(RedisKey key, TimeSpan? expiry, CommandFlags flags = CommandFlags.None) + public bool KeyExpire(RedisKey key, TimeSpan? expiry, CommandFlags flags = CommandFlags.None) => + KeyExpire(key, expiry, ExpireWhen.Always, flags); + + public bool KeyExpire(RedisKey key, DateTime? expiry, CommandFlags flags = CommandFlags.None) => + KeyExpire(key, expiry, ExpireWhen.Always, flags); + + public bool KeyExpire(RedisKey key, TimeSpan? expiry, ExpireWhen when, CommandFlags flags = CommandFlags.None) { - var msg = GetExpiryMessage(key, flags, expiry, out ServerEndPoint server); + var msg = GetExpiryMessage(key, flags, expiry, when, out ServerEndPoint? 
server); return ExecuteSync(msg, ResultProcessor.Boolean, server: server); } - public bool KeyExpire(RedisKey key, DateTime? expiry, CommandFlags flags = CommandFlags.None) + public bool KeyExpire(RedisKey key, DateTime? expiry, ExpireWhen when, CommandFlags flags = CommandFlags.None) { - var msg = GetExpiryMessage(key, flags, expiry, out ServerEndPoint server); + var msg = GetExpiryMessage(key, flags, expiry, when, out ServerEndPoint? server); return ExecuteSync(msg, ResultProcessor.Boolean, server: server); } - public Task KeyExpireAsync(RedisKey key, TimeSpan? expiry, CommandFlags flags = CommandFlags.None) + public Task KeyExpireAsync(RedisKey key, TimeSpan? expiry, CommandFlags flags = CommandFlags.None) => + KeyExpireAsync(key, expiry, ExpireWhen.Always, flags); + + public Task KeyExpireAsync(RedisKey key, DateTime? expiry, CommandFlags flags = CommandFlags.None) => + KeyExpireAsync(key, expiry, ExpireWhen.Always, flags); + + public Task KeyExpireAsync(RedisKey key, TimeSpan? expiry, ExpireWhen when, CommandFlags flags = CommandFlags.None) { - var msg = GetExpiryMessage(key, flags, expiry, out ServerEndPoint server); + var msg = GetExpiryMessage(key, flags, expiry, when, out ServerEndPoint? server); return ExecuteAsync(msg, ResultProcessor.Boolean, server: server); } - public Task KeyExpireAsync(RedisKey key, DateTime? expiry, CommandFlags flags = CommandFlags.None) + public Task KeyExpireAsync(RedisKey key, DateTime? expire, ExpireWhen when, CommandFlags flags = CommandFlags.None) { - var msg = GetExpiryMessage(key, flags, expiry, out ServerEndPoint server); + var msg = GetExpiryMessage(key, flags, expire, when, out ServerEndPoint? server); return ExecuteAsync(msg, ResultProcessor.Boolean, server: server); } + public DateTime? 
KeyExpireTime(RedisKey key, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.PEXPIRETIME, key); + return ExecuteSync(msg, ResultProcessor.NullableDateTimeFromMilliseconds); + } + + public Task KeyExpireTimeAsync(RedisKey key, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.PEXPIRETIME, key); + return ExecuteAsync(msg, ResultProcessor.NullableDateTimeFromMilliseconds); + } + + public long? KeyFrequency(RedisKey key, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.OBJECT, RedisLiterals.FREQ, key); + return ExecuteSync(msg, ResultProcessor.NullableInt64); + } + + public Task KeyFrequencyAsync(RedisKey key, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.OBJECT, RedisLiterals.FREQ, key); + return ExecuteAsync(msg, ResultProcessor.NullableInt64); + } + public TimeSpan? KeyIdleTime(RedisKey key, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(Database, flags, RedisCommand.OBJECT, RedisLiterals.IDLETIME, key); return ExecuteSync(msg, ResultProcessor.TimeSpanFromSeconds); } + public Task KeyIdleTimeAsync(RedisKey key, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(Database, flags, RedisCommand.OBJECT, RedisLiterals.IDLETIME, key); @@ -746,7 +1314,7 @@ public KeyMigrateCommandMessage(int db, RedisKey key, EndPoint toServer, int toD : base(db, flags, RedisCommand.MIGRATE, key) { if (toServer == null) throw new ArgumentNullException(nameof(toServer)); - if (!Format.TryGetHostPort(toServer, out string toHost, out int toPort)) throw new ArgumentException("toServer"); + if (!Format.TryGetHostPort(toServer, out string? toHost, out int? 
toPort)) throw new ArgumentException($"Couldn't get host and port from {toServer}", nameof(toServer)); this.toHost = toHost; this.toPort = toPort; if (toDatabase < 0) throw new ArgumentOutOfRangeException(nameof(toDatabase)); @@ -765,8 +1333,8 @@ protected override void WriteImpl(PhysicalConnection physical) physical.Write(Key); physical.WriteBulkString(toDatabase); physical.WriteBulkString(timeoutMilliseconds); - if (isCopy) physical.WriteBulkString(RedisLiterals.COPY); - if (isReplace) physical.WriteBulkString(RedisLiterals.REPLACE); + if (isCopy) physical.WriteBulkString("COPY"u8); + if (isReplace) physical.WriteBulkString("REPLACE"u8); } public override int ArgCount @@ -816,6 +1384,18 @@ public Task KeyRandomAsync(CommandFlags flags = CommandFlags.None) return ExecuteAsync(msg, ResultProcessor.RedisKey); } + public long? KeyRefCount(RedisKey key, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.OBJECT, RedisLiterals.REFCOUNT, key); + return ExecuteSync(msg, ResultProcessor.NullableInt64); + } + + public Task KeyRefCountAsync(RedisKey key, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.OBJECT, RedisLiterals.REFCOUNT, key); + return ExecuteAsync(msg, ResultProcessor.NullableInt64); + } + public bool KeyRename(RedisKey key, RedisKey newKey, When when = When.Always, CommandFlags flags = CommandFlags.None) { WhenAlwaysOrNotExists(when); @@ -844,7 +1424,7 @@ public Task KeyRestoreAsync(RedisKey key, byte[] value, TimeSpan? expiry = null, public TimeSpan? KeyTimeToLive(RedisKey key, CommandFlags flags = CommandFlags.None) { - var features = GetFeatures(key, flags, out ServerEndPoint server); + var features = GetFeatures(key, flags, RedisCommand.TTL, out ServerEndPoint? 
server); Message msg; if (server != null && features.MillisecondExpiry && multiplexer.CommandMap.IsAvailable(RedisCommand.PTTL)) { @@ -857,7 +1437,7 @@ public Task KeyRestoreAsync(RedisKey key, byte[] value, TimeSpan? expiry = null, public Task KeyTimeToLiveAsync(RedisKey key, CommandFlags flags = CommandFlags.None) { - var features = GetFeatures(key, flags, out ServerEndPoint server); + var features = GetFeatures(key, flags, RedisCommand.TTL, out ServerEndPoint? server); Message msg; if (server != null && features.MillisecondExpiry && multiplexer.CommandMap.IsAvailable(RedisCommand.PTTL)) { @@ -922,12 +1502,60 @@ public RedisValue ListLeftPop(RedisKey key, CommandFlags flags = CommandFlags.No return ExecuteSync(msg, ResultProcessor.RedisValue); } + public RedisValue[] ListLeftPop(RedisKey key, long count, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.LPOP, key, count); + return ExecuteSync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); + } + + public ListPopResult ListLeftPop(RedisKey[] keys, long count, CommandFlags flags = CommandFlags.None) + { + var msg = GetListMultiPopMessage(keys, RedisLiterals.LEFT, count, flags); + return ExecuteSync(msg, ResultProcessor.ListPopResult, defaultValue: ListPopResult.Null); + } + + public long ListPosition(RedisKey key, RedisValue element, long rank = 1, long maxLength = 0, CommandFlags flags = CommandFlags.None) + { + var msg = CreateListPositionMessage(Database, flags, key, element, rank, maxLength); + return ExecuteSync(msg, ResultProcessor.Int64DefaultNegativeOne, defaultValue: -1); + } + + public long[] ListPositions(RedisKey key, RedisValue element, long count, long rank = 1, long maxLength = 0, CommandFlags flags = CommandFlags.None) + { + var msg = CreateListPositionMessage(Database, flags, key, element, rank, maxLength, count); + return ExecuteSync(msg, ResultProcessor.Int64Array, defaultValue: Array.Empty()); + } + public Task 
ListLeftPopAsync(RedisKey key, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(Database, flags, RedisCommand.LPOP, key); return ExecuteAsync(msg, ResultProcessor.RedisValue); } + public Task ListLeftPopAsync(RedisKey key, long count, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.LPOP, key, count); + return ExecuteAsync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); + } + + public Task ListLeftPopAsync(RedisKey[] keys, long count, CommandFlags flags = CommandFlags.None) + { + var msg = GetListMultiPopMessage(keys, RedisLiterals.LEFT, count, flags); + return ExecuteAsync(msg, ResultProcessor.ListPopResult, defaultValue: ListPopResult.Null); + } + + public Task ListPositionAsync(RedisKey key, RedisValue element, long rank = 1, long maxLength = 0, CommandFlags flags = CommandFlags.None) + { + var msg = CreateListPositionMessage(Database, flags, key, element, rank, maxLength); + return ExecuteAsync(msg, ResultProcessor.Int64DefaultNegativeOne, defaultValue: -1); + } + + public Task ListPositionsAsync(RedisKey key, RedisValue element, long count, long rank = 1, long maxLength = 0, CommandFlags flags = CommandFlags.None) + { + var msg = CreateListPositionMessage(Database, flags, key, element, rank, maxLength, count); + return ExecuteAsync(msg, ResultProcessor.Int64Array, defaultValue: Array.Empty()); + } + public long ListLeftPush(RedisKey key, RedisValue value, When when = When.Always, CommandFlags flags = CommandFlags.None) { WhenAlwaysOrExists(when); @@ -986,16 +1614,28 @@ public Task ListLengthAsync(RedisKey key, CommandFlags flags = CommandFlag return ExecuteAsync(msg, ResultProcessor.Int64); } + public RedisValue ListMove(RedisKey sourceKey, RedisKey destinationKey, ListSide sourceSide, ListSide destinationSide, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.LMOVE, sourceKey, destinationKey, sourceSide.ToLiteral(), 
destinationSide.ToLiteral()); + return ExecuteSync(msg, ResultProcessor.RedisValue); + } + + public Task ListMoveAsync(RedisKey sourceKey, RedisKey destinationKey, ListSide sourceSide, ListSide destinationSide, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.LMOVE, sourceKey, destinationKey, sourceSide.ToLiteral(), destinationSide.ToLiteral()); + return ExecuteAsync(msg, ResultProcessor.RedisValue); + } + public RedisValue[] ListRange(RedisKey key, long start = 0, long stop = -1, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(Database, flags, RedisCommand.LRANGE, key, start, stop); - return ExecuteSync(msg, ResultProcessor.RedisValueArray); + return ExecuteSync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); } public Task ListRangeAsync(RedisKey key, long start = 0, long stop = -1, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(Database, flags, RedisCommand.LRANGE, key, start, stop); - return ExecuteAsync(msg, ResultProcessor.RedisValueArray); + return ExecuteAsync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); } public long ListRemove(RedisKey key, RedisValue value, long count = 0, CommandFlags flags = CommandFlags.None) @@ -1016,12 +1656,36 @@ public RedisValue ListRightPop(RedisKey key, CommandFlags flags = CommandFlags.N return ExecuteSync(msg, ResultProcessor.RedisValue); } + public RedisValue[] ListRightPop(RedisKey key, long count, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.RPOP, key, count); + return ExecuteSync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); + } + + public ListPopResult ListRightPop(RedisKey[] keys, long count, CommandFlags flags = CommandFlags.None) + { + var msg = GetListMultiPopMessage(keys, RedisLiterals.RIGHT, count, flags); + return ExecuteSync(msg, ResultProcessor.ListPopResult, defaultValue: ListPopResult.Null); + } + 
public Task ListRightPopAsync(RedisKey key, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(Database, flags, RedisCommand.RPOP, key); return ExecuteAsync(msg, ResultProcessor.RedisValue); } + public Task ListRightPopAsync(RedisKey key, long count, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.RPOP, key, count); + return ExecuteAsync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); + } + + public Task ListRightPopAsync(RedisKey[] keys, long count, CommandFlags flags = CommandFlags.None) + { + var msg = GetListMultiPopMessage(keys, RedisLiterals.RIGHT, count, flags); + return ExecuteAsync(msg, ResultProcessor.ListPopResult, defaultValue: ListPopResult.Null); + } + public RedisValue ListRightPopLeftPush(RedisKey source, RedisKey destination, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(Database, flags, RedisCommand.RPOPLPUSH, source, destination); @@ -1106,18 +1770,33 @@ public Task ListTrimAsync(RedisKey key, long start, long stop, CommandFlags flag public bool LockExtend(RedisKey key, RedisValue value, TimeSpan expiry, CommandFlags flags = CommandFlags.None) { - if (value.IsNull) throw new ArgumentNullException(nameof(value)); - var tran = GetLockExtendTransaction(key, value, expiry); + var msg = TryGetLockExtendMessage(key, value, expiry, flags, out var server); + if (msg is not null) return ExecuteSync(msg, ResultProcessor.Boolean, server); + var tran = GetLockExtendTransaction(key, value, expiry); if (tran != null) return tran.Execute(flags); // without transactions (twemproxy etc), we can't enforce the "value" part return KeyExpire(key, expiry, flags); } - public Task LockExtendAsync(RedisKey key, RedisValue value, TimeSpan expiry, CommandFlags flags = CommandFlags.None) + private Message? TryGetLockExtendMessage(in RedisKey key, in RedisValue value, TimeSpan expiry, CommandFlags flags, out ServerEndPoint? server, [CallerMemberName] string? 
caller = null) { if (value.IsNull) throw new ArgumentNullException(nameof(value)); + + // note that lock tokens are expected to be small, so: we'll use IFEQ rather than IFDEQ, for reliability + // note possible future extension:[P]EXPIRE ... IF* https://github.com/redis/redis/issues/14505 + var features = GetFeatures(key, flags, RedisCommand.SET, out server); + return features.SetWithValueCheck + ? GetStringSetMessage(key, value, expiry, ValueCondition.Equal(value), flags, caller) // use check-and-set + : null; + } + + public Task LockExtendAsync(RedisKey key, RedisValue value, TimeSpan expiry, CommandFlags flags = CommandFlags.None) + { + var msg = TryGetLockExtendMessage(key, value, expiry, flags, out var server); + if (msg is not null) return ExecuteAsync(msg, ResultProcessor.Boolean, server); + var tran = GetLockExtendTransaction(key, value, expiry); if (tran != null) return tran.ExecuteAsync(flags); @@ -1137,7 +1816,9 @@ public Task LockQueryAsync(RedisKey key, CommandFlags flags = Comman public bool LockRelease(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None) { - if (value.IsNull) throw new ArgumentNullException(nameof(value)); + var msg = TryGetLockReleaseMessage(key, value, flags, out var server); + if (msg is not null) return ExecuteSync(msg, ResultProcessor.Boolean, server); + var tran = GetLockReleaseTransaction(key, value); if (tran != null) return tran.Execute(flags); @@ -1145,9 +1826,22 @@ public bool LockRelease(RedisKey key, RedisValue value, CommandFlags flags = Com return KeyDelete(key, flags); } - public Task LockReleaseAsync(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None) + private Message? TryGetLockReleaseMessage(in RedisKey key, in RedisValue value, CommandFlags flags, out ServerEndPoint? server, [CallerMemberName] string? 
caller = null) { if (value.IsNull) throw new ArgumentNullException(nameof(value)); + + // note that lock tokens are expected to be small, so: we'll use IFEQ rather than IFDEQ, for reliability + var features = GetFeatures(key, flags, RedisCommand.DELEX, out server); + return features.DeleteWithValueCheck + ? GetStringDeleteMessage(key, ValueCondition.Equal(value), flags, caller) // use check-and-delete + : null; + } + + public Task LockReleaseAsync(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None) + { + var msg = TryGetLockReleaseMessage(key, value, flags, out var server); + if (msg is not null) return ExecuteAsync(msg, ResultProcessor.Boolean, server); + var tran = GetLockReleaseTransaction(key, value); if (tran != null) return tran.ExecuteAsync(flags); @@ -1161,92 +1855,177 @@ public bool LockTake(RedisKey key, RedisValue value, TimeSpan expiry, CommandFla return StringSet(key, value, expiry, When.NotExists, flags); } - public Task LockTakeAsync(RedisKey key, RedisValue value, TimeSpan expiry, CommandFlags flags = CommandFlags.None) + public Task LockTakeAsync(RedisKey key, RedisValue value, TimeSpan expiry, CommandFlags flags = CommandFlags.None) + { + if (value.IsNull) throw new ArgumentNullException(nameof(value)); + return StringSetAsync(key, value, expiry, When.NotExists, flags); + } + + public string? 
StringLongestCommonSubsequence(RedisKey key1, RedisKey key2, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.LCS, key1, key2); + return ExecuteSync(msg, ResultProcessor.String); + } + + public Task StringLongestCommonSubsequenceAsync(RedisKey key1, RedisKey key2, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.LCS, key1, key2); + return ExecuteAsync(msg, ResultProcessor.String); + } + + public long StringLongestCommonSubsequenceLength(RedisKey key1, RedisKey key2, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.LCS, key1, key2, RedisLiterals.LEN); + return ExecuteSync(msg, ResultProcessor.Int64); + } + + public Task StringLongestCommonSubsequenceLengthAsync(RedisKey key1, RedisKey key2, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.LCS, key1, key2, RedisLiterals.LEN); + return ExecuteAsync(msg, ResultProcessor.Int64); + } + + public LCSMatchResult StringLongestCommonSubsequenceWithMatches(RedisKey key1, RedisKey key2, long minSubMatchLength = 0, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.LCS, key1, key2, RedisLiterals.IDX, RedisLiterals.MINMATCHLEN, minSubMatchLength, RedisLiterals.WITHMATCHLEN); + return ExecuteSync(msg, ResultProcessor.LCSMatchResult); + } + + public Task StringLongestCommonSubsequenceWithMatchesAsync(RedisKey key1, RedisKey key2, long minSubMatchLength = 0, CommandFlags flags = CommandFlags.None) { - if (value.IsNull) throw new ArgumentNullException(nameof(value)); - return StringSetAsync(key, value, expiry, When.NotExists, flags); + var msg = Message.Create(Database, flags, RedisCommand.LCS, key1, key2, RedisLiterals.IDX, RedisLiterals.MINMATCHLEN, minSubMatchLength, RedisLiterals.WITHMATCHLEN); + return ExecuteAsync(msg, ResultProcessor.LCSMatchResult); } public long 
Publish(RedisChannel channel, RedisValue message, CommandFlags flags = CommandFlags.None) { if (channel.IsNullOrEmpty) throw new ArgumentNullException(nameof(channel)); - var msg = Message.Create(-1, flags, RedisCommand.PUBLISH, channel, message); - return ExecuteSync(msg, ResultProcessor.Int64); + var msg = Message.Create(-1, flags, channel.GetPublishCommand(), channel, message); + // if we're actively subscribed: send via that connection (otherwise, follow normal rules) + return ExecuteSync(msg, ResultProcessor.Int64, server: multiplexer.GetSubscribedServer(channel)); } public Task PublishAsync(RedisChannel channel, RedisValue message, CommandFlags flags = CommandFlags.None) { if (channel.IsNullOrEmpty) throw new ArgumentNullException(nameof(channel)); - var msg = Message.Create(-1, flags, RedisCommand.PUBLISH, channel, message); - return ExecuteAsync(msg, ResultProcessor.Int64); + var msg = Message.Create(-1, flags, channel.GetPublishCommand(), channel, message); + // if we're actively subscribed: send via that connection (otherwise, follow normal rules) + return ExecuteAsync(msg, ResultProcessor.Int64, server: multiplexer.GetSubscribedServer(channel)); + } + + public RedisResult Execute(string command, params object[] args) + => Execute(command, args, CommandFlags.None); + + public RedisResult Execute(string command, ICollection args, CommandFlags flags = CommandFlags.None) + { + var msg = new ExecuteMessage(multiplexer?.CommandMap, Database, flags, command, args); + return ExecuteSync(msg, ResultProcessor.ScriptResult)!; + } + + public Task ExecuteAsync(string command, params object[] args) + => ExecuteAsync(command, args, CommandFlags.None); + + public Task ExecuteAsync(string command, ICollection? 
args, CommandFlags flags = CommandFlags.None) + { + var msg = new ExecuteMessage(multiplexer?.CommandMap, Database, flags, command, args); + return ExecuteAsync(msg, ResultProcessor.ScriptResult, defaultValue: RedisResult.NullSingle); } - public RedisResult ScriptEvaluate(string script, RedisKey[] keys = null, RedisValue[] values = null, CommandFlags flags = CommandFlags.None) + public RedisResult ScriptEvaluate(string script, RedisKey[]? keys = null, RedisValue[]? values = null, CommandFlags flags = CommandFlags.None) { - var msg = new ScriptEvalMessage(Database, flags, script, keys, values); + var command = ResultProcessor.ScriptLoadProcessor.IsSHA1(script) ? RedisCommand.EVALSHA : RedisCommand.EVAL; + var msg = new ScriptEvalMessage(Database, flags, command, script, keys, values); try { - return ExecuteSync(msg, ResultProcessor.ScriptResult); + return ExecuteSync(msg, ResultProcessor.ScriptResult, defaultValue: RedisResult.NullSingle); } catch (RedisServerException) when (msg.IsScriptUnavailable) { // could be a NOSCRIPT; for a sync call, we can re-issue that without problem - return ExecuteSync(msg, ResultProcessor.ScriptResult); + return ExecuteSync(msg, ResultProcessor.ScriptResult, defaultValue: RedisResult.NullSingle); } } - public RedisResult Execute(string command, params object[] args) - => Execute(command, args, CommandFlags.None); - public RedisResult Execute(string command, ICollection args, CommandFlags flags = CommandFlags.None) + public RedisResult ScriptEvaluate(byte[] hash, RedisKey[]? keys = null, RedisValue[]? 
values = null, CommandFlags flags = CommandFlags.None) { - var msg = new ExecuteMessage(multiplexer?.CommandMap, Database, flags, command, args); - return ExecuteSync(msg, ResultProcessor.ScriptResult); + var msg = new ScriptEvalMessage(Database, flags, RedisCommand.EVALSHA, hash, keys, values); + return ExecuteSync(msg, ResultProcessor.ScriptResult, defaultValue: RedisResult.NullSingle); } - public Task ExecuteAsync(string command, params object[] args) - => ExecuteAsync(command, args, CommandFlags.None); - public Task ExecuteAsync(string command, ICollection args, CommandFlags flags = CommandFlags.None) + public RedisResult ScriptEvaluate(LuaScript script, object? parameters = null, CommandFlags flags = CommandFlags.None) { - var msg = new ExecuteMessage(multiplexer?.CommandMap, Database, flags, command, args); - return ExecuteAsync(msg, ResultProcessor.ScriptResult); + return script.Evaluate(this, parameters, null, flags); } - public RedisResult ScriptEvaluate(byte[] hash, RedisKey[] keys = null, RedisValue[] values = null, CommandFlags flags = CommandFlags.None) + public RedisResult ScriptEvaluate(LoadedLuaScript script, object? parameters = null, CommandFlags flags = CommandFlags.None) { - var msg = new ScriptEvalMessage(Database, flags, hash, keys, values); - return ExecuteSync(msg, ResultProcessor.ScriptResult); + return script.Evaluate(this, parameters, withKeyPrefix: null, flags); } - public RedisResult ScriptEvaluate(LuaScript script, object parameters = null, CommandFlags flags = CommandFlags.None) + public async Task ScriptEvaluateAsync(string script, RedisKey[]? keys = null, RedisValue[]? values = null, CommandFlags flags = CommandFlags.None) { - return script.Evaluate(this, parameters, null, flags); + var command = ResultProcessor.ScriptLoadProcessor.IsSHA1(script) ? 
RedisCommand.EVALSHA : RedisCommand.EVAL; + var msg = new ScriptEvalMessage(Database, flags, command, script, keys, values); + + try + { + return await ExecuteAsync(msg, ResultProcessor.ScriptResult, defaultValue: RedisResult.NullSingle).ForAwait(); + } + catch (RedisServerException) when (msg.IsScriptUnavailable) + { + // could be a NOSCRIPT; for a sync call, we can re-issue that without problem + return await ExecuteAsync(msg, ResultProcessor.ScriptResult, defaultValue: RedisResult.NullSingle).ForAwait(); + } } - public RedisResult ScriptEvaluate(LoadedLuaScript script, object parameters = null, CommandFlags flags = CommandFlags.None) + public Task ScriptEvaluateAsync(byte[] hash, RedisKey[]? keys = null, RedisValue[]? values = null, CommandFlags flags = CommandFlags.None) { - return script.Evaluate(this, parameters, null, flags); + var msg = new ScriptEvalMessage(Database, flags, RedisCommand.EVALSHA, hash, keys, values); + return ExecuteAsync(msg, ResultProcessor.ScriptResult, defaultValue: RedisResult.NullSingle); } - public Task ScriptEvaluateAsync(string script, RedisKey[] keys = null, RedisValue[] values = null, CommandFlags flags = CommandFlags.None) + public Task ScriptEvaluateAsync(LuaScript script, object? parameters = null, CommandFlags flags = CommandFlags.None) { - var msg = new ScriptEvalMessage(Database, flags, script, keys, values); - return ExecuteAsync(msg, ResultProcessor.ScriptResult); + return script.EvaluateAsync(this, parameters, null, flags); } - public Task ScriptEvaluateAsync(byte[] hash, RedisKey[] keys = null, RedisValue[] values = null, CommandFlags flags = CommandFlags.None) + public Task ScriptEvaluateAsync(LoadedLuaScript script, object? 
parameters = null, CommandFlags flags = CommandFlags.None) { - var msg = new ScriptEvalMessage(Database, flags, hash, keys, values); - return ExecuteAsync(msg, ResultProcessor.ScriptResult); + return script.EvaluateAsync(this, parameters, withKeyPrefix: null, flags); } - public Task ScriptEvaluateAsync(LuaScript script, object parameters = null, CommandFlags flags = CommandFlags.None) + public RedisResult ScriptEvaluateReadOnly(string script, RedisKey[]? keys = null, RedisValue[]? values = null, CommandFlags flags = CommandFlags.None) { - return script.EvaluateAsync(this, parameters, null, flags); + var command = ResultProcessor.ScriptLoadProcessor.IsSHA1(script) ? RedisCommand.EVALSHA_RO : RedisCommand.EVAL_RO; + var msg = new ScriptEvalMessage(Database, flags, command, script, keys, values); + try + { + return ExecuteSync(msg, ResultProcessor.ScriptResult, defaultValue: RedisResult.NullSingle); + } + catch (RedisServerException) when (msg.IsScriptUnavailable) + { + // could be a NOSCRIPT; for a sync call, we can re-issue that without problem + return ExecuteSync(msg, ResultProcessor.ScriptResult, defaultValue: RedisResult.NullSingle); + } } - public Task ScriptEvaluateAsync(LoadedLuaScript script, object parameters = null, CommandFlags flags = CommandFlags.None) + public RedisResult ScriptEvaluateReadOnly(byte[] hash, RedisKey[]? keys = null, RedisValue[]? values = null, CommandFlags flags = CommandFlags.None) { - return script.EvaluateAsync(this, parameters, null, flags); + var msg = new ScriptEvalMessage(Database, flags, RedisCommand.EVALSHA_RO, hash, keys, values); + return ExecuteSync(msg, ResultProcessor.ScriptResult, defaultValue: RedisResult.NullSingle); + } + + public Task ScriptEvaluateReadOnlyAsync(string script, RedisKey[]? keys = null, RedisValue[]? values = null, CommandFlags flags = CommandFlags.None) + { + var command = ResultProcessor.ScriptLoadProcessor.IsSHA1(script) ? 
RedisCommand.EVALSHA_RO : RedisCommand.EVAL_RO; + var msg = new ScriptEvalMessage(Database, flags, command, script, keys, values); + return ExecuteAsync(msg, ResultProcessor.ScriptResult, defaultValue: RedisResult.NullSingle); + } + + public Task ScriptEvaluateReadOnlyAsync(byte[] hash, RedisKey[]? keys = null, RedisValue[]? values = null, CommandFlags flags = CommandFlags.None) + { + var msg = new ScriptEvalMessage(Database, flags, RedisCommand.EVALSHA_RO, hash, keys, values); + return ExecuteAsync(msg, ResultProcessor.ScriptResult, defaultValue: RedisResult.NullSingle); } public bool SetAdd(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None) @@ -1278,13 +2057,13 @@ public Task SetAddAsync(RedisKey key, RedisValue[] values, CommandFlags fl public RedisValue[] SetCombine(SetOperation operation, RedisKey first, RedisKey second, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(Database, flags, SetOperationCommand(operation, false), first, second); - return ExecuteSync(msg, ResultProcessor.RedisValueArray); + return ExecuteSync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); } public RedisValue[] SetCombine(SetOperation operation, RedisKey[] keys, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(Database, flags, SetOperationCommand(operation, false), keys); - return ExecuteSync(msg, ResultProcessor.RedisValueArray); + return ExecuteSync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); } public long SetCombineAndStore(SetOperation operation, RedisKey destination, RedisKey first, RedisKey second, CommandFlags flags = CommandFlags.None) @@ -1314,13 +2093,13 @@ public Task SetCombineAndStoreAsync(SetOperation operation, RedisKey desti public Task SetCombineAsync(SetOperation operation, RedisKey first, RedisKey second, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(Database, flags, SetOperationCommand(operation, false), first, second); - return 
ExecuteAsync(msg, ResultProcessor.RedisValueArray); + return ExecuteAsync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); } public Task SetCombineAsync(SetOperation operation, RedisKey[] keys, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(Database, flags, SetOperationCommand(operation, false), keys); - return ExecuteAsync(msg, ResultProcessor.RedisValueArray); + return ExecuteAsync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); } public bool SetContains(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None) @@ -1335,6 +2114,30 @@ public Task SetContainsAsync(RedisKey key, RedisValue value, CommandFlags return ExecuteAsync(msg, ResultProcessor.Boolean); } + public bool[] SetContains(RedisKey key, RedisValue[] values, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.SMISMEMBER, key, values); + return ExecuteSync(msg, ResultProcessor.BooleanArray, defaultValue: Array.Empty()); + } + + public Task SetContainsAsync(RedisKey key, RedisValue[] values, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.SMISMEMBER, key, values); + return ExecuteAsync(msg, ResultProcessor.BooleanArray, defaultValue: Array.Empty()); + } + + public long SetIntersectionLength(RedisKey[] keys, long limit = 0, CommandFlags flags = CommandFlags.None) + { + var msg = GetSetIntersectionLengthMessage(keys, limit, flags); + return ExecuteSync(msg, ResultProcessor.Int64); + } + + public Task SetIntersectionLengthAsync(RedisKey[] keys, long limit = 0, CommandFlags flags = CommandFlags.None) + { + var msg = GetSetIntersectionLengthMessage(keys, limit, flags); + return ExecuteAsync(msg, ResultProcessor.Int64); + } + public long SetLength(RedisKey key, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(Database, flags, RedisCommand.SCARD, key); @@ -1350,13 +2153,13 @@ public Task SetLengthAsync(RedisKey key, 
CommandFlags flags = CommandFlags public RedisValue[] SetMembers(RedisKey key, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(Database, flags, RedisCommand.SMEMBERS, key); - return ExecuteSync(msg, ResultProcessor.RedisValueArray); + return ExecuteSync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); } public Task SetMembersAsync(RedisKey key, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(Database, flags, RedisCommand.SMEMBERS, key); - return ExecuteAsync(msg, ResultProcessor.RedisValueArray); + return ExecuteAsync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); } public bool SetMove(RedisKey source, RedisKey destination, RedisValue value, CommandFlags flags = CommandFlags.None) @@ -1389,16 +2192,16 @@ public RedisValue[] SetPop(RedisKey key, long count, CommandFlags flags = Comman var msg = count == 1 ? Message.Create(Database, flags, RedisCommand.SPOP, key) : Message.Create(Database, flags, RedisCommand.SPOP, key, count); - return ExecuteSync(msg, ResultProcessor.RedisValueArray); + return ExecuteSync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); } public Task SetPopAsync(RedisKey key, long count, CommandFlags flags = CommandFlags.None) { - if(count == 0) return Task.FromResult(Array.Empty()); + if (count == 0) return CompletedTask.FromDefault(Array.Empty(), asyncState); var msg = count == 1 ? 
Message.Create(Database, flags, RedisCommand.SPOP, key) : Message.Create(Database, flags, RedisCommand.SPOP, key, count); - return ExecuteAsync(msg, ResultProcessor.RedisValueArray); + return ExecuteAsync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); } public RedisValue SetRandomMember(RedisKey key, CommandFlags flags = CommandFlags.None) @@ -1416,13 +2219,13 @@ public Task SetRandomMemberAsync(RedisKey key, CommandFlags flags = public RedisValue[] SetRandomMembers(RedisKey key, long count, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(Database, flags, RedisCommand.SRANDMEMBER, key, count); - return ExecuteSync(msg, ResultProcessor.RedisValueArray); + return ExecuteSync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); } public Task SetRandomMembersAsync(RedisKey key, long count, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(Database, flags, RedisCommand.SRANDMEMBER, key, count); - return ExecuteAsync(msg, ResultProcessor.RedisValueArray); + return ExecuteAsync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); } public bool SetRemove(RedisKey key, RedisValue value, CommandFlags flags = CommandFlags.None) @@ -1472,85 +2275,133 @@ private CursorEnumerable SetScanAsync(RedisKey key, RedisValue patte throw ExceptionFactory.NotSupported(true, RedisCommand.SSCAN); } - public RedisValue[] Sort(RedisKey key, long skip = 0, long take = -1, Order order = Order.Ascending, SortType sortType = SortType.Numeric, RedisValue by = default(RedisValue), RedisValue[] get = null, CommandFlags flags = CommandFlags.None) + public RedisValue[] Sort(RedisKey key, long skip = 0, long take = -1, Order order = Order.Ascending, SortType sortType = SortType.Numeric, RedisValue by = default, RedisValue[]? 
get = null, CommandFlags flags = CommandFlags.None) { - var msg = GetSortedSetAddMessage(default(RedisKey), key, skip, take, order, sortType, by, get, flags); - return ExecuteSync(msg, ResultProcessor.RedisValueArray); + var msg = GetSortMessage(RedisKey.Null, key, skip, take, order, sortType, by, get, flags, out var server); + return ExecuteSync(msg, ResultProcessor.RedisValueArray, server: server, defaultValue: Array.Empty()); } - public long SortAndStore(RedisKey destination, RedisKey key, long skip = 0, long take = -1, Order order = Order.Ascending, SortType sortType = SortType.Numeric, RedisValue by = default(RedisValue), RedisValue[] get = null, CommandFlags flags = CommandFlags.None) + public long SortAndStore(RedisKey destination, RedisKey key, long skip = 0, long take = -1, Order order = Order.Ascending, SortType sortType = SortType.Numeric, RedisValue by = default, RedisValue[]? get = null, CommandFlags flags = CommandFlags.None) { - var msg = GetSortedSetAddMessage(destination, key, skip, take, order, sortType, by, get, flags); - return ExecuteSync(msg, ResultProcessor.Int64); + var msg = GetSortMessage(destination, key, skip, take, order, sortType, by, get, flags, out var server); + return ExecuteSync(msg, ResultProcessor.Int64, server); } - public Task SortAndStoreAsync(RedisKey destination, RedisKey key, long skip = 0, long take = -1, Order order = Order.Ascending, SortType sortType = SortType.Numeric, RedisValue by = default(RedisValue), RedisValue[] get = null, CommandFlags flags = CommandFlags.None) + public Task SortAndStoreAsync(RedisKey destination, RedisKey key, long skip = 0, long take = -1, Order order = Order.Ascending, SortType sortType = SortType.Numeric, RedisValue by = default, RedisValue[]? 
get = null, CommandFlags flags = CommandFlags.None) { - var msg = GetSortedSetAddMessage(destination, key, skip, take, order, sortType, by, get, flags); - return ExecuteAsync(msg, ResultProcessor.Int64); + var msg = GetSortMessage(destination, key, skip, take, order, sortType, by, get, flags, out var server); + return ExecuteAsync(msg, ResultProcessor.Int64, server); } - public Task SortAsync(RedisKey key, long skip = 0, long take = -1, Order order = Order.Ascending, SortType sortType = SortType.Numeric, RedisValue by = default(RedisValue), RedisValue[] get = null, CommandFlags flags = CommandFlags.None) + public Task SortAsync(RedisKey key, long skip = 0, long take = -1, Order order = Order.Ascending, SortType sortType = SortType.Numeric, RedisValue by = default, RedisValue[]? get = null, CommandFlags flags = CommandFlags.None) { - var msg = GetSortedSetAddMessage(default(RedisKey), key, skip, take, order, sortType, by, get, flags); - return ExecuteAsync(msg, ResultProcessor.RedisValueArray); + var msg = GetSortMessage(RedisKey.Null, key, skip, take, order, sortType, by, get, flags, out var server); + return ExecuteAsync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty(), server: server); } - public bool SortedSetAdd(RedisKey key, RedisValue member, double score, CommandFlags flags) + public bool SortedSetAdd(RedisKey key, RedisValue member, double score, CommandFlags flags) => + SortedSetAdd(key, member, score, SortedSetWhen.Always, flags); + + public bool SortedSetAdd(RedisKey key, RedisValue member, double score, When when = When.Always, CommandFlags flags = CommandFlags.None) => + SortedSetAdd(key, member, score, SortedSetWhenExtensions.Parse(when), flags); + + public bool SortedSetAdd(RedisKey key, RedisValue member, double score, SortedSetWhen when = SortedSetWhen.Always, CommandFlags flags = CommandFlags.None) { - var msg = GetSortedSetAddMessage(key, member, score, When.Always, flags); + var msg = GetSortedSetAddMessage(key, member, score, 
when, false, flags); return ExecuteSync(msg, ResultProcessor.Boolean); } - public bool SortedSetAdd(RedisKey key, RedisValue member, double score, When when = When.Always, CommandFlags flags = CommandFlags.None) + public bool SortedSetUpdate(RedisKey key, RedisValue member, double score, SortedSetWhen when = SortedSetWhen.Always, CommandFlags flags = CommandFlags.None) { - var msg = GetSortedSetAddMessage(key, member, score, when, flags); + var msg = GetSortedSetAddMessage(key, member, score, when, true, flags); return ExecuteSync(msg, ResultProcessor.Boolean); } - public long SortedSetAdd(RedisKey key, SortedSetEntry[] values, CommandFlags flags) + public long SortedSetAdd(RedisKey key, SortedSetEntry[] values, CommandFlags flags) => + SortedSetAdd(key, values, SortedSetWhen.Always, flags); + + public long SortedSetAdd(RedisKey key, SortedSetEntry[] values, When when = When.Always, CommandFlags flags = CommandFlags.None) => + SortedSetAdd(key, values, SortedSetWhenExtensions.Parse(when), flags); + + public long SortedSetAdd(RedisKey key, SortedSetEntry[] values, SortedSetWhen when = SortedSetWhen.Always, CommandFlags flags = CommandFlags.None) { - var msg = GetSortedSetAddMessage(key, values, When.Always, flags); + var msg = GetSortedSetAddMessage(key, values, when, false, flags); return ExecuteSync(msg, ResultProcessor.Int64); } - public long SortedSetAdd(RedisKey key, SortedSetEntry[] values, When when = When.Always, CommandFlags flags = CommandFlags.None) + public long SortedSetUpdate(RedisKey key, SortedSetEntry[] values, SortedSetWhen when = SortedSetWhen.Always, CommandFlags flags = CommandFlags.None) { - var msg = GetSortedSetAddMessage(key, values, when, flags); + var msg = GetSortedSetAddMessage(key, values, when, true, flags); return ExecuteSync(msg, ResultProcessor.Int64); } - public Task SortedSetAddAsync(RedisKey key, RedisValue member, double score, CommandFlags flags) + public Task SortedSetAddAsync(RedisKey key, RedisValue member, double score, 
CommandFlags flags) => + SortedSetAddAsync(key, member, score, SortedSetWhen.Always, flags); + + public Task SortedSetAddAsync(RedisKey key, RedisValue member, double score, When when = When.Always, CommandFlags flags = CommandFlags.None) => + SortedSetAddAsync(key, member, score, SortedSetWhenExtensions.Parse(when), flags); + + public Task SortedSetAddAsync(RedisKey key, RedisValue member, double score, SortedSetWhen when = SortedSetWhen.Always, CommandFlags flags = CommandFlags.None) { - var msg = GetSortedSetAddMessage(key, member, score, When.Always, flags); + var msg = GetSortedSetAddMessage(key, member, score, when, false, flags); return ExecuteAsync(msg, ResultProcessor.Boolean); } - public Task SortedSetAddAsync(RedisKey key, RedisValue member, double score, When when = When.Always, CommandFlags flags = CommandFlags.None) + public Task SortedSetUpdateAsync(RedisKey key, RedisValue member, double score, SortedSetWhen when = SortedSetWhen.Always, CommandFlags flags = CommandFlags.None) { - var msg = GetSortedSetAddMessage(key, member, score, when, flags); + var msg = GetSortedSetAddMessage(key, member, score, when, true, flags); return ExecuteAsync(msg, ResultProcessor.Boolean); } - public Task SortedSetAddAsync(RedisKey key, SortedSetEntry[] values, CommandFlags flags) + public Task SortedSetAddAsync(RedisKey key, SortedSetEntry[] values, CommandFlags flags) => + SortedSetAddAsync(key, values, SortedSetWhen.Always, flags); + + public Task SortedSetAddAsync(RedisKey key, SortedSetEntry[] values, When when = When.Always, CommandFlags flags = CommandFlags.None) => + SortedSetAddAsync(key, values, SortedSetWhenExtensions.Parse(when), flags); + + public Task SortedSetAddAsync(RedisKey key, SortedSetEntry[] values, SortedSetWhen when = SortedSetWhen.Always, CommandFlags flags = CommandFlags.None) { - var msg = GetSortedSetAddMessage(key, values, When.Always, flags); + var msg = GetSortedSetAddMessage(key, values, when, false, flags); return ExecuteAsync(msg, 
ResultProcessor.Int64); } - public Task SortedSetAddAsync(RedisKey key, SortedSetEntry[] values, When when = When.Always, CommandFlags flags = CommandFlags.None) + public Task SortedSetUpdateAsync(RedisKey key, SortedSetEntry[] values, SortedSetWhen when = SortedSetWhen.Always, CommandFlags flags = CommandFlags.None) { - var msg = GetSortedSetAddMessage(key, values, when, flags); + var msg = GetSortedSetAddMessage(key, values, when, true, flags); return ExecuteAsync(msg, ResultProcessor.Int64); } + public RedisValue[] SortedSetCombine(SetOperation operation, RedisKey[] keys, double[]? weights = null, Aggregate aggregate = Aggregate.Sum, CommandFlags flags = CommandFlags.None) + { + var msg = GetSortedSetCombineCommandMessage(operation, keys, weights, aggregate, withScores: false, flags); + return ExecuteSync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); + } + + public Task SortedSetCombineAsync(SetOperation operation, RedisKey[] keys, double[]? weights = null, Aggregate aggregate = Aggregate.Sum, CommandFlags flags = CommandFlags.None) + { + var msg = GetSortedSetCombineCommandMessage(operation, keys, weights, aggregate, withScores: false, flags); + return ExecuteAsync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); + } + + public SortedSetEntry[] SortedSetCombineWithScores(SetOperation operation, RedisKey[] keys, double[]? weights = null, Aggregate aggregate = Aggregate.Sum, CommandFlags flags = CommandFlags.None) + { + var msg = GetSortedSetCombineCommandMessage(operation, keys, weights, aggregate, withScores: true, flags); + return ExecuteSync(msg, ResultProcessor.SortedSetWithScores, defaultValue: Array.Empty()); + } + + public Task SortedSetCombineWithScoresAsync(SetOperation operation, RedisKey[] keys, double[]? 
weights = null, Aggregate aggregate = Aggregate.Sum, CommandFlags flags = CommandFlags.None) + { + var msg = GetSortedSetCombineCommandMessage(operation, keys, weights, aggregate, withScores: true, flags); + return ExecuteAsync(msg, ResultProcessor.SortedSetWithScores, defaultValue: Array.Empty()); + } + public long SortedSetCombineAndStore(SetOperation operation, RedisKey destination, RedisKey first, RedisKey second, Aggregate aggregate = Aggregate.Sum, CommandFlags flags = CommandFlags.None) { var msg = GetSortedSetCombineAndStoreCommandMessage(operation, destination, new[] { first, second }, null, aggregate, flags); return ExecuteSync(msg, ResultProcessor.Int64); } - public long SortedSetCombineAndStore(SetOperation operation, RedisKey destination, RedisKey[] keys, double[] weights = null, Aggregate aggregate = Aggregate.Sum, CommandFlags flags = CommandFlags.None) + public long SortedSetCombineAndStore(SetOperation operation, RedisKey destination, RedisKey[] keys, double[]? weights = null, Aggregate aggregate = Aggregate.Sum, CommandFlags flags = CommandFlags.None) { var msg = GetSortedSetCombineAndStoreCommandMessage(operation, destination, keys, weights, aggregate, flags); return ExecuteSync(msg, ResultProcessor.Int64); @@ -1562,7 +2413,7 @@ public Task SortedSetCombineAndStoreAsync(SetOperation operation, RedisKey return ExecuteAsync(msg, ResultProcessor.Int64); } - public Task SortedSetCombineAndStoreAsync(SetOperation operation, RedisKey destination, RedisKey[] keys, double[] weights = null, Aggregate aggregate = Aggregate.Sum, CommandFlags flags = CommandFlags.None) + public Task SortedSetCombineAndStoreAsync(SetOperation operation, RedisKey destination, RedisKey[] keys, double[]? 
weights = null, Aggregate aggregate = Aggregate.Sum, CommandFlags flags = CommandFlags.None) { var msg = GetSortedSetCombineAndStoreCommandMessage(operation, destination, keys, weights, aggregate, flags); return ExecuteAsync(msg, ResultProcessor.Int64); @@ -1590,6 +2441,18 @@ public Task SortedSetIncrementAsync(RedisKey key, RedisValue member, dou return ExecuteAsync(msg, ResultProcessor.Double); } + public long SortedSetIntersectionLength(RedisKey[] keys, long limit = 0, CommandFlags flags = CommandFlags.None) + { + var msg = GetSortedSetIntersectionLengthMessage(keys, limit, flags); + return ExecuteSync(msg, ResultProcessor.Int64); + } + + public Task SortedSetIntersectionLengthAsync(RedisKey[] keys, long limit = 0, CommandFlags flags = CommandFlags.None) + { + var msg = GetSortedSetIntersectionLengthMessage(keys, limit, flags); + return ExecuteAsync(msg, ResultProcessor.Int64); + } + public long SortedSetLength(RedisKey key, double min = double.NegativeInfinity, double max = double.PositiveInfinity, Exclude exclude = Exclude.None, CommandFlags flags = CommandFlags.None) { var msg = GetSortedSetLengthMessage(key, min, max, exclude, flags); @@ -1602,52 +2465,120 @@ public Task SortedSetLengthAsync(RedisKey key, double min = double.Negativ return ExecuteAsync(msg, ResultProcessor.Int64); } + public RedisValue SortedSetRandomMember(RedisKey key, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.ZRANDMEMBER, key); + return ExecuteSync(msg, ResultProcessor.RedisValue); + } + + public RedisValue[] SortedSetRandomMembers(RedisKey key, long count, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.ZRANDMEMBER, key, count); + return ExecuteSync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); + } + + public SortedSetEntry[] SortedSetRandomMembersWithScores(RedisKey key, long count, CommandFlags flags = CommandFlags.None) + { + var msg = 
Message.Create(Database, flags, RedisCommand.ZRANDMEMBER, key, count, RedisLiterals.WITHSCORES); + return ExecuteSync(msg, ResultProcessor.SortedSetWithScores, defaultValue: Array.Empty()); + } + + public Task SortedSetRandomMemberAsync(RedisKey key, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.ZRANDMEMBER, key); + return ExecuteAsync(msg, ResultProcessor.RedisValue); + } + + public Task SortedSetRandomMembersAsync(RedisKey key, long count, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.ZRANDMEMBER, key, count); + return ExecuteAsync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); + } + + public Task SortedSetRandomMembersWithScoresAsync(RedisKey key, long count, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.ZRANDMEMBER, key, count, RedisLiterals.WITHSCORES); + return ExecuteAsync(msg, ResultProcessor.SortedSetWithScores, defaultValue: Array.Empty()); + } + public RedisValue[] SortedSetRangeByRank(RedisKey key, long start = 0, long stop = -1, Order order = Order.Ascending, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(Database, flags, order == Order.Descending ? RedisCommand.ZREVRANGE : RedisCommand.ZRANGE, key, start, stop); - return ExecuteSync(msg, ResultProcessor.RedisValueArray); + return ExecuteSync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); + } + + public long SortedSetRangeAndStore( + RedisKey sourceKey, + RedisKey destinationKey, + RedisValue start, + RedisValue stop, + SortedSetOrder sortedSetOrder = SortedSetOrder.ByRank, + Exclude exclude = Exclude.None, + Order order = Order.Ascending, + long skip = 0, + long? 
take = null, + CommandFlags flags = CommandFlags.None) + { + var msg = CreateSortedSetRangeStoreMessage(Database, flags, sourceKey, destinationKey, start, stop, sortedSetOrder, order, exclude, skip, take); + return ExecuteSync(msg, ResultProcessor.Int64); } public Task SortedSetRangeByRankAsync(RedisKey key, long start = 0, long stop = -1, Order order = Order.Ascending, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(Database, flags, order == Order.Descending ? RedisCommand.ZREVRANGE : RedisCommand.ZRANGE, key, start, stop); - return ExecuteAsync(msg, ResultProcessor.RedisValueArray); + return ExecuteAsync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); + } + + public Task SortedSetRangeAndStoreAsync( + RedisKey sourceKey, + RedisKey destinationKey, + RedisValue start, + RedisValue stop, + SortedSetOrder sortedSetOrder = SortedSetOrder.ByRank, + Exclude exclude = Exclude.None, + Order order = Order.Ascending, + long skip = 0, + long? take = null, + CommandFlags flags = CommandFlags.None) + { + var msg = CreateSortedSetRangeStoreMessage(Database, flags, sourceKey, destinationKey, start, stop, sortedSetOrder, order, exclude, skip, take); + return ExecuteAsync(msg, ResultProcessor.Int64); } public SortedSetEntry[] SortedSetRangeByRankWithScores(RedisKey key, long start = 0, long stop = -1, Order order = Order.Ascending, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(Database, flags, order == Order.Descending ? RedisCommand.ZREVRANGE : RedisCommand.ZRANGE, key, start, stop, RedisLiterals.WITHSCORES); - return ExecuteSync(msg, ResultProcessor.SortedSetWithScores); + return ExecuteSync(msg, ResultProcessor.SortedSetWithScores, defaultValue: Array.Empty()); } public Task SortedSetRangeByRankWithScoresAsync(RedisKey key, long start = 0, long stop = -1, Order order = Order.Ascending, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(Database, flags, order == Order.Descending ? 
RedisCommand.ZREVRANGE : RedisCommand.ZRANGE, key, start, stop, RedisLiterals.WITHSCORES); - return ExecuteAsync(msg, ResultProcessor.SortedSetWithScores); + return ExecuteAsync(msg, ResultProcessor.SortedSetWithScores, defaultValue: Array.Empty()); } public RedisValue[] SortedSetRangeByScore(RedisKey key, double start = double.NegativeInfinity, double stop = double.PositiveInfinity, Exclude exclude = Exclude.None, Order order = Order.Ascending, long skip = 0, long take = -1, CommandFlags flags = CommandFlags.None) { var msg = GetSortedSetRangeByScoreMessage(key, start, stop, exclude, order, skip, take, flags, false); - return ExecuteSync(msg, ResultProcessor.RedisValueArray); + return ExecuteSync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); } public Task SortedSetRangeByScoreAsync(RedisKey key, double start = double.NegativeInfinity, double stop = double.PositiveInfinity, Exclude exclude = Exclude.None, Order order = Order.Ascending, long skip = 0, long take = -1, CommandFlags flags = CommandFlags.None) { var msg = GetSortedSetRangeByScoreMessage(key, start, stop, exclude, order, skip, take, flags, false); - return ExecuteAsync(msg, ResultProcessor.RedisValueArray); + return ExecuteAsync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); } public SortedSetEntry[] SortedSetRangeByScoreWithScores(RedisKey key, double start = double.NegativeInfinity, double stop = double.PositiveInfinity, Exclude exclude = Exclude.None, Order order = Order.Ascending, long skip = 0, long take = -1, CommandFlags flags = CommandFlags.None) { var msg = GetSortedSetRangeByScoreMessage(key, start, stop, exclude, order, skip, take, flags, true); - return ExecuteSync(msg, ResultProcessor.SortedSetWithScores); + return ExecuteSync(msg, ResultProcessor.SortedSetWithScores, defaultValue: Array.Empty()); } public Task SortedSetRangeByScoreWithScoresAsync(RedisKey key, double start = double.NegativeInfinity, double stop = double.PositiveInfinity, Exclude 
exclude = Exclude.None, Order order = Order.Ascending, long skip = 0, long take = -1, CommandFlags flags = CommandFlags.None) { var msg = GetSortedSetRangeByScoreMessage(key, start, stop, exclude, order, skip, take, flags, true); - return ExecuteAsync(msg, ResultProcessor.SortedSetWithScores); + return ExecuteAsync(msg, ResultProcessor.SortedSetWithScores, defaultValue: Array.Empty()); } public long? SortedSetRank(RedisKey key, RedisValue member, Order order = Order.Ascending, CommandFlags flags = CommandFlags.None) @@ -1735,12 +2666,24 @@ private CursorEnumerable SortedSetScanAsync(RedisKey key, RedisV return ExecuteSync(msg, ResultProcessor.NullableDouble); } + public double?[] SortedSetScores(RedisKey key, RedisValue[] members, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.ZMSCORE, key, members); + return ExecuteSync(msg, ResultProcessor.NullableDoubleArray, defaultValue: Array.Empty()); + } + public Task SortedSetScoreAsync(RedisKey key, RedisValue member, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(Database, flags, RedisCommand.ZSCORE, key, member); return ExecuteAsync(msg, ResultProcessor.NullableDouble); } + public Task SortedSetScoresAsync(RedisKey key, RedisValue[] members, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.ZMSCORE, key, members); + return ExecuteAsync(msg, ResultProcessor.NullableDoubleArray, defaultValue: Array.Empty()); + } + public SortedSetEntry? SortedSetPop(RedisKey key, Order order = Order.Ascending, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(Database, flags, order == Order.Descending ? RedisCommand.ZPOPMAX : RedisCommand.ZPOPMIN, key); @@ -1759,16 +2702,28 @@ public SortedSetEntry[] SortedSetPop(RedisKey key, long count, Order order = Ord var msg = count == 1 ? Message.Create(Database, flags, order == Order.Descending ? 
RedisCommand.ZPOPMAX : RedisCommand.ZPOPMIN, key) : Message.Create(Database, flags, order == Order.Descending ? RedisCommand.ZPOPMAX : RedisCommand.ZPOPMIN, key, count); - return ExecuteSync(msg, ResultProcessor.SortedSetWithScores); + return ExecuteSync(msg, ResultProcessor.SortedSetWithScores, defaultValue: Array.Empty()); + } + + public SortedSetPopResult SortedSetPop(RedisKey[] keys, long count, Order order = Order.Ascending, CommandFlags flags = CommandFlags.None) + { + var msg = GetSortedSetMultiPopMessage(keys, order, count, flags); + return ExecuteSync(msg, ResultProcessor.SortedSetPopResult, defaultValue: SortedSetPopResult.Null); } public Task SortedSetPopAsync(RedisKey key, long count, Order order = Order.Ascending, CommandFlags flags = CommandFlags.None) { - if (count == 0) return Task.FromResult(Array.Empty()); + if (count == 0) return CompletedTask.FromDefault(Array.Empty(), asyncState); var msg = count == 1 ? Message.Create(Database, flags, order == Order.Descending ? RedisCommand.ZPOPMAX : RedisCommand.ZPOPMIN, key) : Message.Create(Database, flags, order == Order.Descending ? 
RedisCommand.ZPOPMAX : RedisCommand.ZPOPMIN, key, count); - return ExecuteAsync(msg, ResultProcessor.SortedSetWithScores); + return ExecuteAsync(msg, ResultProcessor.SortedSetWithScores, defaultValue: Array.Empty()); + } + + public Task SortedSetPopAsync(RedisKey[] keys, long count, Order order = Order.Ascending, CommandFlags flags = CommandFlags.None) + { + var msg = GetSortedSetMultiPopMessage(keys, order, count, flags); + return ExecuteAsync(msg, ResultProcessor.SortedSetPopResult, defaultValue: SortedSetPopResult.Null); } public long StreamAcknowledge(RedisKey key, RedisValue groupName, RedisValue messageId, CommandFlags flags = CommandFlags.None) @@ -1795,57 +2750,254 @@ public Task StreamAcknowledgeAsync(RedisKey key, RedisValue groupName, Red return ExecuteAsync(msg, ResultProcessor.Int64); } - public RedisValue StreamAdd(RedisKey key, RedisValue streamField, RedisValue streamValue, RedisValue? messageId = null, int? maxLength = null, bool useApproximateMaxLength = false, CommandFlags flags = CommandFlags.None) + public StreamTrimResult StreamAcknowledgeAndDelete(RedisKey key, RedisValue groupName, StreamTrimMode mode, RedisValue messageId, CommandFlags flags = CommandFlags.None) + { + var msg = GetStreamAcknowledgeAndDeleteMessage(key, groupName, mode, messageId, flags); + return ExecuteSync(msg, ResultProcessor.StreamTrimResult); + } + + public Task StreamAcknowledgeAndDeleteAsync(RedisKey key, RedisValue groupName, StreamTrimMode mode, RedisValue messageId, CommandFlags flags = CommandFlags.None) + { + var msg = GetStreamAcknowledgeAndDeleteMessage(key, groupName, mode, messageId, flags); + return ExecuteAsync(msg, ResultProcessor.StreamTrimResult); + } + + public StreamTrimResult[] StreamAcknowledgeAndDelete(RedisKey key, RedisValue groupName, StreamTrimMode mode, RedisValue[] messageIds, CommandFlags flags = CommandFlags.None) + { + var msg = GetStreamAcknowledgeAndDeleteMessage(key, groupName, mode, messageIds, flags); + return ExecuteSync(msg, 
ResultProcessor.StreamTrimResultArray)!; + } + + public Task StreamAcknowledgeAndDeleteAsync(RedisKey key, RedisValue groupName, StreamTrimMode mode, RedisValue[] messageIds, CommandFlags flags = CommandFlags.None) + { + var msg = GetStreamAcknowledgeAndDeleteMessage(key, groupName, mode, messageIds, flags); + return ExecuteAsync(msg, ResultProcessor.StreamTrimResultArray)!; + } + + public RedisValue StreamAdd(RedisKey key, RedisValue streamField, RedisValue streamValue, RedisValue? messageId, int? maxLength, bool useApproximateMaxLength, CommandFlags flags) + => StreamAdd(key, streamField, streamValue, messageId, maxLength, useApproximateMaxLength, null, StreamTrimMode.KeepReferences, flags); + + public RedisValue StreamAdd(RedisKey key, RedisValue streamField, RedisValue streamValue, RedisValue? messageId = null, long? maxLength = null, bool useApproximateMaxLength = false, long? limit = null, StreamTrimMode mode = StreamTrimMode.KeepReferences, CommandFlags flags = CommandFlags.None) { - var msg = GetStreamAddMessage(key, + var msg = GetStreamAddMessage( + key, messageId ?? StreamConstants.AutoGeneratedId, + StreamIdempotentId.Empty, + maxLength, + useApproximateMaxLength, + new NameValueEntry(streamField, streamValue), + limit, + mode, + flags); + + return ExecuteSync(msg, ResultProcessor.RedisValue); + } + + public RedisValue StreamAdd(RedisKey key, RedisValue streamField, RedisValue streamValue, StreamIdempotentId idempotentId, long? maxLength = null, bool useApproximateMaxLength = false, long? limit = null, StreamTrimMode mode = StreamTrimMode.KeepReferences, CommandFlags flags = CommandFlags.None) + { + var msg = GetStreamAddMessage( + key, + StreamConstants.AutoGeneratedId, + idempotentId, maxLength, useApproximateMaxLength, new NameValueEntry(streamField, streamValue), + limit, + mode, flags); return ExecuteSync(msg, ResultProcessor.RedisValue); } - public Task StreamAddAsync(RedisKey key, RedisValue streamField, RedisValue streamValue, RedisValue? 
messageId = null, int? maxLength = null, bool useApproximateMaxLength = false, CommandFlags flags = CommandFlags.None) + public Task StreamAddAsync(RedisKey key, RedisValue streamField, RedisValue streamValue, RedisValue? messageId, int? maxLength, bool useApproximateMaxLength, CommandFlags flags) + => StreamAddAsync(key, streamField, streamValue, messageId, maxLength, useApproximateMaxLength, null, StreamTrimMode.KeepReferences, flags); + + public Task StreamAddAsync(RedisKey key, RedisValue streamField, RedisValue streamValue, RedisValue? messageId = null, long? maxLength = null, bool useApproximateMaxLength = false, long? limit = null, StreamTrimMode mode = StreamTrimMode.KeepReferences, CommandFlags flags = CommandFlags.None) { - var msg = GetStreamAddMessage(key, + var msg = GetStreamAddMessage( + key, messageId ?? StreamConstants.AutoGeneratedId, + StreamIdempotentId.Empty, + maxLength, + useApproximateMaxLength, + new NameValueEntry(streamField, streamValue), + limit, + mode, + flags); + + return ExecuteAsync(msg, ResultProcessor.RedisValue); + } + + public Task StreamAddAsync(RedisKey key, RedisValue streamField, RedisValue streamValue, StreamIdempotentId idempotentId, long? maxLength = null, bool useApproximateMaxLength = false, long? limit = null, StreamTrimMode mode = StreamTrimMode.KeepReferences, CommandFlags flags = CommandFlags.None) + { + var msg = GetStreamAddMessage( + key, + StreamConstants.AutoGeneratedId, + idempotentId, maxLength, useApproximateMaxLength, new NameValueEntry(streamField, streamValue), + limit, + mode, flags); return ExecuteAsync(msg, ResultProcessor.RedisValue); } - public RedisValue StreamAdd(RedisKey key, NameValueEntry[] streamPairs, RedisValue? messageId = null, int? maxLength = null, bool useApproximateMaxLength = false, CommandFlags flags = CommandFlags.None) + public RedisValue StreamAdd(RedisKey key, NameValueEntry[] streamPairs, RedisValue? messageId, int? 
maxLength, bool useApproximateMaxLength, CommandFlags flags) + => StreamAdd(key, streamPairs, messageId, maxLength, useApproximateMaxLength, null, StreamTrimMode.KeepReferences, flags); + + public RedisValue StreamAdd(RedisKey key, NameValueEntry[] streamPairs, RedisValue? messageId = null, long? maxLength = null, bool useApproximateMaxLength = false, long? limit = null, StreamTrimMode mode = StreamTrimMode.KeepReferences, CommandFlags flags = CommandFlags.None) { - var msg = GetStreamAddMessage(key, + var msg = GetStreamAddMessage( + key, messageId ?? StreamConstants.AutoGeneratedId, + StreamIdempotentId.Empty, + maxLength, + useApproximateMaxLength, + streamPairs, + limit, + mode, + flags); + + return ExecuteSync(msg, ResultProcessor.RedisValue); + } + + public RedisValue StreamAdd(RedisKey key, NameValueEntry[] streamPairs, StreamIdempotentId idempotentId, long? maxLength = null, bool useApproximateMaxLength = false, long? limit = null, StreamTrimMode mode = StreamTrimMode.KeepReferences, CommandFlags flags = CommandFlags.None) + { + var msg = GetStreamAddMessage( + key, + StreamConstants.AutoGeneratedId, + idempotentId, maxLength, useApproximateMaxLength, streamPairs, + limit, + mode, flags); return ExecuteSync(msg, ResultProcessor.RedisValue); } - public Task StreamAddAsync(RedisKey key, NameValueEntry[] streamPairs, RedisValue? messageId = null, int? maxLength = null, bool useApproximateMaxLength = false, CommandFlags flags = CommandFlags.None) + public Task StreamAddAsync(RedisKey key, NameValueEntry[] streamPairs, RedisValue? messageId, int? maxLength, bool useApproximateMaxLength, CommandFlags flags) + => StreamAddAsync(key, streamPairs, messageId, maxLength, useApproximateMaxLength, null, StreamTrimMode.KeepReferences, flags); + + public Task StreamAddAsync(RedisKey key, NameValueEntry[] streamPairs, RedisValue? messageId = null, long? maxLength = null, bool useApproximateMaxLength = false, long? 
limit = null, StreamTrimMode mode = StreamTrimMode.KeepReferences, CommandFlags flags = CommandFlags.None) { - var msg = GetStreamAddMessage(key, + var msg = GetStreamAddMessage( + key, messageId ?? StreamConstants.AutoGeneratedId, + StreamIdempotentId.Empty, + maxLength, + useApproximateMaxLength, + streamPairs, + limit, + mode, + flags); + + return ExecuteAsync(msg, ResultProcessor.RedisValue); + } + + public Task StreamAddAsync(RedisKey key, NameValueEntry[] streamPairs, StreamIdempotentId idempotentId, long? maxLength = null, bool useApproximateMaxLength = false, long? limit = null, StreamTrimMode mode = StreamTrimMode.KeepReferences, CommandFlags flags = CommandFlags.None) + { + var msg = GetStreamAddMessage( + key, + StreamConstants.AutoGeneratedId, + idempotentId, maxLength, useApproximateMaxLength, streamPairs, + limit, + mode, flags); return ExecuteAsync(msg, ResultProcessor.RedisValue); } + public void StreamConfigure(RedisKey key, StreamConfiguration configuration, CommandFlags flags = CommandFlags.None) + { + var msg = GetStreamConfigureMessage(key, configuration, flags); + ExecuteSync(msg, ResultProcessor.DemandOK); + } + + public Task StreamConfigureAsync(RedisKey key, StreamConfiguration configuration, CommandFlags flags = CommandFlags.None) + { + var msg = GetStreamConfigureMessage(key, configuration, flags); + return ExecuteAsync(msg, ResultProcessor.DemandOK); + } + + private Message GetStreamConfigureMessage(RedisKey key, StreamConfiguration configuration, CommandFlags flags) + { + if (key.IsNull) throw new ArgumentNullException(nameof(key)); + if (configuration == null) throw new ArgumentNullException(nameof(configuration)); + if (configuration.IdmpMaxSize.HasValue) + { + if (configuration.IdmpDuration.HasValue) + { + // duration and maxsize + return Message.Create( + Database, + flags, + RedisCommand.XCFGSET, + key, + RedisLiterals.IDMP_DURATION, + configuration.IdmpDuration.Value, + RedisLiterals.IDMP_MAXSIZE, + 
configuration.IdmpMaxSize.Value); + } + // just maxsize + return Message.Create( + Database, + flags, + RedisCommand.XCFGSET, + key, + RedisLiterals.IDMP_MAXSIZE, + configuration.IdmpMaxSize.Value); + } + + if (configuration.IdmpDuration.HasValue) + { + // just duration + return Message.Create( + Database, + flags, + RedisCommand.XCFGSET, + key, + RedisLiterals.IDMP_DURATION, + configuration.IdmpDuration.Value); + } + + return Message.Create(Database, flags, RedisCommand.XCFGSET, key); // this will manifest a -ERR, but let's use the server's message + } + + public StreamAutoClaimResult StreamAutoClaim(RedisKey key, RedisValue consumerGroup, RedisValue claimingConsumer, long minIdleTimeInMs, RedisValue startAtId, int? count = null, CommandFlags flags = CommandFlags.None) + { + var msg = GetStreamAutoClaimMessage(key, consumerGroup, claimingConsumer, minIdleTimeInMs, startAtId, count, idsOnly: false, flags); + return ExecuteSync(msg, ResultProcessor.StreamAutoClaim, defaultValue: StreamAutoClaimResult.Null); + } + + public Task StreamAutoClaimAsync(RedisKey key, RedisValue consumerGroup, RedisValue claimingConsumer, long minIdleTimeInMs, RedisValue startAtId, int? count = null, CommandFlags flags = CommandFlags.None) + { + var msg = GetStreamAutoClaimMessage(key, consumerGroup, claimingConsumer, minIdleTimeInMs, startAtId, count, idsOnly: false, flags); + return ExecuteAsync(msg, ResultProcessor.StreamAutoClaim, defaultValue: StreamAutoClaimResult.Null); + } + + public StreamAutoClaimIdsOnlyResult StreamAutoClaimIdsOnly(RedisKey key, RedisValue consumerGroup, RedisValue claimingConsumer, long minIdleTimeInMs, RedisValue startAtId, int? 
count = null, CommandFlags flags = CommandFlags.None) + { + var msg = GetStreamAutoClaimMessage(key, consumerGroup, claimingConsumer, minIdleTimeInMs, startAtId, count, idsOnly: true, flags); + return ExecuteSync(msg, ResultProcessor.StreamAutoClaimIdsOnly, defaultValue: StreamAutoClaimIdsOnlyResult.Null); + } + + public Task StreamAutoClaimIdsOnlyAsync(RedisKey key, RedisValue consumerGroup, RedisValue claimingConsumer, long minIdleTimeInMs, RedisValue startAtId, int? count = null, CommandFlags flags = CommandFlags.None) + { + var msg = GetStreamAutoClaimMessage(key, consumerGroup, claimingConsumer, minIdleTimeInMs, startAtId, count, idsOnly: true, flags); + return ExecuteAsync(msg, ResultProcessor.StreamAutoClaimIdsOnly, defaultValue: StreamAutoClaimIdsOnlyResult.Null); + } + public StreamEntry[] StreamClaim(RedisKey key, RedisValue consumerGroup, RedisValue claimingConsumer, long minIdleTimeInMs, RedisValue[] messageIds, CommandFlags flags = CommandFlags.None) { - var msg = GetStreamClaimMessage(key, + var msg = GetStreamClaimMessage( + key, consumerGroup, claimingConsumer, minIdleTimeInMs, @@ -1853,12 +3005,13 @@ public StreamEntry[] StreamClaim(RedisKey key, RedisValue consumerGroup, RedisVa returnJustIds: false, flags: flags); - return ExecuteSync(msg, ResultProcessor.SingleStream); + return ExecuteSync(msg, ResultProcessor.SingleStream, defaultValue: Array.Empty()); } public Task StreamClaimAsync(RedisKey key, RedisValue consumerGroup, RedisValue claimingConsumer, long minIdleTimeInMs, RedisValue[] messageIds, CommandFlags flags = CommandFlags.None) { - var msg = GetStreamClaimMessage(key, + var msg = GetStreamClaimMessage( + key, consumerGroup, claimingConsumer, minIdleTimeInMs, @@ -1866,12 +3019,13 @@ public Task StreamClaimAsync(RedisKey key, RedisValue consumerGro returnJustIds: false, flags: flags); - return ExecuteAsync(msg, ResultProcessor.SingleStream); + return ExecuteAsync(msg, ResultProcessor.SingleStream, defaultValue: Array.Empty()); } public 
RedisValue[] StreamClaimIdsOnly(RedisKey key, RedisValue consumerGroup, RedisValue claimingConsumer, long minIdleTimeInMs, RedisValue[] messageIds, CommandFlags flags = CommandFlags.None) { - var msg = GetStreamClaimMessage(key, + var msg = GetStreamClaimMessage( + key, consumerGroup, claimingConsumer, minIdleTimeInMs, @@ -1879,12 +3033,13 @@ public RedisValue[] StreamClaimIdsOnly(RedisKey key, RedisValue consumerGroup, R returnJustIds: true, flags: flags); - return ExecuteSync(msg, ResultProcessor.RedisValueArray); + return ExecuteSync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); } public Task StreamClaimIdsOnlyAsync(RedisKey key, RedisValue consumerGroup, RedisValue claimingConsumer, long minIdleTimeInMs, RedisValue[] messageIds, CommandFlags flags = CommandFlags.None) { - var msg = GetStreamClaimMessage(key, + var msg = GetStreamClaimMessage( + key, consumerGroup, claimingConsumer, minIdleTimeInMs, @@ -1892,12 +3047,13 @@ public Task StreamClaimIdsOnlyAsync(RedisKey key, RedisValue consu returnJustIds: true, flags: flags); - return ExecuteAsync(msg, ResultProcessor.RedisValueArray); + return ExecuteAsync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); } public bool StreamConsumerGroupSetPosition(RedisKey key, RedisValue groupName, RedisValue position, CommandFlags flags = CommandFlags.None) { - var msg = Message.Create(Database, + var msg = Message.Create( + Database, flags, RedisCommand.XGROUP, new RedisValue[] @@ -1905,7 +3061,7 @@ public bool StreamConsumerGroupSetPosition(RedisKey key, RedisValue groupName, R StreamConstants.SetId, key.AsRedisValue(), groupName, - StreamPosition.Resolve(position, RedisCommand.XGROUP) + StreamPosition.Resolve(position, RedisCommand.XGROUP), }); return ExecuteSync(msg, ResultProcessor.Boolean); @@ -1913,7 +3069,8 @@ public bool StreamConsumerGroupSetPosition(RedisKey key, RedisValue groupName, R public Task StreamConsumerGroupSetPositionAsync(RedisKey key, RedisValue groupName, 
RedisValue position, CommandFlags flags = CommandFlags.None) { - var msg = Message.Create(Database, + var msg = Message.Create( + Database, flags, RedisCommand.XGROUP, new RedisValue[] @@ -1921,7 +3078,7 @@ public Task StreamConsumerGroupSetPositionAsync(RedisKey key, RedisValue g StreamConstants.SetId, key.AsRedisValue(), groupName, - StreamPosition.Resolve(position, RedisCommand.XGROUP) + StreamPosition.Resolve(position, RedisCommand.XGROUP), }); return ExecuteAsync(msg, ResultProcessor.Boolean); @@ -1933,7 +3090,7 @@ public bool StreamCreateConsumerGroup(RedisKey key, RedisValue groupName, RedisV key, groupName, position, - true, + true, flags); } @@ -1973,44 +3130,46 @@ public Task StreamCreateConsumerGroupAsync(RedisKey key, RedisValue groupN public StreamConsumerInfo[] StreamConsumerInfo(RedisKey key, RedisValue groupName, CommandFlags flags = CommandFlags.None) { - var msg = Message.Create(Database, + var msg = Message.Create( + Database, flags, RedisCommand.XINFO, new RedisValue[] { StreamConstants.Consumers, key.AsRedisValue(), - groupName + groupName, }); - return ExecuteSync(msg, ResultProcessor.StreamConsumerInfo); + return ExecuteSync(msg, ResultProcessor.StreamConsumerInfo, defaultValue: Array.Empty()); } public Task StreamConsumerInfoAsync(RedisKey key, RedisValue groupName, CommandFlags flags = CommandFlags.None) { - var msg = Message.Create(Database, + var msg = Message.Create( + Database, flags, RedisCommand.XINFO, new RedisValue[] { StreamConstants.Consumers, key.AsRedisValue(), - groupName + groupName, }); - return ExecuteAsync(msg, ResultProcessor.StreamConsumerInfo); + return ExecuteAsync(msg, ResultProcessor.StreamConsumerInfo, defaultValue: Array.Empty()); } public StreamGroupInfo[] StreamGroupInfo(RedisKey key, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(Database, flags, RedisCommand.XINFO, StreamConstants.Groups, key); - return ExecuteSync(msg, ResultProcessor.StreamGroupInfo); + return ExecuteSync(msg, 
ResultProcessor.StreamGroupInfo, defaultValue: Array.Empty()); } public Task StreamGroupInfoAsync(RedisKey key, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(Database, flags, RedisCommand.XINFO, StreamConstants.Groups, key); - return ExecuteAsync(msg, ResultProcessor.StreamGroupInfo); + return ExecuteAsync(msg, ResultProcessor.StreamGroupInfo, defaultValue: Array.Empty()); } public StreamInfo StreamInfo(RedisKey key, CommandFlags flags = CommandFlags.None) @@ -2039,29 +3198,54 @@ public Task StreamLengthAsync(RedisKey key, CommandFlags flags = CommandFl public long StreamDelete(RedisKey key, RedisValue[] messageIds, CommandFlags flags = CommandFlags.None) { - var msg = Message.Create(Database, - flags, - RedisCommand.XDEL, - key, - messageIds); - + var msg = Message.Create(Database, flags, RedisCommand.XDEL, key, messageIds); return ExecuteSync(msg, ResultProcessor.Int64); } - public Task StreamDeleteAsync(RedisKey key, RedisValue[] messageIds, CommandFlags flags = CommandFlags.None) + public StreamTrimResult[] StreamDelete(RedisKey key, RedisValue[] messageIds, StreamTrimMode mode, CommandFlags flags) { - var msg = Message.Create(Database, - flags, - RedisCommand.XDEL, - key, - messageIds); + var msg = GetStreamDeleteExMessage(key, messageIds, mode, flags); + return ExecuteSync(msg, ResultProcessor.StreamTrimResultArray)!; + } + + private Message GetStreamDeleteExMessage(RedisKey key, RedisValue[] messageIds, StreamTrimMode mode, CommandFlags flags) + { + if (messageIds == null) throw new ArgumentNullException(nameof(messageIds)); + if (messageIds.Length == 0) throw new ArgumentOutOfRangeException(nameof(messageIds), "messageIds must contain at least one item."); + + // avoid array for single message case + if (messageIds.Length == 1) + { + return Message.Create(Database, flags, RedisCommand.XDELEX, key, StreamConstants.GetMode(mode), StreamConstants.Ids, 1, messageIds[0]); + } + + var values = new RedisValue[messageIds.Length + 3]; + + var 
offset = 0; + values[offset++] = StreamConstants.GetMode(mode); + values[offset++] = StreamConstants.Ids; + values[offset++] = messageIds.Length; + messageIds.AsSpan().CopyTo(values.AsSpan(offset)); + Debug.Assert(offset + messageIds.Length == values.Length); + return Message.Create(Database, flags, RedisCommand.XDELEX, key, values); + } + public Task StreamDeleteAsync(RedisKey key, RedisValue[] messageIds, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.XDEL, key, messageIds); return ExecuteAsync(msg, ResultProcessor.Int64); } + public Task StreamDeleteAsync(RedisKey key, RedisValue[] messageIds, StreamTrimMode mode, CommandFlags flags = CommandFlags.None) + { + var msg = GetStreamDeleteExMessage(key, messageIds, mode, flags); + return ExecuteAsync(msg, ResultProcessor.StreamTrimResultArray)!; + } + public long StreamDeleteConsumer(RedisKey key, RedisValue groupName, RedisValue consumerName, CommandFlags flags = CommandFlags.None) { - var msg = Message.Create(Database, + var msg = Message.Create( + Database, flags, RedisCommand.XGROUP, new RedisValue[] @@ -2069,7 +3253,7 @@ public long StreamDeleteConsumer(RedisKey key, RedisValue groupName, RedisValue StreamConstants.DeleteConsumer, key.AsRedisValue(), groupName, - consumerName + consumerName, }); return ExecuteSync(msg, ResultProcessor.Int64); @@ -2077,7 +3261,8 @@ public long StreamDeleteConsumer(RedisKey key, RedisValue groupName, RedisValue public Task StreamDeleteConsumerAsync(RedisKey key, RedisValue groupName, RedisValue consumerName, CommandFlags flags = CommandFlags.None) { - var msg = Message.Create(Database, + var msg = Message.Create( + Database, flags, RedisCommand.XGROUP, new RedisValue[] @@ -2085,7 +3270,7 @@ public Task StreamDeleteConsumerAsync(RedisKey key, RedisValue groupName, StreamConstants.DeleteConsumer, key.AsRedisValue(), groupName, - consumerName + consumerName, }); return ExecuteAsync(msg, ResultProcessor.Int64); @@ -2093,14 +3278,15 
@@ public Task StreamDeleteConsumerAsync(RedisKey key, RedisValue groupName, public bool StreamDeleteConsumerGroup(RedisKey key, RedisValue groupName, CommandFlags flags = CommandFlags.None) { - var msg = Message.Create(Database, + var msg = Message.Create( + Database, flags, RedisCommand.XGROUP, new RedisValue[] { StreamConstants.Destroy, key.AsRedisValue(), - groupName + groupName, }); return ExecuteSync(msg, ResultProcessor.Boolean); @@ -2108,14 +3294,15 @@ public bool StreamDeleteConsumerGroup(RedisKey key, RedisValue groupName, Comman public Task StreamDeleteConsumerGroupAsync(RedisKey key, RedisValue groupName, CommandFlags flags = CommandFlags.None) { - var msg = Message.Create(Database, + var msg = Message.Create( + Database, flags, RedisCommand.XGROUP, new RedisValue[] { StreamConstants.Destroy, key.AsRedisValue(), - groupName + groupName, }); return ExecuteAsync(msg, ResultProcessor.Boolean); @@ -2133,193 +3320,273 @@ public Task StreamPendingAsync(RedisKey key, RedisValue group return ExecuteAsync(msg, ResultProcessor.StreamPendingInfo); } - public StreamPendingMessageInfo[] StreamPendingMessages(RedisKey key, RedisValue groupName, int count, RedisValue consumerName, RedisValue? minId = null, RedisValue? maxId = null, CommandFlags flags = CommandFlags.None) + public StreamPendingMessageInfo[] StreamPendingMessages(RedisKey key, RedisValue groupName, int count, RedisValue consumerName, RedisValue? minId = null, RedisValue? maxId = null, CommandFlags flags = CommandFlags.None) => + StreamPendingMessages(key, groupName, count, consumerName, minId, maxId, null, flags); + + public StreamPendingMessageInfo[] StreamPendingMessages(RedisKey key, RedisValue groupName, int count, RedisValue consumerName, RedisValue? minId = null, RedisValue? maxId = null, long? 
minIdleTimeInMs = null, CommandFlags flags = CommandFlags.None) { - var msg = GetStreamPendingMessagesMessage(key, + var msg = GetStreamPendingMessagesMessage( + key, groupName, minId, maxId, count, consumerName, + minIdleTimeInMs, flags); - return ExecuteSync(msg, ResultProcessor.StreamPendingMessages); + return ExecuteSync(msg, ResultProcessor.StreamPendingMessages, defaultValue: Array.Empty()); } - public Task StreamPendingMessagesAsync(RedisKey key, RedisValue groupName, int count, RedisValue consumerName, RedisValue? minId = null, RedisValue? maxId = null, CommandFlags flags = CommandFlags.None) + public Task StreamPendingMessagesAsync(RedisKey key, RedisValue groupName, int count, RedisValue consumerName, RedisValue? minId = null, RedisValue? maxId = null, CommandFlags flags = CommandFlags.None) => + StreamPendingMessagesAsync(key, groupName, count, consumerName, minId, maxId, null, flags); + + public Task StreamPendingMessagesAsync(RedisKey key, RedisValue groupName, int count, RedisValue consumerName, RedisValue? minId = null, RedisValue? maxId = null, long? minIdleTimeInMs = null, CommandFlags flags = CommandFlags.None) { - var msg = GetStreamPendingMessagesMessage(key, + var msg = GetStreamPendingMessagesMessage( + key, groupName, minId, maxId, count, consumerName, + minIdleTimeInMs, flags); - return ExecuteAsync(msg, ResultProcessor.StreamPendingMessages); + return ExecuteAsync(msg, ResultProcessor.StreamPendingMessages, defaultValue: Array.Empty()); } public StreamEntry[] StreamRange(RedisKey key, RedisValue? minId = null, RedisValue? maxId = null, int? 
count = null, Order messageOrder = Order.Ascending, CommandFlags flags = CommandFlags.None) { - var msg = GetStreamRangeMessage(key, + var msg = GetStreamRangeMessage( + key, minId, maxId, count, messageOrder, flags); - return ExecuteSync(msg, ResultProcessor.SingleStream); + return ExecuteSync(msg, ResultProcessor.SingleStream, defaultValue: Array.Empty()); } public Task StreamRangeAsync(RedisKey key, RedisValue? minId = null, RedisValue? maxId = null, int? count = null, Order messageOrder = Order.Ascending, CommandFlags flags = CommandFlags.None) { - var msg = GetStreamRangeMessage(key, + var msg = GetStreamRangeMessage( + key, minId, maxId, count, messageOrder, flags); - return ExecuteAsync(msg, ResultProcessor.SingleStream); + return ExecuteAsync(msg, ResultProcessor.SingleStream, defaultValue: Array.Empty()); } public StreamEntry[] StreamRead(RedisKey key, RedisValue position, int? count = null, CommandFlags flags = CommandFlags.None) { - var msg = GetSingleStreamReadMessage(key, + var msg = GetSingleStreamReadMessage( + key, StreamPosition.Resolve(position, RedisCommand.XREAD), count, flags); - return ExecuteSync(msg, ResultProcessor.SingleStreamWithNameSkip); + return ExecuteSync(msg, ResultProcessor.SingleStreamWithNameSkip, defaultValue: Array.Empty()); } public Task StreamReadAsync(RedisKey key, RedisValue position, int? count = null, CommandFlags flags = CommandFlags.None) { - var msg = GetSingleStreamReadMessage(key, + var msg = GetSingleStreamReadMessage( + key, StreamPosition.Resolve(position, RedisCommand.XREAD), count, flags); - return ExecuteAsync(msg, ResultProcessor.SingleStreamWithNameSkip); + return ExecuteAsync(msg, ResultProcessor.SingleStreamWithNameSkip, defaultValue: Array.Empty()); } public RedisStream[] StreamRead(StreamPosition[] streamPositions, int? 
countPerStream = null, CommandFlags flags = CommandFlags.None) { var msg = GetMultiStreamReadMessage(streamPositions, countPerStream, flags); - return ExecuteSync(msg, ResultProcessor.MultiStream); + return ExecuteSync(msg, ResultProcessor.MultiStream, defaultValue: Array.Empty()); } public Task StreamReadAsync(StreamPosition[] streamPositions, int? countPerStream = null, CommandFlags flags = CommandFlags.None) { var msg = GetMultiStreamReadMessage(streamPositions, countPerStream, flags); - return ExecuteAsync(msg, ResultProcessor.MultiStream); + return ExecuteAsync(msg, ResultProcessor.MultiStream, defaultValue: Array.Empty()); } - public StreamEntry[] StreamReadGroup(RedisKey key, RedisValue groupName, RedisValue consumerName, RedisValue? position, int? count, CommandFlags flags) - { - return StreamReadGroup(key, + public StreamEntry[] StreamReadGroup(RedisKey key, RedisValue groupName, RedisValue consumerName, RedisValue? position, int? count, CommandFlags flags) => + StreamReadGroup( + key, groupName, consumerName, position, count, false, + null, flags); - } public StreamEntry[] StreamReadGroup(RedisKey key, RedisValue groupName, RedisValue consumerName, RedisValue? position = null, int? count = null, bool noAck = false, CommandFlags flags = CommandFlags.None) - { - var actualPosition = position ?? StreamPosition.NewMessages; + => StreamReadGroup( + key, + groupName, + consumerName, + position, + count, + noAck, + null, + flags); - var msg = GetStreamReadGroupMessage(key, + public StreamEntry[] StreamReadGroup(RedisKey key, RedisValue groupName, RedisValue consumerName, RedisValue? position = null, int? count = null, bool noAck = false, TimeSpan? claimMinIdleTime = null, CommandFlags flags = CommandFlags.None) + { + var msg = GetStreamReadGroupMessage( + key, groupName, consumerName, - StreamPosition.Resolve(actualPosition, RedisCommand.XREADGROUP), + StreamPosition.Resolve(position ?? 
StreamPosition.NewMessages, RedisCommand.XREADGROUP), count, noAck, + claimMinIdleTime, flags); - return ExecuteSync(msg, ResultProcessor.SingleStreamWithNameSkip); + return ExecuteSync(msg, ResultProcessor.SingleStreamWithNameSkip, defaultValue: Array.Empty()); } public Task StreamReadGroupAsync(RedisKey key, RedisValue groupName, RedisValue consumerName, RedisValue? position, int? count, CommandFlags flags) { - return StreamReadGroupAsync(key, + return StreamReadGroupAsync( + key, groupName, consumerName, position, count, false, + null, flags); } - + public Task StreamReadGroupAsync(RedisKey key, RedisValue groupName, RedisValue consumerName, RedisValue? position = null, int? count = null, bool noAck = false, CommandFlags flags = CommandFlags.None) - { - var actualPosition = position ?? StreamPosition.NewMessages; + => StreamReadGroupAsync( + key, + groupName, + consumerName, + position, + count, + noAck, + null, + flags); - var msg = GetStreamReadGroupMessage(key, + public Task StreamReadGroupAsync(RedisKey key, RedisValue groupName, RedisValue consumerName, RedisValue? position = null, int? count = null, bool noAck = false, TimeSpan? claimMinIdleTime = null, CommandFlags flags = CommandFlags.None) + { + var msg = GetStreamReadGroupMessage( + key, groupName, consumerName, - StreamPosition.Resolve(actualPosition, RedisCommand.XREADGROUP), + StreamPosition.Resolve(position ?? StreamPosition.NewMessages, RedisCommand.XREADGROUP), count, noAck, + claimMinIdleTime, flags); - return ExecuteAsync(msg, ResultProcessor.SingleStreamWithNameSkip); + return ExecuteAsync(msg, ResultProcessor.SingleStreamWithNameSkip, defaultValue: Array.Empty()); } public RedisStream[] StreamReadGroup(StreamPosition[] streamPositions, RedisValue groupName, RedisValue consumerName, int? 
countPerStream, CommandFlags flags) - { - return StreamReadGroup(streamPositions, + => StreamReadGroup( + streamPositions, groupName, consumerName, countPerStream, false, + null, + flags); + + public RedisStream[] StreamReadGroup(StreamPosition[] streamPositions, RedisValue groupName, RedisValue consumerName, int? countPerStream, bool noAck, CommandFlags flags) + => StreamReadGroup( + streamPositions, + groupName, + consumerName, + countPerStream, + noAck, + null, flags); - } - public RedisStream[] StreamReadGroup(StreamPosition[] streamPositions, RedisValue groupName, RedisValue consumerName, int? countPerStream = null, bool noAck = false, CommandFlags flags = CommandFlags.None) + public RedisStream[] StreamReadGroup(StreamPosition[] streamPositions, RedisValue groupName, RedisValue consumerName, int? countPerStream = null, bool noAck = false, TimeSpan? claimMinIdleTime = null, CommandFlags flags = CommandFlags.None) { - var msg = GetMultiStreamReadGroupMessage(streamPositions, + var msg = GetMultiStreamReadGroupMessage( + streamPositions, groupName, consumerName, countPerStream, noAck, + claimMinIdleTime, flags); - return ExecuteSync(msg, ResultProcessor.MultiStream); + return ExecuteSync(msg, ResultProcessor.MultiStream, defaultValue: Array.Empty()); } public Task StreamReadGroupAsync(StreamPosition[] streamPositions, RedisValue groupName, RedisValue consumerName, int? countPerStream, CommandFlags flags) - { - return StreamReadGroupAsync(streamPositions, + => StreamReadGroupAsync( + streamPositions, groupName, consumerName, countPerStream, false, + null, + flags); + + public Task StreamReadGroupAsync(StreamPosition[] streamPositions, RedisValue groupName, RedisValue consumerName, int? 
countPerStream, bool noAck, CommandFlags flags) + => StreamReadGroupAsync( + streamPositions, + groupName, + consumerName, + countPerStream, + noAck, + null, flags); - } - public Task StreamReadGroupAsync(StreamPosition[] streamPositions, RedisValue groupName, RedisValue consumerName, int? countPerStream = null, bool noAck = false, CommandFlags flags = CommandFlags.None) + public Task StreamReadGroupAsync(StreamPosition[] streamPositions, RedisValue groupName, RedisValue consumerName, int? countPerStream = null, bool noAck = false, TimeSpan? claimMinIdleTime = null, CommandFlags flags = CommandFlags.None) { - var msg = GetMultiStreamReadGroupMessage(streamPositions, + var msg = GetMultiStreamReadGroupMessage( + streamPositions, groupName, consumerName, countPerStream, noAck, + claimMinIdleTime, flags); - return ExecuteAsync(msg, ResultProcessor.MultiStream); + return ExecuteAsync(msg, ResultProcessor.MultiStream, defaultValue: Array.Empty()); + } + + public long StreamTrim(RedisKey key, int maxLength, bool useApproximateMaxLength, CommandFlags flags) + => StreamTrim(key, maxLength, useApproximateMaxLength, null, StreamTrimMode.KeepReferences, flags); + + public long StreamTrim(RedisKey key, long maxLength, bool useApproximateMaxLength = false, long? limit = null, StreamTrimMode mode = StreamTrimMode.KeepReferences, CommandFlags flags = CommandFlags.None) + { + var msg = GetStreamTrimMessage(true, key, maxLength, useApproximateMaxLength, limit, mode, flags); + return ExecuteSync(msg, ResultProcessor.Int64); + } + + public Task StreamTrimAsync(RedisKey key, int maxLength, bool useApproximateMaxLength, CommandFlags flags) + => StreamTrimAsync(key, maxLength, useApproximateMaxLength, null, StreamTrimMode.KeepReferences, flags); + + public Task StreamTrimAsync(RedisKey key, long maxLength, bool useApproximateMaxLength = false, long? 
limit = null, StreamTrimMode mode = StreamTrimMode.KeepReferences, CommandFlags flags = CommandFlags.None) + { + var msg = GetStreamTrimMessage(true, key, maxLength, useApproximateMaxLength, limit, mode, flags); + return ExecuteAsync(msg, ResultProcessor.Int64); } - public long StreamTrim(RedisKey key, int maxLength, bool useApproximateMaxLength = false, CommandFlags flags = CommandFlags.None) + public long StreamTrimByMinId(RedisKey key, RedisValue minId, bool useApproximateMaxLength = false, long? limit = null, StreamTrimMode mode = StreamTrimMode.KeepReferences, CommandFlags flags = CommandFlags.None) { - var msg = GetStreamTrimMessage(key, maxLength, useApproximateMaxLength, flags); + var msg = GetStreamTrimMessage(false, key, minId, useApproximateMaxLength, limit, mode, flags); return ExecuteSync(msg, ResultProcessor.Int64); } - public Task StreamTrimAsync(RedisKey key, int maxLength, bool useApproximateMaxLength = false, CommandFlags flags = CommandFlags.None) + public Task StreamTrimByMinIdAsync(RedisKey key, RedisValue minId, bool useApproximateMaxLength = false, long? 
limit = null, StreamTrimMode mode = StreamTrimMode.KeepReferences, CommandFlags flags = CommandFlags.None) { - var msg = GetStreamTrimMessage(key, maxLength, useApproximateMaxLength, flags); + var msg = GetStreamTrimMessage(false, key, minId, useApproximateMaxLength, limit, mode, flags); return ExecuteAsync(msg, ResultProcessor.Int64); } @@ -2335,15 +3602,29 @@ public Task StringAppendAsync(RedisKey key, RedisValue value, CommandFlags return ExecuteAsync(msg, ResultProcessor.Int64); } - public long StringBitCount(RedisKey key, long start = 0, long end = -1, CommandFlags flags = CommandFlags.None) + public long StringBitCount(RedisKey key, long start, long end, CommandFlags flags) => + StringBitCount(key, start, end, StringIndexType.Byte, flags); + + public long StringBitCount(RedisKey key, long start = 0, long end = -1, StringIndexType indexType = StringIndexType.Byte, CommandFlags flags = CommandFlags.None) { - var msg = Message.Create(Database, flags, RedisCommand.BITCOUNT, key, start, end); + var msg = indexType switch + { + StringIndexType.Byte => Message.Create(Database, flags, RedisCommand.BITCOUNT, key, start, end), + _ => Message.Create(Database, flags, RedisCommand.BITCOUNT, key, start, end, indexType.ToLiteral()), + }; return ExecuteSync(msg, ResultProcessor.Int64); } - public Task StringBitCountAsync(RedisKey key, long start = 0, long end = -1, CommandFlags flags = CommandFlags.None) + public Task StringBitCountAsync(RedisKey key, long start, long end, CommandFlags flags) => + StringBitCountAsync(key, start, end, StringIndexType.Byte, flags); + + public Task StringBitCountAsync(RedisKey key, long start = 0, long end = -1, StringIndexType indexType = StringIndexType.Byte, CommandFlags flags = CommandFlags.None) { - var msg = Message.Create(Database, flags, RedisCommand.BITCOUNT, key, start, end); + var msg = indexType switch + { + StringIndexType.Byte => Message.Create(Database, flags, RedisCommand.BITCOUNT, key, start, end), + _ => 
Message.Create(Database, flags, RedisCommand.BITCOUNT, key, start, end, indexType.ToLiteral()), + }; return ExecuteAsync(msg, ResultProcessor.Int64); } @@ -2371,15 +3652,29 @@ public Task StringBitOperationAsync(Bitwise operation, RedisKey destinatio return ExecuteAsync(msg, ResultProcessor.Int64); } - public long StringBitPosition(RedisKey key, bool bit, long start = 0, long end = -1, CommandFlags flags = CommandFlags.None) + public long StringBitPosition(RedisKey key, bool bit, long start, long end, CommandFlags flags) => + StringBitPosition(key, bit, start, end, StringIndexType.Byte, flags); + + public long StringBitPosition(RedisKey key, bool bit, long start = 0, long end = -1, StringIndexType indexType = StringIndexType.Byte, CommandFlags flags = CommandFlags.None) { - var msg = Message.Create(Database, flags, RedisCommand.BITPOS, key, bit, start, end); + var msg = indexType switch + { + StringIndexType.Byte => Message.Create(Database, flags, RedisCommand.BITPOS, key, bit, start, end), + _ => Message.Create(Database, flags, RedisCommand.BITPOS, key, bit, start, end, indexType.ToLiteral()), + }; return ExecuteSync(msg, ResultProcessor.Int64); } - public Task StringBitPositionAsync(RedisKey key, bool value, long start = 0, long end = -1, CommandFlags flags = CommandFlags.None) + public Task StringBitPositionAsync(RedisKey key, bool bit, long start, long end, CommandFlags flags) => + StringBitPositionAsync(key, bit, start, end, StringIndexType.Byte, flags); + + public Task StringBitPositionAsync(RedisKey key, bool bit, long start = 0, long end = -1, StringIndexType indexType = StringIndexType.Byte, CommandFlags flags = CommandFlags.None) { - var msg = Message.Create(Database, flags, RedisCommand.BITPOS, key, value, start, end); + var msg = indexType switch + { + StringIndexType.Byte => Message.Create(Database, flags, RedisCommand.BITPOS, key, bit, start, end), + _ => Message.Create(Database, flags, RedisCommand.BITPOS, key, bit, start, end, 
indexType.ToLiteral()), + }; return ExecuteAsync(msg, ResultProcessor.Int64); } @@ -2409,15 +3704,39 @@ public RedisValue StringGet(RedisKey key, CommandFlags flags = CommandFlags.None return ExecuteSync(msg, ResultProcessor.RedisValue); } + public RedisValue StringGetSetExpiry(RedisKey key, TimeSpan? expiry, CommandFlags flags = CommandFlags.None) + { + var msg = GetStringGetExMessage(key, Expiration.CreateOrPersist(expiry, !expiry.HasValue), flags); + return ExecuteSync(msg, ResultProcessor.RedisValue); + } + + public RedisValue StringGetSetExpiry(RedisKey key, DateTime expiry, CommandFlags flags = CommandFlags.None) + { + var msg = GetStringGetExMessage(key, new(expiry), flags); + return ExecuteSync(msg, ResultProcessor.RedisValue); + } + + public Task StringGetSetExpiryAsync(RedisKey key, TimeSpan? expiry, CommandFlags flags = CommandFlags.None) + { + var msg = GetStringGetExMessage(key, Expiration.CreateOrPersist(expiry, !expiry.HasValue), flags); + return ExecuteAsync(msg, ResultProcessor.RedisValue); + } + + public Task StringGetSetExpiryAsync(RedisKey key, DateTime expiry, CommandFlags flags = CommandFlags.None) + { + var msg = GetStringGetExMessage(key, new(expiry), flags); + return ExecuteAsync(msg, ResultProcessor.RedisValue); + } + public RedisValue[] StringGet(RedisKey[] keys, CommandFlags flags = CommandFlags.None) { if (keys == null) throw new ArgumentNullException(nameof(keys)); if (keys.Length == 0) return Array.Empty(); var msg = Message.Create(Database, flags, RedisCommand.MGET, keys); - return ExecuteSync(msg, ResultProcessor.RedisValueArray); + return ExecuteSync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); } - public Lease StringGetLease(RedisKey key, CommandFlags flags = CommandFlags.None) + public Lease? 
StringGetLease(RedisKey key, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(Database, flags, RedisCommand.GET, key); return ExecuteSync(msg, ResultProcessor.Lease); @@ -2429,7 +3748,7 @@ public Task StringGetAsync(RedisKey key, CommandFlags flags = Comman return ExecuteAsync(msg, ResultProcessor.RedisValue); } - public Task> StringGetLeaseAsync(RedisKey key, CommandFlags flags = CommandFlags.None) + public Task?> StringGetLeaseAsync(RedisKey key, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(Database, flags, RedisCommand.GET, key); return ExecuteAsync(msg, ResultProcessor.Lease); @@ -2438,9 +3757,9 @@ public Task> StringGetLeaseAsync(RedisKey key, CommandFlags flags = public Task StringGetAsync(RedisKey[] keys, CommandFlags flags = CommandFlags.None) { if (keys == null) throw new ArgumentNullException(nameof(keys)); - if (keys.Length == 0) return CompletedTask.FromResult(Array.Empty(), asyncState); + if (keys.Length == 0) return CompletedTask.FromDefault(Array.Empty(), asyncState); var msg = Message.Create(Database, flags, RedisCommand.MGET, keys); - return ExecuteAsync(msg, ResultProcessor.RedisValueArray); + return ExecuteAsync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); } public bool StringGetBit(RedisKey key, long offset, CommandFlags flags = CommandFlags.None) @@ -2479,15 +3798,27 @@ public Task StringGetSetAsync(RedisKey key, RedisValue value, Comman return ExecuteAsync(msg, ResultProcessor.RedisValue); } + public RedisValue StringGetDelete(RedisKey key, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.GETDEL, key); + return ExecuteSync(msg, ResultProcessor.RedisValue); + } + + public Task StringGetDeleteAsync(RedisKey key, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(Database, flags, RedisCommand.GETDEL, key); + return ExecuteAsync(msg, ResultProcessor.RedisValue); + } + public RedisValueWithExpiry 
StringGetWithExpiry(RedisKey key, CommandFlags flags = CommandFlags.None) { - var msg = GetStringGetWithExpiryMessage(key, flags, out ResultProcessor processor, out ServerEndPoint server); + var msg = GetStringGetWithExpiryMessage(key, flags, out ResultProcessor processor, out ServerEndPoint? server); return ExecuteSync(msg, processor, server); } public Task StringGetWithExpiryAsync(RedisKey key, CommandFlags flags = CommandFlags.None) { - var msg = GetStringGetWithExpiryMessage(key, flags, out ResultProcessor processor, out ServerEndPoint server); + var msg = GetStringGetWithExpiryMessage(key, flags, out ResultProcessor processor, out ServerEndPoint? server); return ExecuteAsync(msg, processor, server); } @@ -2529,30 +3860,72 @@ public Task StringLengthAsync(RedisKey key, CommandFlags flags = CommandFl return ExecuteAsync(msg, ResultProcessor.Int64); } - public bool StringSet(RedisKey key, RedisValue value, TimeSpan? expiry = null, When when = When.Always, CommandFlags flags = CommandFlags.None) + // Backwards compatibility overloads: + public bool StringSet(RedisKey key, RedisValue value, TimeSpan? expiry, When when) => + StringSet(key, value, expiry, false, when, CommandFlags.None); + public bool StringSet(RedisKey key, RedisValue value, TimeSpan? expiry, When when, CommandFlags flags) => + StringSet(key, value, expiry, false, when, flags); + + public bool StringSet(RedisKey key, RedisValue value, TimeSpan? 
expiry = null, bool keepTtl = false, When when = When.Always, CommandFlags flags = CommandFlags.None) { - var msg = GetStringSetMessage(key, value, expiry, when, flags); + var msg = GetStringSetMessage(key, value, Expiration.CreateOrKeepTtl(expiry, keepTtl), when, flags); return ExecuteSync(msg, ResultProcessor.Boolean); } public bool StringSet(KeyValuePair[] values, When when = When.Always, CommandFlags flags = CommandFlags.None) { - var msg = GetStringSetMessage(values, when, flags); + var msg = GetStringSetMessage(values, when, Expiration.Default, flags); + return ExecuteSync(msg, ResultProcessor.Boolean); + } + + public bool StringSet(KeyValuePair[] values, When when, Expiration expiry, CommandFlags flags) + { + var msg = GetStringSetMessage(values, when, expiry, flags); return ExecuteSync(msg, ResultProcessor.Boolean); } - public Task StringSetAsync(RedisKey key, RedisValue value, TimeSpan? expiry = null, When when = When.Always, CommandFlags flags = CommandFlags.None) + // Backwards compatibility overloads: + public Task StringSetAsync(RedisKey key, RedisValue value, TimeSpan? expiry, When when) => + StringSetAsync(key, value, expiry, false, when); + public Task StringSetAsync(RedisKey key, RedisValue value, TimeSpan? expiry, When when, CommandFlags flags) => + StringSetAsync(key, value, expiry, false, when, flags); + + public Task StringSetAsync(RedisKey key, RedisValue value, TimeSpan? 
expiry = null, bool keepTtl = false, When when = When.Always, CommandFlags flags = CommandFlags.None) { - var msg = GetStringSetMessage(key, value, expiry, when, flags); + var msg = GetStringSetMessage(key, value, Expiration.CreateOrKeepTtl(expiry, keepTtl), when, flags); return ExecuteAsync(msg, ResultProcessor.Boolean); } public Task StringSetAsync(KeyValuePair[] values, When when = When.Always, CommandFlags flags = CommandFlags.None) { - var msg = GetStringSetMessage(values, when, flags); + var msg = GetStringSetMessage(values, when, Expiration.Default, flags); + return ExecuteAsync(msg, ResultProcessor.Boolean); + } + + public Task StringSetAsync(KeyValuePair[] values, When when, Expiration expiry, CommandFlags flags) + { + var msg = GetStringSetMessage(values, when, expiry, flags); return ExecuteAsync(msg, ResultProcessor.Boolean); } + public RedisValue StringSetAndGet(RedisKey key, RedisValue value, TimeSpan? expiry, When when, CommandFlags flags) => + StringSetAndGet(key, value, expiry, false, when, flags); + + public RedisValue StringSetAndGet(RedisKey key, RedisValue value, TimeSpan? expiry = null, bool keepTtl = false, When when = When.Always, CommandFlags flags = CommandFlags.None) + { + var msg = GetStringSetAndGetMessage(key, value, expiry, keepTtl, when, flags); + return ExecuteSync(msg, ResultProcessor.RedisValue); + } + + public Task StringSetAndGetAsync(RedisKey key, RedisValue value, TimeSpan? expiry, When when, CommandFlags flags) => + StringSetAndGetAsync(key, value, expiry, false, when, flags); + + public Task StringSetAndGetAsync(RedisKey key, RedisValue value, TimeSpan? 
expiry = null, bool keepTtl = false, When when = When.Always, CommandFlags flags = CommandFlags.None) + { + var msg = GetStringSetAndGetMessage(key, value, expiry, keepTtl, when, flags); + return ExecuteAsync(msg, ResultProcessor.RedisValue); + } + public bool StringSetBit(RedisKey key, long offset, bool bit, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(Database, flags, RedisCommand.SETBIT, key, offset, bit); @@ -2611,72 +3984,160 @@ public Task StringSetRangeAsync(RedisKey key, long offset, RedisValu return ExecuteAsync(msg, ResultProcessor.RedisValue); } - private Message GetExpiryMessage(in RedisKey key, CommandFlags flags, TimeSpan? expiry, out ServerEndPoint server) + private Message GetCopyMessage(in RedisKey sourceKey, RedisKey destinationKey, int destinationDatabase, bool replace, CommandFlags flags) => + destinationDatabase switch + { + < -1 => throw new ArgumentOutOfRangeException(nameof(destinationDatabase)), + -1 when replace => Message.Create(Database, flags, RedisCommand.COPY, sourceKey, destinationKey, RedisLiterals.REPLACE), + -1 => Message.Create(Database, flags, RedisCommand.COPY, sourceKey, destinationKey), + _ when replace => Message.Create(Database, flags, RedisCommand.COPY, sourceKey, destinationKey, RedisLiterals.DB, destinationDatabase, RedisLiterals.REPLACE), + _ => Message.Create(Database, flags, RedisCommand.COPY, sourceKey, destinationKey, RedisLiterals.DB, destinationDatabase), + }; + + private Message GetExpiryMessage(in RedisKey key, CommandFlags flags, TimeSpan? expiry, ExpireWhen when, out ServerEndPoint? 
server) + { + if (expiry is null || expiry.Value == TimeSpan.MaxValue) + { + server = null; + return when switch + { + ExpireWhen.Always => Message.Create(Database, flags, RedisCommand.PERSIST, key), + _ => throw new ArgumentException("PERSIST cannot be used with when."), + }; + } + + long milliseconds = expiry.Value.Ticks / TimeSpan.TicksPerMillisecond; + return GetExpiryMessage(key, RedisCommand.PEXPIRE, RedisCommand.EXPIRE, milliseconds, when, flags, out server); + } + + private Message GetExpiryMessage(in RedisKey key, CommandFlags flags, DateTime? expiry, ExpireWhen when, out ServerEndPoint? server) { - TimeSpan duration; - if (expiry == null || (duration = expiry.Value) == TimeSpan.MaxValue) + if (expiry is null || expiry == DateTime.MaxValue) { server = null; - return Message.Create(Database, flags, RedisCommand.PERSIST, key); + return when switch + { + ExpireWhen.Always => Message.Create(Database, flags, RedisCommand.PERSIST, key), + _ => throw new ArgumentException("PERSIST cannot be used with when."), + }; } - long milliseconds = duration.Ticks / TimeSpan.TicksPerMillisecond; + + long milliseconds = Expiration.GetUnixTimeMilliseconds(expiry.Value); + return GetExpiryMessage(key, RedisCommand.PEXPIREAT, RedisCommand.EXPIREAT, milliseconds, when, flags, out server); + } + + private Message GetExpiryMessage( + in RedisKey key, + RedisCommand millisecondsCommand, + RedisCommand secondsCommand, + long milliseconds, + ExpireWhen when, + CommandFlags flags, + out ServerEndPoint? 
server) + { + server = null; if ((milliseconds % 1000) != 0) { - var features = GetFeatures(key, flags, out server); - if (server != null && features.MillisecondExpiry && multiplexer.CommandMap.IsAvailable(RedisCommand.PEXPIRE)) + var features = GetFeatures(key, flags, RedisCommand.PEXPIRE, out server); + if (server is not null && features.MillisecondExpiry && multiplexer.CommandMap.IsAvailable(millisecondsCommand)) { - return Message.Create(Database, flags, RedisCommand.PEXPIRE, key, milliseconds); + return when switch + { + ExpireWhen.Always => Message.Create(Database, flags, millisecondsCommand, key, milliseconds), + _ => Message.Create(Database, flags, millisecondsCommand, key, milliseconds, when.ToLiteral()), + }; } + server = null; } - server = null; + long seconds = milliseconds / 1000; - return Message.Create(Database, flags, RedisCommand.EXPIRE, key, seconds); + return when switch + { + ExpireWhen.Always => Message.Create(Database, flags, secondsCommand, key, seconds), + _ => Message.Create(Database, flags, secondsCommand, key, seconds, when.ToLiteral()), + }; } - private Message GetExpiryMessage(in RedisKey key, CommandFlags flags, DateTime? expiry, out ServerEndPoint server) + private Message GetListMultiPopMessage(RedisKey[] keys, RedisValue side, long count, CommandFlags flags) { - DateTime when; - if (expiry == null || (when = expiry.Value) == DateTime.MaxValue) + if (keys is null || keys.Length == 0) { - server = null; - return Message.Create(Database, flags, RedisCommand.PERSIST, key); + throw new ArgumentOutOfRangeException(nameof(keys), "keys must have a size of at least 1"); } - switch (when.Kind) + + var slot = multiplexer.ServerSelectionStrategy.HashSlot(keys[0]); + + var args = new RedisValue[2 + keys.Length + (count == 1 ? 
0 : 2)]; + var i = 0; + args[i++] = keys.Length; + foreach (var key in keys) { - case DateTimeKind.Local: - case DateTimeKind.Utc: - break; // fine, we can work with that - default: - throw new ArgumentException("Expiry time must be either Utc or Local", nameof(expiry)); + args[i++] = key.AsRedisValue(); } - long milliseconds = (when.ToUniversalTime() - RedisBase.UnixEpoch).Ticks / TimeSpan.TicksPerMillisecond; - if ((milliseconds % 1000) != 0) + args[i++] = side; + + if (count != 1) { - var features = GetFeatures(key, flags, out server); - if (server != null && features.MillisecondExpiry && multiplexer.CommandMap.IsAvailable(RedisCommand.PEXPIREAT)) - { - return Message.Create(Database, flags, RedisCommand.PEXPIREAT, key, milliseconds); - } + args[i++] = RedisLiterals.COUNT; + args[i++] = count; } - server = null; - long seconds = milliseconds / 1000; - return Message.Create(Database, flags, RedisCommand.EXPIREAT, key, seconds); + + return Message.CreateInSlot(Database, slot, flags, RedisCommand.LMPOP, args); + } + + private Message GetSortedSetMultiPopMessage(RedisKey[] keys, Order order, long count, CommandFlags flags) + { + if (keys is null || keys.Length == 0) + { + throw new ArgumentOutOfRangeException(nameof(keys), "keys must have a size of at least 1"); + } + + var slot = multiplexer.ServerSelectionStrategy.HashSlot(keys[0]); + + var args = new RedisValue[2 + keys.Length + (count == 1 ? 0 : 2)]; + var i = 0; + args[i++] = keys.Length; + foreach (var key in keys) + { + args[i++] = key.AsRedisValue(); + } + + args[i++] = order == Order.Ascending ? RedisLiterals.MIN : RedisLiterals.MAX; + + if (count != 1) + { + args[i++] = RedisLiterals.COUNT; + args[i++] = count; + } + + return Message.CreateInSlot(Database, slot, flags, RedisCommand.ZMPOP, args); } - private Message GetHashSetMessage(RedisKey key, HashEntry[] hashFields, CommandFlags flags) + private Message? 
GetHashSetMessage(RedisKey key, HashEntry[] hashFields, CommandFlags flags) { if (hashFields == null) throw new ArgumentNullException(nameof(hashFields)); switch (hashFields.Length) { case 0: return null; case 1: - return Message.Create(Database, flags, RedisCommand.HMSET, key, - hashFields[0].name, hashFields[0].value); + return Message.Create( + Database, + flags, + RedisCommand.HMSET, + key, + hashFields[0].name, + hashFields[0].value); case 2: - return Message.Create(Database, flags, RedisCommand.HMSET, key, - hashFields[0].name, hashFields[0].value, - hashFields[1].name, hashFields[1].value); + return Message.Create( + Database, + flags, + RedisCommand.HMSET, + key, + hashFields[0].name, + hashFields[0].value, + hashFields[1].name, + hashFields[1].value); default: var arr = new RedisValue[hashFields.Length * 2]; int offset = 0; @@ -2689,10 +4150,10 @@ private Message GetHashSetMessage(RedisKey key, HashEntry[] hashFields, CommandF } } - private ITransaction GetLockExtendTransaction(RedisKey key, RedisValue value, TimeSpan expiry) + private ITransaction? GetLockExtendTransaction(RedisKey key, RedisValue value, TimeSpan expiry) { var tran = CreateTransactionIfAvailable(asyncState); - if (tran != null) + if (tran is not null) { tran.AddCondition(Condition.StringEqual(key, value)); tran.KeyExpireAsync(key, expiry, CommandFlags.FireAndForget); @@ -2700,10 +4161,10 @@ private ITransaction GetLockExtendTransaction(RedisKey key, RedisValue value, Ti return tran; } - private ITransaction GetLockReleaseTransaction(RedisKey key, RedisValue value) + private ITransaction? 
GetLockReleaseTransaction(RedisKey key, RedisValue value) { var tran = CreateTransactionIfAvailable(asyncState); - if (tran != null) + if (tran is not null) { tran.AddCondition(Condition.StringEqual(key, value)); tran.KeyDeleteAsync(key, CommandFlags.FireAndForget); @@ -2711,127 +4172,194 @@ private ITransaction GetLockReleaseTransaction(RedisKey key, RedisValue value) return tran; } - private RedisValue GetLexRange(RedisValue value, Exclude exclude, bool isStart) + internal static RedisValue GetLexRange(RedisValue value, Exclude exclude, bool isStart, Order order) { - if (value.IsNull) + if (value.IsNull) // open search { - return isStart ? RedisLiterals.MinusSymbol : RedisLiterals.PlusSumbol; + if (order == Order.Ascending) return isStart ? RedisLiterals.MinusSymbol : RedisLiterals.PlusSymbol; + + return isStart ? RedisLiterals.PlusSymbol : RedisLiterals.MinusSymbol; // when descending order: Plus and Minus have to be reversed } - byte[] orig = value; - byte[] result = new byte[orig.Length + 1]; + var srcLength = value.GetByteCount(); + Debug.Assert(srcLength >= 0); + + byte[] result = new byte[srcLength + 1]; // no defaults here; must always explicitly specify [ / ( result[0] = (exclude & (isStart ? Exclude.Start : Exclude.Stop)) == 0 ? (byte)'[' : (byte)'('; - Buffer.BlockCopy(orig, 0, result, 1, orig.Length); + int written = value.CopyTo(result.AsSpan(1)); + Debug.Assert(written == srcLength, "predicted/actual length mismatch"); return result; } - private Message GetMultiStreamReadGroupMessage(StreamPosition[] streamPositions, RedisValue groupName, RedisValue consumerName, int? countPerStream, bool noAck, CommandFlags flags) + private Message GetMultiStreamReadGroupMessage(StreamPosition[] streamPositions, RedisValue groupName, RedisValue consumerName, int? countPerStream, bool noAck, TimeSpan? 
claimMinIdleTime, CommandFlags flags) => + new MultiStreamReadGroupCommandMessage( + Database, + flags, + streamPositions, + groupName, + consumerName, + countPerStream, + noAck, + claimMinIdleTime); + + private sealed class MultiStreamReadGroupCommandMessage : Message // XREADGROUP with multiple stream. Example: XREADGROUP GROUP groupName consumerName COUNT countPerStream STREAMS stream1 stream2 id1 id2 { - // Example: XREADGROUP GROUP groupName consumerName COUNT countPerStream STREAMS stream1 stream2 id1 id2 - if (streamPositions == null) throw new ArgumentNullException(nameof(streamPositions)); - if (streamPositions.Length == 0) throw new ArgumentOutOfRangeException(nameof(streamPositions), "streamOffsetPairs must contain at least one item."); + private readonly StreamPosition[] streamPositions; + private readonly RedisValue groupName; + private readonly RedisValue consumerName; + private readonly int? countPerStream; + private readonly bool noAck; + private readonly int argCount; + private readonly TimeSpan? claimMinIdleTime; - if (countPerStream.HasValue && countPerStream <= 0) + public MultiStreamReadGroupCommandMessage(int db, CommandFlags flags, StreamPosition[] streamPositions, RedisValue groupName, RedisValue consumerName, int? countPerStream, bool noAck, TimeSpan? claimMinIdleTime) + : base(db, flags, RedisCommand.XREADGROUP) { - throw new ArgumentOutOfRangeException(nameof(countPerStream), "countPerStream must be greater than 0."); - } - - var values = new RedisValue[ - 4 // Room for GROUP groupName consumerName & STREAMS - + (streamPositions.Length * 2) // Enough room for the stream keys and associated IDs. - + (countPerStream.HasValue ? 2 : 0) // Room for "COUNT num" or 0 if countPerStream is null. - + (noAck ? 1 : 0)]; // Allow for the NOACK subcommand. 
+ if (streamPositions == null) throw new ArgumentNullException(nameof(streamPositions)); + if (streamPositions.Length == 0) throw new ArgumentOutOfRangeException(nameof(streamPositions), "streamOffsetPairs must contain at least one item."); + for (int i = 0; i < streamPositions.Length; i++) + { + streamPositions[i].Key.AssertNotNull(); + } - var offset = 0; + if (countPerStream.HasValue && countPerStream <= 0) + { + throw new ArgumentOutOfRangeException(nameof(countPerStream), "countPerStream must be greater than 0."); + } - values[offset++] = StreamConstants.Group; - values[offset++] = groupName; - values[offset++] = consumerName; + groupName.AssertNotNull(); + consumerName.AssertNotNull(); + + this.streamPositions = streamPositions; + this.groupName = groupName; + this.consumerName = consumerName; + this.countPerStream = countPerStream; + this.noAck = noAck; + this.claimMinIdleTime = claimMinIdleTime; + + argCount = 4 // Room for GROUP groupName consumerName & STREAMS + + (streamPositions.Length * 2) // Enough room for the stream keys and associated IDs. + + (countPerStream.HasValue ? 2 : 0) // Room for "COUNT num" or 0 if countPerStream is null. + + (noAck ? 1 : 0) // Allow for the NOACK subcommand. + + (claimMinIdleTime.HasValue ? 2 : 0); // Allow for the CLAIM {minIdleTime} subcommand. 
+ } - if (countPerStream.HasValue) + public override int GetHashSlot(ServerSelectionStrategy serverSelectionStrategy) { - values[offset++] = StreamConstants.Count; - values[offset++] = countPerStream; + int slot = ServerSelectionStrategy.NoSlot; + for (int i = 0; i < streamPositions.Length; i++) + { + slot = serverSelectionStrategy.CombineSlot(slot, streamPositions[i].Key); + } + return slot; } - if (noAck) + protected override void WriteImpl(PhysicalConnection physical) { - values[offset++] = StreamConstants.NoAck; - } + physical.WriteHeader(Command, argCount); + physical.WriteBulkString("GROUP"u8); + physical.WriteBulkString(groupName); + physical.WriteBulkString(consumerName); - values[offset++] = StreamConstants.Streams; + if (countPerStream.HasValue) + { + physical.WriteBulkString("COUNT"u8); + physical.WriteBulkString(countPerStream.Value); + } - var pairCount = streamPositions.Length; + if (noAck) + { + physical.WriteBulkString("NOACK"u8); + } - for (var i = 0; i < pairCount; i++) - { - values[offset] = streamPositions[i].Key.AsRedisValue(); - values[offset + pairCount] = StreamPosition.Resolve(streamPositions[i].Position, RedisCommand.XREADGROUP); + if (claimMinIdleTime.HasValue) + { + physical.WriteBulkString("CLAIM"u8); + physical.WriteBulkString(claimMinIdleTime.Value.TotalMilliseconds); + } - offset++; + physical.WriteBulkString("STREAMS"u8); + for (int i = 0; i < streamPositions.Length; i++) + { + physical.Write(streamPositions[i].Key); + } + for (int i = 0; i < streamPositions.Length; i++) + { + physical.WriteBulkString(StreamPosition.Resolve(streamPositions[i].Position, RedisCommand.XREADGROUP)); + } } - return Message.Create(Database, flags, RedisCommand.XREADGROUP, values); + public override int ArgCount => argCount; } - private Message GetMultiStreamReadMessage(StreamPosition[] streamPositions, int? 
countPerStream, CommandFlags flags) - { - // Example: XREAD COUNT 2 STREAMS mystream writers 0-0 0-0 + private Message GetMultiStreamReadMessage(StreamPosition[] streamPositions, int? countPerStream, CommandFlags flags) => + new MultiStreamReadCommandMessage(Database, flags, streamPositions, countPerStream); - if (streamPositions == null) throw new ArgumentNullException(nameof(streamPositions)); - if (streamPositions.Length == 0) throw new ArgumentOutOfRangeException(nameof(streamPositions), "streamOffsetPairs must contain at least one item."); + private sealed class MultiStreamReadCommandMessage : Message // XREAD with multiple stream. Example: XREAD COUNT 2 STREAMS mystream writers 0-0 0-0 + { + private readonly StreamPosition[] streamPositions; + private readonly int? countPerStream; + private readonly int argCount; - if (countPerStream.HasValue && countPerStream <= 0) + public MultiStreamReadCommandMessage(int db, CommandFlags flags, StreamPosition[] streamPositions, int? countPerStream) + : base(db, flags, RedisCommand.XREAD) { - throw new ArgumentOutOfRangeException(nameof(countPerStream), "countPerStream must be greater than 0."); - } + if (streamPositions == null) throw new ArgumentNullException(nameof(streamPositions)); + if (streamPositions.Length == 0) throw new ArgumentOutOfRangeException(nameof(streamPositions), "streamOffsetPairs must contain at least one item."); + for (int i = 0; i < streamPositions.Length; i++) + { + streamPositions[i].Key.AssertNotNull(); + } + + if (countPerStream.HasValue && countPerStream <= 0) + { + throw new ArgumentOutOfRangeException(nameof(countPerStream), "countPerStream must be greater than 0."); + } - var values = new RedisValue[ - 1 // Streams keyword. - + (streamPositions.Length * 2) // Room for the stream names and the ID after which to begin reading. - + (countPerStream.HasValue ? 2 : 0)]; // Room for "COUNT num" or 0 if countPerStream is null. 
+ this.streamPositions = streamPositions; + this.countPerStream = countPerStream; - var offset = 0; + argCount = 1 // Streams keyword. + + (countPerStream.HasValue ? 2 : 0) // Room for "COUNT num" or 0 if countPerStream is null. + + (streamPositions.Length * 2); // Room for the stream names and the ID after which to begin reading. + } - if (countPerStream.HasValue) + public override int GetHashSlot(ServerSelectionStrategy serverSelectionStrategy) { - values[offset++] = StreamConstants.Count; - values[offset++] = countPerStream; + int slot = ServerSelectionStrategy.NoSlot; + for (int i = 0; i < streamPositions.Length; i++) + { + slot = serverSelectionStrategy.CombineSlot(slot, streamPositions[i].Key); + } + return slot; } - values[offset++] = StreamConstants.Streams; - - // Write the stream names and the message IDs from which to read for the associated stream. Each pair - // will be separated by an offset of the index of the stream name plus the pair count. - - /* - * [0] = COUNT - * [1] = 2 - * [3] = STREAMS - * [4] = stream1 - * [5] = stream2 - * [6] = stream3 - * [7] = id1 - * [8] = id2 - * [9] = id3 - * - * */ - - var pairCount = streamPositions.Length; - - for (var i = 0; i < pairCount; i++) + protected override void WriteImpl(PhysicalConnection physical) { - values[offset] = streamPositions[i].Key.AsRedisValue(); - values[offset + pairCount] = StreamPosition.Resolve(streamPositions[i].Position, RedisCommand.XREAD); + physical.WriteHeader(Command, argCount); + + if (countPerStream.HasValue) + { + physical.WriteBulkString("COUNT"u8); + physical.WriteBulkString(countPerStream.Value); + } - offset++; + physical.WriteBulkString("STREAMS"u8); + for (int i = 0; i < streamPositions.Length; i++) + { + physical.Write(streamPositions[i].Key); + } + for (int i = 0; i < streamPositions.Length; i++) + { + physical.WriteBulkString(StreamPosition.Resolve(streamPositions[i].Position, RedisCommand.XREADGROUP)); + } } - return Message.Create(Database, flags, RedisCommand.XREAD, 
values); + public override int ArgCount => argCount; } - private RedisValue GetRange(double value, Exclude exclude, bool isStart) + private static RedisValue GetRange(double value, Exclude exclude, bool isStart) { if (isStart) { @@ -2850,45 +4378,87 @@ private Message GetRestoreMessage(RedisKey key, byte[] value, TimeSpan? expiry, return Message.Create(Database, flags, RedisCommand.RESTORE, key, pttl, value); } - private Message GetSortedSetAddMessage(RedisKey key, RedisValue member, double score, When when, CommandFlags flags) + private Message GetSetIntersectionLengthMessage(RedisKey[] keys, long limit = 0, CommandFlags flags = CommandFlags.None) { - WhenAlwaysOrExistsOrNotExists(when); - switch (when) + if (keys == null) throw new ArgumentNullException(nameof(keys)); + + var values = new RedisValue[1 + keys.Length + (limit > 0 ? 2 : 0)]; + int i = 0; + values[i++] = keys.Length; + for (var j = 0; j < keys.Length; j++) + { + values[i++] = keys[j].AsRedisValue(); + } + if (limit > 0) { - case When.Always: return Message.Create(Database, flags, RedisCommand.ZADD, key, score, member); - case When.NotExists: return Message.Create(Database, flags, RedisCommand.ZADD, key, RedisLiterals.NX, score, member); - case When.Exists: return Message.Create(Database, flags, RedisCommand.ZADD, key, RedisLiterals.XX, score, member); - default: throw new ArgumentOutOfRangeException(nameof(when)); + values[i++] = RedisLiterals.LIMIT; + values[i] = limit; } + + return Message.Create(Database, flags, RedisCommand.SINTERCARD, values); } - private Message GetSortedSetAddMessage(RedisKey key, SortedSetEntry[] values, When when, CommandFlags flags) + private Message GetSortedSetAddMessage(RedisKey key, RedisValue member, double score, SortedSetWhen when, bool change, CommandFlags flags) + { + RedisValue[] arr = new RedisValue[2 + when.CountBits() + (change ? 
1 : 0)]; + int index = 0; + if ((when & SortedSetWhen.NotExists) != 0) + { + arr[index++] = RedisLiterals.NX; + } + if ((when & SortedSetWhen.Exists) != 0) + { + arr[index++] = RedisLiterals.XX; + } + if ((when & SortedSetWhen.GreaterThan) != 0) + { + arr[index++] = RedisLiterals.GT; + } + if ((when & SortedSetWhen.LessThan) != 0) + { + arr[index++] = RedisLiterals.LT; + } + if (change) + { + arr[index++] = RedisLiterals.CH; + } + arr[index++] = score; + arr[index++] = member; + return Message.Create(Database, flags, RedisCommand.ZADD, key, arr); + } + + private Message? GetSortedSetAddMessage(RedisKey key, SortedSetEntry[] values, SortedSetWhen when, bool change, CommandFlags flags) { - WhenAlwaysOrExistsOrNotExists(when); if (values == null) throw new ArgumentNullException(nameof(values)); switch (values.Length) { case 0: return null; case 1: - return GetSortedSetAddMessage(key, values[0].element, values[0].score, when, flags); + return GetSortedSetAddMessage(key, values[0].element, values[0].score, when, change, flags); default: - RedisValue[] arr; + RedisValue[] arr = new RedisValue[(values.Length * 2) + when.CountBits() + (change ? 
1 : 0)]; int index = 0; - switch (when) + if ((when & SortedSetWhen.NotExists) != 0) + { + arr[index++] = RedisLiterals.NX; + } + if ((when & SortedSetWhen.Exists) != 0) + { + arr[index++] = RedisLiterals.XX; + } + if ((when & SortedSetWhen.GreaterThan) != 0) + { + arr[index++] = RedisLiterals.GT; + } + if ((when & SortedSetWhen.LessThan) != 0) { - case When.Always: - arr = new RedisValue[values.Length * 2]; - break; - case When.NotExists: - arr = new RedisValue[(values.Length * 2) + 1]; - arr[index++] = RedisLiterals.NX; - break; - case When.Exists: - arr = new RedisValue[(values.Length * 2) + 1]; - arr[index++] = RedisLiterals.XX; - break; - default: throw new ArgumentOutOfRangeException(nameof(when)); + arr[index++] = RedisLiterals.LT; } + if (change) + { + arr[index++] = RedisLiterals.CH; + } + for (int i = 0; i < values.Length; i++) { arr[index++] = values[i].score; @@ -2898,28 +4468,31 @@ private Message GetSortedSetAddMessage(RedisKey key, SortedSetEntry[] values, Wh } } - private Message GetSortedSetAddMessage(RedisKey destination, RedisKey key, long skip, long take, Order order, SortType sortType, RedisValue by, RedisValue[] get, CommandFlags flags) + private Message GetSortMessage(RedisKey destination, RedisKey key, long skip, long take, Order order, SortType sortType, RedisValue by, RedisValue[]? get, CommandFlags flags, out ServerEndPoint? server) { + server = null; + var command = destination.IsNull && GetFeatures(key, flags, RedisCommand.SORT_RO, out server).ReadOnlySort + ? 
RedisCommand.SORT_RO + : RedisCommand.SORT; + + // If SORT_RO is not available, we cannot issue the command to a read-only replica + if (command == RedisCommand.SORT) + { + server = null; + } + // most common cases; no "get", no "by", no "destination", no "skip", no "take" if (destination.IsNull && skip == 0 && take == -1 && by.IsNull && (get == null || get.Length == 0)) { - switch (order) + return order switch { - case Order.Ascending: - switch (sortType) - { - case SortType.Numeric: return Message.Create(Database, flags, RedisCommand.SORT, key); - case SortType.Alphabetic: return Message.Create(Database, flags, RedisCommand.SORT, key, RedisLiterals.ALPHA); - } - break; - case Order.Descending: - switch (sortType) - { - case SortType.Numeric: return Message.Create(Database, flags, RedisCommand.SORT, key, RedisLiterals.DESC); - case SortType.Alphabetic: return Message.Create(Database, flags, RedisCommand.SORT, key, RedisLiterals.DESC, RedisLiterals.ALPHA); - } - break; - } + Order.Ascending when sortType == SortType.Numeric => Message.Create(Database, flags, command, key), + Order.Ascending when sortType == SortType.Alphabetic => Message.Create(Database, flags, command, key, RedisLiterals.ALPHA), + Order.Descending when sortType == SortType.Numeric => Message.Create(Database, flags, command, key, RedisLiterals.DESC), + Order.Descending when sortType == SortType.Alphabetic => Message.Create(Database, flags, command, key, RedisLiterals.DESC, RedisLiterals.ALPHA), + Order.Ascending or Order.Descending => throw new ArgumentOutOfRangeException(nameof(sortType)), + _ => throw new ArgumentOutOfRangeException(nameof(order)), + }; } // and now: more complicated scenarios... 
@@ -2929,7 +4502,8 @@ private Message GetSortedSetAddMessage(RedisKey destination, RedisKey key, long values.Add(RedisLiterals.BY); values.Add(by); } - if (skip != 0 || take != -1)// these are our defaults that mean "everything"; anything else needs to be sent explicitly + // these are our defaults that mean "everything"; anything else needs to be sent explicitly + if (skip != 0 || take != -1) { values.Add(RedisLiterals.LIMIT); values.Add(skip); @@ -2963,51 +4537,105 @@ private Message GetSortedSetAddMessage(RedisKey destination, RedisKey key, long values.Add(item); } } - if (destination.IsNull) return Message.Create(Database, flags, RedisCommand.SORT, key, values.ToArray()); + if (destination.IsNull) return Message.Create(Database, flags, command, key, values.ToArray()); - // because we are using STORE, we need to push this to a master - if (Message.GetMasterReplicaFlags(flags) == CommandFlags.DemandReplica) + // Because we are using STORE, we need to push this to a primary + if (Message.GetPrimaryReplicaFlags(flags) == CommandFlags.DemandReplica) { - throw ExceptionFactory.MasterOnly(multiplexer.IncludeDetailInExceptions, RedisCommand.SORT, null, null); + throw ExceptionFactory.PrimaryOnly(multiplexer.RawConfig.IncludeDetailInExceptions, RedisCommand.SORT, null, null); } - flags = Message.SetMasterReplicaFlags(flags, CommandFlags.DemandMaster); + flags = Message.SetPrimaryReplicaFlags(flags, CommandFlags.DemandMaster); values.Add(RedisLiterals.STORE); return Message.Create(Database, flags, RedisCommand.SORT, key, values.ToArray(), destination); } - private Message GetSortedSetCombineAndStoreCommandMessage(SetOperation operation, RedisKey destination, RedisKey[] keys, double[] weights, Aggregate aggregate, CommandFlags flags) + private Message GetSortedSetCombineAndStoreCommandMessage(SetOperation operation, RedisKey destination, RedisKey[] keys, double[]? 
weights, Aggregate aggregate, CommandFlags flags) { - RedisCommand command; - switch (operation) + var command = operation.ToCommand(store: true); + if (keys == null) { - case SetOperation.Intersect: command = RedisCommand.ZINTERSTORE; break; - case SetOperation.Union: command = RedisCommand.ZUNIONSTORE; break; - default: throw new ArgumentOutOfRangeException(nameof(operation)); + throw new ArgumentNullException(nameof(keys)); } - if (keys == null) throw new ArgumentNullException(nameof(keys)); + if (command == RedisCommand.ZDIFFSTORE && (weights != null || aggregate != Aggregate.Sum)) + { + throw new ArgumentException("ZDIFFSTORE cannot be used with weights or aggregation."); + } + if (weights != null && keys.Length != weights.Length) + { + throw new ArgumentException("Keys and weights should have the same number of elements.", nameof(weights)); + } + + RedisValue[] values = RedisValue.EmptyArray; + + var argsLength = (weights?.Length > 0 ? 1 + weights.Length : 0) + (aggregate != Aggregate.Sum ? 2 : 0); + if (argsLength > 0) + { + values = new RedisValue[argsLength]; + AddWeightsAggregationAndScore(values, weights, aggregate); + } + return new SortedSetCombineAndStoreCommandMessage(Database, flags, command, destination, keys, values); + } + + private Message GetSortedSetCombineCommandMessage(SetOperation operation, RedisKey[] keys, double[]? 
weights, Aggregate aggregate, bool withScores, CommandFlags flags) + { + var command = operation.ToCommand(store: false); + if (keys == null) + { + throw new ArgumentNullException(nameof(keys)); + } + if (command == RedisCommand.ZDIFF && (weights != null || aggregate != Aggregate.Sum)) + { + throw new ArgumentException("ZDIFF cannot be used with weights or aggregation."); + } + if (weights != null && keys.Length != weights.Length) + { + throw new ArgumentException("Keys and weights should have the same number of elements.", nameof(weights)); + } + + var i = 0; + var values = new RedisValue[1 + keys.Length + + (weights?.Length > 0 ? 1 + weights.Length : 0) + + (aggregate != Aggregate.Sum ? 2 : 0) + + (withScores ? 1 : 0)]; + values[i++] = keys.Length; + foreach (var key in keys) + { + values[i++] = key.AsRedisValue(); + } + AddWeightsAggregationAndScore(values.AsSpan(i), weights, aggregate, withScores: withScores); + return Message.Create(Database, flags, command, values ?? RedisValue.EmptyArray); + } - List values = null; - if (weights != null && weights.Length != 0) + private void AddWeightsAggregationAndScore(Span values, double[]? 
weights, Aggregate aggregate, bool withScores = false) + { + int i = 0; + if (weights?.Length > 0) { - (values ??= new List()).Add(RedisLiterals.WEIGHTS); + values[i++] = RedisLiterals.WEIGHTS; foreach (var weight in weights) - values.Add(weight); + { + values[i++] = weight; + } } switch (aggregate) { - case Aggregate.Sum: break; // default + case Aggregate.Sum: + break; // add nothing - Redis default case Aggregate.Min: - (values ??= new List()).Add(RedisLiterals.AGGREGATE); - values.Add(RedisLiterals.MIN); + values[i++] = RedisLiterals.AGGREGATE; + values[i++] = RedisLiterals.MIN; break; case Aggregate.Max: - (values ??= new List()).Add(RedisLiterals.AGGREGATE); - values.Add(RedisLiterals.MAX); + values[i++] = RedisLiterals.AGGREGATE; + values[i++] = RedisLiterals.MAX; break; default: throw new ArgumentOutOfRangeException(nameof(aggregate)); } - return new SortedSetCombineAndStoreCommandMessage(Database, flags, command, destination, keys, values?.ToArray() ?? RedisValue.EmptyArray); + if (withScores) + { + values[i++] = RedisLiterals.WITHSCORES; + } } private Message GetSortedSetLengthMessage(RedisKey key, double min, double max, Exclude exclude, CommandFlags flags) @@ -3020,6 +4648,25 @@ private Message GetSortedSetLengthMessage(RedisKey key, double min, double max, return Message.Create(Database, flags, RedisCommand.ZCOUNT, key, from, to); } + private Message GetSortedSetIntersectionLengthMessage(RedisKey[] keys, long limit, CommandFlags flags) + { + if (keys == null) throw new ArgumentNullException(nameof(keys)); + + var i = 0; + var values = new RedisValue[1 + keys.Length + (limit > 0 ? 
2 : 0)]; + values[i++] = keys.Length; + foreach (var key in keys) + { + values[i++] = key.AsRedisValue(); + } + if (limit > 0) + { + values[i++] = RedisLiterals.LIMIT; + values[i++] = limit; + } + return Message.Create(Database, flags, RedisCommand.ZINTERCARD, values); + } + private Message GetSortedSetRangeByScoreMessage(RedisKey key, double start, double stop, Exclude exclude, Order order, long skip, long take, CommandFlags flags, bool withScores) { // usage: {ZRANGEBYSCORE|ZREVRANGEBYSCORE} key from to [WITHSCORES] [LIMIT offset count] @@ -3055,20 +4702,18 @@ private Message GetSortedSetRangeByScoreMessage(RedisKey key, double start, doub private Message GetSortedSetRemoveRangeByScoreMessage(RedisKey key, double start, double stop, Exclude exclude, CommandFlags flags) { - return Message.Create(Database, flags, RedisCommand.ZREMRANGEBYSCORE, key, - GetRange(start, exclude, true), GetRange(stop, exclude, false)); + return Message.Create( + Database, + flags, + RedisCommand.ZREMRANGEBYSCORE, + key, + GetRange(start, exclude, true), + GetRange(stop, exclude, false)); } private Message GetStreamAcknowledgeMessage(RedisKey key, RedisValue groupName, RedisValue messageId, CommandFlags flags) { - var values = new RedisValue[] - { - key.AsRedisValue(), - groupName, - messageId - }; - - return Message.Create(Database, flags, RedisCommand.XACK, values); + return Message.Create(Database, flags, RedisCommand.XACK, key, groupName, messageId); } private Message GetStreamAcknowledgeMessage(RedisKey key, RedisValue groupName, RedisValue[] messageIds, CommandFlags flags) @@ -3076,29 +4721,47 @@ private Message GetStreamAcknowledgeMessage(RedisKey key, RedisValue groupName, if (messageIds == null) throw new ArgumentNullException(nameof(messageIds)); if (messageIds.Length == 0) throw new ArgumentOutOfRangeException(nameof(messageIds), "messageIds must contain at least one item."); - var values = new RedisValue[messageIds.Length + 2]; + var values = new RedisValue[messageIds.Length 
+ 1]; + values[0] = groupName; + messageIds.AsSpan().CopyTo(values.AsSpan(1)); - var offset = 0; + return Message.Create(Database, flags, RedisCommand.XACK, key, values); + } - values[offset++] = key.AsRedisValue(); - values[offset++] = groupName; + private Message GetStreamAcknowledgeAndDeleteMessage(RedisKey key, RedisValue groupName, StreamTrimMode mode, RedisValue messageId, CommandFlags flags) + { + return Message.Create(Database, flags, RedisCommand.XACKDEL, key, groupName, StreamConstants.GetMode(mode), StreamConstants.Ids, 1, messageId); + } - for (var i = 0; i < messageIds.Length; i++) - { - values[offset++] = messageIds[i]; - } + private Message GetStreamAcknowledgeAndDeleteMessage(RedisKey key, RedisValue groupName, StreamTrimMode mode, RedisValue[] messageIds, CommandFlags flags) + { + if (messageIds == null) throw new ArgumentNullException(nameof(messageIds)); + if (messageIds.Length == 0) throw new ArgumentOutOfRangeException(nameof(messageIds), "messageIds must contain at least one item."); - return Message.Create(Database, flags, RedisCommand.XACK, values); + var values = new RedisValue[messageIds.Length + 4]; + + var offset = 0; + values[offset++] = groupName; + values[offset++] = StreamConstants.GetMode(mode); + values[offset++] = StreamConstants.Ids; + values[offset++] = messageIds.Length; + messageIds.AsSpan().CopyTo(values.AsSpan(offset)); + Debug.Assert(offset + messageIds.Length == values.Length); + + return Message.Create(Database, flags, RedisCommand.XACKDEL, key, values); } - private Message GetStreamAddMessage(RedisKey key, RedisValue messageId, int? maxLength, bool useApproximateMaxLength, NameValueEntry streamPair, CommandFlags flags) + private Message GetStreamAddMessage(in RedisKey key, RedisValue messageId, in StreamIdempotentId idempotentId, long? maxLength, bool useApproximateMaxLength, NameValueEntry streamPair, long? 
limit, StreamTrimMode mode, CommandFlags flags) { // Calculate the correct number of arguments: // 3 array elements for Entry ID & NameValueEntry.Name & NameValueEntry.Value. // 2 elements if using MAXLEN (keyword & value), otherwise 0. // 1 element if using Approximate Length (~), otherwise 0. var totalLength = 3 + (maxLength.HasValue ? 2 : 0) - + (maxLength.HasValue && useApproximateMaxLength ? 1 : 0); + + idempotentId.ArgCount + + (maxLength.HasValue && useApproximateMaxLength ? 1 : 0) + + (limit.HasValue ? 2 : 0) + + (mode != StreamTrimMode.KeepReferences ? 1 : 0); var values = new RedisValue[totalLength]; var offset = 0; @@ -3110,26 +4773,38 @@ private Message GetStreamAddMessage(RedisKey key, RedisValue messageId, int? max if (useApproximateMaxLength) { values[offset++] = StreamConstants.ApproximateMaxLen; - values[offset++] = maxLength.Value; - } - else - { - values[offset++] = maxLength.Value; } + + values[offset++] = maxLength.Value; + } + + if (limit.HasValue) + { + values[offset++] = RedisLiterals.LIMIT; + values[offset++] = limit.Value; } + if (mode != StreamTrimMode.KeepReferences) + { + values[offset++] = StreamConstants.GetMode(mode); + } + + idempotentId.WriteTo(values, ref offset); + values[offset++] = messageId; values[offset++] = streamPair.Name; - values[offset] = streamPair.Value; + values[offset++] = streamPair.Value; + Debug.Assert(offset == totalLength); return Message.Create(Database, flags, RedisCommand.XADD, key, values); } - private Message GetStreamAddMessage(RedisKey key, RedisValue entryId, int? maxLength, bool useApproximateMaxLength, NameValueEntry[] streamPairs, CommandFlags flags) + /// + /// Gets message for . + /// + private Message GetStreamAddMessage(in RedisKey key, RedisValue entryId, in StreamIdempotentId idempotentId, long? maxLength, bool useApproximateMaxLength, NameValueEntry[] streamPairs, long? limit, StreamTrimMode mode, CommandFlags flags) { - // See https://redis.io/commands/xadd. 
- if (streamPairs == null) throw new ArgumentNullException(nameof(streamPairs)); if (streamPairs.Length == 0) throw new ArgumentOutOfRangeException(nameof(streamPairs), "streamPairs must contain at least one item."); @@ -3138,13 +4813,13 @@ private Message GetStreamAddMessage(RedisKey key, RedisValue entryId, int? maxLe throw new ArgumentOutOfRangeException(nameof(maxLength), "maxLength must be greater than 0."); } - var includeMaxLen = maxLength.HasValue ? 2 : 0; - var includeApproxLen = maxLength.HasValue && useApproximateMaxLength ? 1 : 0; - - var totalLength = (streamPairs.Length * 2) // Room for the name/value pairs - + 1 // The stream entry ID - + includeMaxLen // 2 or 0 (MAXLEN keyword & the count) - + includeApproxLen; // 1 or 0 + var totalLength = (streamPairs.Length * 2) // Room for the name/value pairs + + 1 // The stream entry ID + + idempotentId.ArgCount + + (maxLength.HasValue ? 2 : 0) // MAXLEN N + + (maxLength.HasValue && useApproximateMaxLength ? 1 : 0) // ~ + + (mode == StreamTrimMode.KeepReferences ? 0 : 1) // relevant trim-mode keyword + + (limit.HasValue ? 2 : 0); // LIMIT N var values = new RedisValue[totalLength]; @@ -3162,6 +4837,19 @@ private Message GetStreamAddMessage(RedisKey key, RedisValue entryId, int? maxLe values[offset++] = maxLength.Value; } + if (limit.HasValue) + { + values[offset++] = RedisLiterals.LIMIT; + values[offset++] = limit.Value; + } + + if (mode != StreamTrimMode.KeepReferences) + { + values[offset++] = StreamConstants.GetMode(mode); + } + + idempotentId.WriteTo(values, ref offset); + values[offset++] = entryId; for (var i = 0; i < streamPairs.Length; i++) @@ -3170,20 +4858,46 @@ private Message GetStreamAddMessage(RedisKey key, RedisValue entryId, int? 
maxLe values[offset++] = streamPairs[i].Value; } + Debug.Assert(offset == totalLength); return Message.Create(Database, flags, RedisCommand.XADD, key, values); } + private Message GetStreamAutoClaimMessage(RedisKey key, RedisValue consumerGroup, RedisValue assignToConsumer, long minIdleTimeInMs, RedisValue startAtId, int? count, bool idsOnly, CommandFlags flags) + { + // XAUTOCLAIM [COUNT count] [JUSTID] + var values = new RedisValue[4 + (count is null ? 0 : 2) + (idsOnly ? 1 : 0)]; + + var offset = 0; + + values[offset++] = consumerGroup; + values[offset++] = assignToConsumer; + values[offset++] = minIdleTimeInMs; + values[offset++] = startAtId; + + if (count is not null) + { + values[offset++] = StreamConstants.Count; + values[offset++] = count.Value; + } + + if (idsOnly) + { + values[offset++] = StreamConstants.JustId; + } + + return Message.Create(Database, flags, RedisCommand.XAUTOCLAIM, key, values); + } + private Message GetStreamClaimMessage(RedisKey key, RedisValue consumerGroup, RedisValue assignToConsumer, long minIdleTimeInMs, RedisValue[] messageIds, bool returnJustIds, CommandFlags flags) { if (messageIds == null) throw new ArgumentNullException(nameof(messageIds)); if (messageIds.Length == 0) throw new ArgumentOutOfRangeException(nameof(messageIds), "messageIds must contain at least one item."); // XCLAIM ... - var values = new RedisValue[4 + messageIds.Length + (returnJustIds ? 1 : 0)]; + var values = new RedisValue[3 + messageIds.Length + (returnJustIds ? 
1 : 0)]; var offset = 0; - values[offset++] = key.AsRedisValue(); values[offset++] = consumerGroup; values[offset++] = assignToConsumer; values[offset++] = minIdleTimeInMs; @@ -3198,7 +4912,7 @@ private Message GetStreamClaimMessage(RedisKey key, RedisValue consumerGroup, Re values[offset] = StreamConstants.JustId; } - return Message.Create(Database, flags, RedisCommand.XCLAIM, values); + return Message.Create(Database, flags, RedisCommand.XCLAIM, key, values); } private Message GetStreamCreateConsumerGroupMessage(RedisKey key, RedisValue groupName, RedisValue? position = null, bool createStream = true, CommandFlags flags = CommandFlags.None) @@ -3217,15 +4931,20 @@ private Message GetStreamCreateConsumerGroupMessage(RedisKey key, RedisValue gro values[4] = StreamConstants.MkStream; } - return Message.Create(Database, + return Message.Create( + Database, flags, RedisCommand.XGROUP, values); } - private Message GetStreamPendingMessagesMessage(RedisKey key, RedisValue groupName, RedisValue? minId, RedisValue? maxId, int count, RedisValue consumerName, CommandFlags flags) + /// + /// Gets a message for . + /// + /// + private Message GetStreamPendingMessagesMessage(RedisKey key, RedisValue groupName, RedisValue? minId, RedisValue? maxId, int count, RedisValue consumerName, long? minIdleTimeInMs, CommandFlags flags) { - // > XPENDING mystream mygroup - + 10 [consumer name] + // > XPENDING mystream mygroup [IDLE min-idle-time] - + 10 [consumer name] // 1) 1) 1526569498055 - 0 // 2) "Bob" // 3) (integer)74170458 @@ -3234,30 +4953,45 @@ private Message GetStreamPendingMessagesMessage(RedisKey key, RedisValue groupNa // 2) "Bob" // 3) (integer)74170458 // 4) (integer)1 - - // See https://redis.io/topics/streams-intro. - if (count <= 0) { throw new ArgumentOutOfRangeException(nameof(count), "count must be greater than 0."); } - var values = new RedisValue[consumerName == RedisValue.Null ? 
5 : 6]; + var valuesLength = 4; + if (consumerName != RedisValue.Null) + { + valuesLength++; + } + + if (minIdleTimeInMs is not null) + { + valuesLength += 2; + } + var values = new RedisValue[valuesLength]; + + var offset = 0; - values[0] = key.AsRedisValue(); - values[1] = groupName; - values[2] = minId ?? StreamConstants.ReadMinValue; - values[3] = maxId ?? StreamConstants.ReadMaxValue; - values[4] = count; + values[offset++] = groupName; + if (minIdleTimeInMs is not null) + { + values[offset++] = "IDLE"; + values[offset++] = minIdleTimeInMs; + } + values[offset++] = minId ?? StreamConstants.ReadMinValue; + values[offset++] = maxId ?? StreamConstants.ReadMaxValue; + values[offset++] = count; if (consumerName != RedisValue.Null) { - values[5] = consumerName; + values[offset++] = consumerName; } - return Message.Create(Database, + return Message.Create( + Database, flags, RedisCommand.XPENDING, + key, values); } @@ -3273,8 +5007,8 @@ private Message GetStreamRangeMessage(RedisKey key, RedisValue? minId, RedisValu var values = new RedisValue[2 + (count.HasValue ? 2 : 0)]; - values[0] = (messageOrder == Order.Ascending ? actualMin : actualMax); - values[1] = (messageOrder == Order.Ascending ? actualMax : actualMin); + values[0] = messageOrder == Order.Ascending ? actualMin : actualMax; + values[1] = messageOrder == Order.Ascending ? actualMax : actualMin; if (count.HasValue) { @@ -3282,107 +5016,170 @@ private Message GetStreamRangeMessage(RedisKey key, RedisValue? minId, RedisValu values[3] = count.Value; } - return Message.Create(Database, + return Message.Create( + Database, flags, messageOrder == Order.Ascending ? RedisCommand.XRANGE : RedisCommand.XREVRANGE, key, values); } - private Message GetStreamReadGroupMessage(RedisKey key, RedisValue groupName, RedisValue consumerName, RedisValue afterId, int? 
count, bool noAck, CommandFlags flags) + private Message GetStreamReadGroupMessage(RedisKey key, RedisValue groupName, RedisValue consumerName, RedisValue afterId, int? count, bool noAck, TimeSpan? claimMinIdleTime, CommandFlags flags) => + new SingleStreamReadGroupCommandMessage(Database, flags, key, groupName, consumerName, afterId, count, noAck, claimMinIdleTime); + + private sealed class SingleStreamReadGroupCommandMessage : Message.CommandKeyBase // XREADGROUP with single stream. eg XREADGROUP GROUP mygroup Alice COUNT 1 STREAMS mystream > { - // Example: > XREADGROUP GROUP mygroup Alice COUNT 1 STREAMS mystream > - if (count.HasValue && count <= 0) + private readonly RedisValue groupName; + private readonly RedisValue consumerName; + private readonly RedisValue afterId; + private readonly int? count; + private readonly bool noAck; + private readonly int argCount; + private readonly TimeSpan? claimMinIdleTime; + + public SingleStreamReadGroupCommandMessage(int db, CommandFlags flags, RedisKey key, RedisValue groupName, RedisValue consumerName, RedisValue afterId, int? count, bool noAck, TimeSpan? claimMinIdleTime) + : base(db, flags, RedisCommand.XREADGROUP, key) { - throw new ArgumentOutOfRangeException(nameof(count), "count must be greater than 0."); + if (count.HasValue && count <= 0) + { + throw new ArgumentOutOfRangeException(nameof(count), "count must be greater than 0."); + } + + groupName.AssertNotNull(); + consumerName.AssertNotNull(); + afterId.AssertNotNull(); + + this.groupName = groupName; + this.consumerName = consumerName; + this.afterId = afterId; + this.count = count; + this.noAck = noAck; + this.claimMinIdleTime = claimMinIdleTime; + argCount = 6 + (count.HasValue ? 2 : 0) + (noAck ? 1 : 0) + (claimMinIdleTime.HasValue ? 2 : 0); } - var totalValueCount = 6 + (count.HasValue ? 2 : 0) + (noAck ? 
1 : 0); - var values = new RedisValue[totalValueCount]; + protected override void WriteImpl(PhysicalConnection physical) + { + physical.WriteHeader(Command, argCount); + physical.WriteBulkString("GROUP"u8); + physical.WriteBulkString(groupName); + physical.WriteBulkString(consumerName); - var offset = 0; + if (count.HasValue) + { + physical.WriteBulkString("COUNT"u8); + physical.WriteBulkString(count.Value); + } - values[offset++] = StreamConstants.Group; - values[offset++] = groupName; - values[offset++] = consumerName; + if (noAck) + { + physical.WriteBulkString("NOACK"u8); + } - if (count.HasValue) - { - values[offset++] = StreamConstants.Count; - values[offset++] = count.Value; - } + if (claimMinIdleTime.HasValue) + { + physical.WriteBulkString("CLAIM"u8); + physical.WriteBulkString(claimMinIdleTime.Value.TotalMilliseconds); + } - if (noAck) - { - values[offset++] = StreamConstants.NoAck; + physical.WriteBulkString("STREAMS"u8); + physical.Write(Key); + physical.WriteBulkString(afterId); } - values[offset++] = StreamConstants.Streams; - values[offset++] = key.AsRedisValue(); - values[offset] = afterId; - - return Message.Create(Database, - flags, - RedisCommand.XREADGROUP, - values); + public override int ArgCount => argCount; } - private Message GetSingleStreamReadMessage(RedisKey key, RedisValue afterId, int? count, CommandFlags flags) + private Message GetSingleStreamReadMessage(RedisKey key, RedisValue afterId, int? count, CommandFlags flags) => + new SingleStreamReadCommandMessage(Database, flags, key, afterId, count); + + private sealed class SingleStreamReadCommandMessage : Message.CommandKeyBase // XREAD with a single stream. Example: XREAD COUNT 2 STREAMS mystream 0-0 { - if (count.HasValue && count <= 0) + private readonly RedisValue afterId; + private readonly int? count; + private readonly int argCount; + + public SingleStreamReadCommandMessage(int db, CommandFlags flags, RedisKey key, RedisValue afterId, int? 
count) + : base(db, flags, RedisCommand.XREAD, key) { - throw new ArgumentOutOfRangeException(nameof(count), "count must be greater than 0."); - } + if (count.HasValue && count <= 0) + { + throw new ArgumentOutOfRangeException(nameof(count), "count must be greater than 0."); + } - var values = new RedisValue[3 + (count.HasValue ? 2 : 0)]; - var offset = 0; + afterId.AssertNotNull(); - if (count.HasValue) - { - values[offset++] = StreamConstants.Count; - values[offset++] = count.Value; + this.afterId = afterId; + this.count = count; + argCount = count.HasValue ? 5 : 3; } - values[offset++] = StreamConstants.Streams; - values[offset++] = key.AsRedisValue(); - values[offset] = afterId; + protected override void WriteImpl(PhysicalConnection physical) + { + physical.WriteHeader(Command, argCount); - // Example: > XREAD COUNT 2 STREAMS writers 1526999352406-0 - return Message.Create(Database, - flags, - RedisCommand.XREAD, - values); + if (count.HasValue) + { + physical.WriteBulkString("COUNT"u8); + physical.WriteBulkString(count.Value); + } + + physical.WriteBulkString("STREAMS"u8); + physical.Write(Key); + physical.WriteBulkString(afterId); + } + + public override int ArgCount => argCount; } - private Message GetStreamTrimMessage(RedisKey key, int maxLength, bool useApproximateMaxLength, CommandFlags flags) + private Message GetStreamTrimMessage(bool maxLen, RedisKey key, RedisValue threshold, bool useApproximateMaxLength, long? limit, StreamTrimMode mode, CommandFlags flags) { - if (maxLength <= 0) + if (limit.HasValue && limit.GetValueOrDefault() <= 0) { - throw new ArgumentOutOfRangeException(nameof(maxLength), "maxLength must be greater than 0."); + throw new ArgumentOutOfRangeException(nameof(limit), "limit must be greater than 0 when specified."); + } + + if (limit is null && !useApproximateMaxLength && mode == StreamTrimMode.KeepReferences) + { + // avoid array alloc in simple case + return Message.Create(Database, flags, RedisCommand.XTRIM, key, maxLen ? 
StreamConstants.MaxLen : StreamConstants.MinId, threshold); } - var values = new RedisValue[2 + (useApproximateMaxLength ? 1 : 0)]; + var values = new RedisValue[2 + (useApproximateMaxLength ? 1 : 0) + (limit.HasValue ? 2 : 0) + (mode == StreamTrimMode.KeepReferences ? 0 : 1)]; + + var offset = 0; - values[0] = StreamConstants.MaxLen; + values[offset++] = maxLen ? StreamConstants.MaxLen : StreamConstants.MinId; if (useApproximateMaxLength) { - values[1] = StreamConstants.ApproximateMaxLen; - values[2] = maxLength; + values[offset++] = StreamConstants.ApproximateMaxLen; } - else + + values[offset++] = threshold; + + if (limit.HasValue) + { + values[offset++] = RedisLiterals.LIMIT; + values[offset++] = limit.GetValueOrDefault(); + } + + if (mode != StreamTrimMode.KeepReferences) // omit when not needed, for back-compat { - values[1] = maxLength; + values[offset++] = StreamConstants.GetMode(mode); } - return Message.Create(Database, + Debug.Assert(offset == values.Length); + + return Message.Create( + Database, flags, RedisCommand.XTRIM, key, values); } - private Message GetStringBitOperationMessage(Bitwise operation, RedisKey destination, RedisKey[] keys, CommandFlags flags) + private Message? 
GetStringBitOperationMessage(Bitwise operation, RedisKey destination, RedisKey[] keys, CommandFlags flags) { if (keys == null) throw new ArgumentNullException(nameof(keys)); if (keys.Length == 0) return null; @@ -3409,7 +5206,8 @@ private Message GetStringBitOperationMessage(Bitwise operation, RedisKey destina int slot = serverSelectionStrategy.HashSlot(destination); slot = serverSelectionStrategy.CombineSlot(slot, first); if (second.IsNull || operation == Bitwise.Not) - { // unary + { + // unary return Message.CreateInSlot(Database, slot, flags, RedisCommand.BITOP, new[] { op, destination.AsRedisValue(), first.AsRedisValue() }); } // binary @@ -3417,13 +5215,23 @@ private Message GetStringBitOperationMessage(Bitwise operation, RedisKey destina return Message.CreateInSlot(Database, slot, flags, RedisCommand.BITOP, new[] { op, destination.AsRedisValue(), first.AsRedisValue(), second.AsRedisValue() }); } - private Message GetStringGetWithExpiryMessage(RedisKey key, CommandFlags flags, out ResultProcessor processor, out ServerEndPoint server) + private Message GetStringGetExMessage(in RedisKey key, Expiration expiry, CommandFlags flags = CommandFlags.None) + { + return expiry.TokenCount switch + { + 0 => Message.Create(Database, flags, RedisCommand.GETEX, key), + 1 => Message.Create(Database, flags, RedisCommand.GETEX, key, expiry.Operand), + _ => Message.Create(Database, flags, RedisCommand.GETEX, key, expiry.Operand, expiry.Value), + }; + } + + private Message GetStringGetWithExpiryMessage(RedisKey key, CommandFlags flags, out ResultProcessor processor, out ServerEndPoint? 
server) { if (this is IBatch) { throw new NotSupportedException("This operation is not possible inside a transaction or batch; please issue separate GetString and KeyTimeToLive requests"); } - var features = GetFeatures(key, flags, out server); + var features = GetFeatures(key, flags, RedisCommand.PTTL, out server); processor = StringGetWithExpiryProcessor.Default; if (server != null && features.MillisecondExpiry && multiplexer.CommandMap.IsAvailable(RedisCommand.PTTL)) { @@ -3434,111 +5242,168 @@ private Message GetStringGetWithExpiryMessage(RedisKey key, CommandFlags flags, return new StringGetWithExpiryMessage(Database, flags, RedisCommand.TTL, key); } - private Message GetStringSetMessage(KeyValuePair[] values, When when = When.Always, CommandFlags flags = CommandFlags.None) + private Message? GetStringSetMessage(KeyValuePair[] values, When when, Expiration expiry, CommandFlags flags) { if (values == null) throw new ArgumentNullException(nameof(values)); switch (values.Length) { case 0: return null; - case 1: return GetStringSetMessage(values[0].Key, values[0].Value, null, when, flags); + case 1: return GetStringSetMessage(values[0].Key, values[0].Value, expiry, when, flags); default: - WhenAlwaysOrNotExists(when); - int slot = ServerSelectionStrategy.NoSlot, offset = 0; - var args = new RedisValue[values.Length * 2]; - var serverSelectionStrategy = multiplexer.ServerSelectionStrategy; - for (int i = 0; i < values.Length; i++) + // assume MSETEX in the general case, but look for scenarios where we can use the simpler + // MSET/MSETNX commands (which have wider applicability in terms of server versions) + // (note that when/expiry is ignored when not MSETEX; no need to explicitly wipe) + WhenAlwaysOrExistsOrNotExists(when); + var cmd = when switch { - args[offset++] = values[i].Key.AsRedisValue(); - args[offset++] = values[i].Value; - slot = serverSelectionStrategy.CombineSlot(slot, values[i].Key); - } - return Message.CreateInSlot(Database, slot, flags, when == 
When.NotExists ? RedisCommand.MSETNX : RedisCommand.MSET, args); + When.Always when expiry.IsNone => RedisCommand.MSET, + When.NotExists when expiry.IsNoneOrKeepTtl => RedisCommand.MSETNX, // "keepttl" with "not exists" is the same as "no expiry" + _ => RedisCommand.MSETEX, + }; + return Message.Create(Database, flags, cmd, values, expiry, when); } } - private Message GetStringSetMessage(RedisKey key, RedisValue value, TimeSpan? expiry = null, When when = When.Always, CommandFlags flags = CommandFlags.None) + private Message GetStringSetMessage( + RedisKey key, + RedisValue value, + Expiration expiry, + When when = When.Always, + CommandFlags flags = CommandFlags.None) { WhenAlwaysOrExistsOrNotExists(when); + static Message ThrowWhen() => throw new ArgumentOutOfRangeException(nameof(when)); + if (value.IsNull) return Message.Create(Database, flags, RedisCommand.DEL, key); - if (expiry == null || expiry.Value == TimeSpan.MaxValue) - { // no expiry - switch (when) + if (expiry.IsPersist) throw new NotSupportedException("SET+PERSIST is not supported"); // we don't expect to get here ever + + if (expiry.IsNone) + { + return when switch { - case When.Always: return Message.Create(Database, flags, RedisCommand.SET, key, value); - case When.NotExists: return Message.Create(Database, flags, RedisCommand.SETNX, key, value); - case When.Exists: return Message.Create(Database, flags, RedisCommand.SET, key, value, RedisLiterals.XX); - } + When.Always => Message.Create(Database, flags, RedisCommand.SET, key, value), + When.NotExists => Message.Create(Database, flags, RedisCommand.SETNX, key, value), + When.Exists => Message.Create(Database, flags, RedisCommand.SET, key, value, RedisLiterals.XX), + _ => ThrowWhen(), + }; } - long milliseconds = expiry.Value.Ticks / TimeSpan.TicksPerMillisecond; - if ((milliseconds % 1000) == 0) + if (expiry.IsKeepTtl) { - // a nice round number of seconds - long seconds = milliseconds / 1000; - switch (when) + return when switch { - case 
When.Always: return Message.Create(Database, flags, RedisCommand.SETEX, key, seconds, value); - case When.Exists: return Message.Create(Database, flags, RedisCommand.SET, key, value, RedisLiterals.EX, seconds, RedisLiterals.XX); - case When.NotExists: return Message.Create(Database, flags, RedisCommand.SET, key, value, RedisLiterals.EX, seconds, RedisLiterals.NX); - } + When.Always => Message.Create(Database, flags, RedisCommand.SET, key, value, RedisLiterals.KEEPTTL), + When.NotExists => Message.Create(Database, flags, RedisCommand.SETNX, key, value), // (there would be no existing TTL to keep) + When.Exists => Message.Create(Database, flags, RedisCommand.SET, key, value, RedisLiterals.XX, RedisLiterals.KEEPTTL), + _ => ThrowWhen(), + }; } - switch (when) + if (when is When.Always & expiry.IsRelative) { - case When.Always: return Message.Create(Database, flags, RedisCommand.PSETEX, key, milliseconds, value); - case When.Exists: return Message.Create(Database, flags, RedisCommand.SET, key, value, RedisLiterals.PX, milliseconds, RedisLiterals.XX); - case When.NotExists: return Message.Create(Database, flags, RedisCommand.SET, key, value, RedisLiterals.PX, milliseconds, RedisLiterals.NX); + // special case to SETEX/PSETEX + return expiry.IsSeconds + ? 
Message.Create(Database, flags, RedisCommand.SETEX, key, expiry.Value, value) + : Message.Create(Database, flags, RedisCommand.PSETEX, key, expiry.Value, value); } - throw new NotSupportedException(); + + // use SET with EX/PX/EXAT/PXAT and possibly XX/NX + var expiryOperand = expiry.GetOperand(out var expiryValue); + return when switch + { + When.Always => Message.Create(Database, flags, RedisCommand.SET, key, value, expiryOperand, expiryValue), + When.Exists => Message.Create(Database, flags, RedisCommand.SET, key, value, expiryOperand, expiryValue, RedisLiterals.XX), + When.NotExists => Message.Create(Database, flags, RedisCommand.SET, key, value, expiryOperand, expiryValue, RedisLiterals.NX), + _ => throw new ArgumentOutOfRangeException(nameof(when)), + }; } - private Message IncrMessage(RedisKey key, long value, CommandFlags flags) + private Message GetStringSetAndGetMessage( + RedisKey key, + RedisValue value, + TimeSpan? expiry = null, + bool keepTtl = false, + When when = When.Always, + CommandFlags flags = CommandFlags.None) { - switch (value) + WhenAlwaysOrExistsOrNotExists(when); + if (value.IsNull) return Message.Create(Database, flags, RedisCommand.GETDEL, key); + + if (expiry == null || expiry.Value == TimeSpan.MaxValue) { - case 0: - if ((flags & CommandFlags.FireAndForget) != 0) return null; - return Message.Create(Database, flags, RedisCommand.INCRBY, key, value); - case 1: - return Message.Create(Database, flags, RedisCommand.INCR, key); - case -1: - return Message.Create(Database, flags, RedisCommand.DECR, key); - default: - return value > 0 - ? 
Message.Create(Database, flags, RedisCommand.INCRBY, key, value) - : Message.Create(Database, flags, RedisCommand.DECRBY, key, -value); + // no expiry + return when switch + { + When.Always when !keepTtl => Message.Create(Database, flags, RedisCommand.SET, key, value, RedisLiterals.GET), + When.Always when keepTtl => Message.Create(Database, flags, RedisCommand.SET, key, value, RedisLiterals.GET, RedisLiterals.KEEPTTL), + When.Exists when !keepTtl => Message.Create(Database, flags, RedisCommand.SET, key, value, RedisLiterals.XX, RedisLiterals.GET), + When.Exists when keepTtl => Message.Create(Database, flags, RedisCommand.SET, key, value, RedisLiterals.XX, RedisLiterals.GET, RedisLiterals.KEEPTTL), + When.NotExists when !keepTtl => Message.Create(Database, flags, RedisCommand.SET, key, value, RedisLiterals.NX, RedisLiterals.GET), + When.NotExists when keepTtl => Message.Create(Database, flags, RedisCommand.SET, key, value, RedisLiterals.NX, RedisLiterals.GET, RedisLiterals.KEEPTTL), + _ => throw new ArgumentOutOfRangeException(nameof(when)), + }; } - } + long milliseconds = expiry.Value.Ticks / TimeSpan.TicksPerMillisecond; - private RedisCommand SetOperationCommand(SetOperation operation, bool store) - { - switch (operation) + if ((milliseconds % 1000) == 0) { - case SetOperation.Difference: return store ? RedisCommand.SDIFFSTORE : RedisCommand.SDIFF; - case SetOperation.Intersect: return store ? RedisCommand.SINTERSTORE : RedisCommand.SINTER; - case SetOperation.Union: return store ? 
RedisCommand.SUNIONSTORE : RedisCommand.SUNION; - default: throw new ArgumentOutOfRangeException(nameof(operation)); + // a nice round number of seconds + long seconds = milliseconds / 1000; + return when switch + { + When.Always => Message.Create(Database, flags, RedisCommand.SET, key, value, RedisLiterals.EX, seconds, RedisLiterals.GET), + When.Exists => Message.Create(Database, flags, RedisCommand.SET, key, value, RedisLiterals.EX, seconds, RedisLiterals.XX, RedisLiterals.GET), + When.NotExists => Message.Create(Database, flags, RedisCommand.SET, key, value, RedisLiterals.EX, seconds, RedisLiterals.NX, RedisLiterals.GET), + _ => throw new ArgumentOutOfRangeException(nameof(when)), + }; } + + return when switch + { + When.Always => Message.Create(Database, flags, RedisCommand.SET, key, value, RedisLiterals.PX, milliseconds, RedisLiterals.GET), + When.Exists => Message.Create(Database, flags, RedisCommand.SET, key, value, RedisLiterals.PX, milliseconds, RedisLiterals.XX, RedisLiterals.GET), + When.NotExists => Message.Create(Database, flags, RedisCommand.SET, key, value, RedisLiterals.PX, milliseconds, RedisLiterals.NX, RedisLiterals.GET), + _ => throw new ArgumentOutOfRangeException(nameof(when)), + }; } - private CursorEnumerable TryScan(RedisKey key, RedisValue pattern, int pageSize, long cursor, int pageOffset, CommandFlags flags, RedisCommand command, ResultProcessor.ScanResult> processor, out ServerEndPoint server) + private Message? IncrMessage(RedisKey key, long value, CommandFlags flags) => value switch + { + 0 => ((flags & CommandFlags.FireAndForget) != 0) + ? 
null + : Message.Create(Database, flags, RedisCommand.INCRBY, key, value), + 1 => Message.Create(Database, flags, RedisCommand.INCR, key), + -1 => Message.Create(Database, flags, RedisCommand.DECR, key), + > 0 => Message.Create(Database, flags, RedisCommand.INCRBY, key, value), + _ => Message.Create(Database, flags, RedisCommand.DECRBY, key, -value), + }; + + private static RedisCommand SetOperationCommand(SetOperation operation, bool store) => operation switch + { + SetOperation.Difference => store ? RedisCommand.SDIFFSTORE : RedisCommand.SDIFF, + SetOperation.Intersect => store ? RedisCommand.SINTERSTORE : RedisCommand.SINTER, + SetOperation.Union => store ? RedisCommand.SUNIONSTORE : RedisCommand.SUNION, + _ => throw new ArgumentOutOfRangeException(nameof(operation)), + }; + + private CursorEnumerable? TryScan(RedisKey key, RedisValue pattern, int pageSize, long cursor, int pageOffset, CommandFlags flags, RedisCommand command, ResultProcessor.ScanResult> processor, out ServerEndPoint? 
server, bool noValues = false) { server = null; if (pageSize <= 0) throw new ArgumentOutOfRangeException(nameof(pageSize)); if (!multiplexer.CommandMap.IsAvailable(command)) return null; - var features = GetFeatures(key, flags, out server); + var features = GetFeatures(key, flags, RedisCommand.SCAN, out server); if (!features.Scan) return null; - if (CursorUtils.IsNil(pattern)) pattern = (byte[])null; - return new ScanEnumerable(this, server, key, pattern, pageSize, cursor, pageOffset, flags, command, processor); + if (CursorUtils.IsNil(pattern)) pattern = (byte[]?)null; + return new ScanEnumerable(this, server, key, pattern, pageSize, cursor, pageOffset, flags, command, processor, noValues); } - private Message GetLexMessage(RedisCommand command, RedisKey key, RedisValue min, RedisValue max, Exclude exclude, long skip, long take, CommandFlags flags) + private Message GetLexMessage(RedisCommand command, RedisKey key, RedisValue min, RedisValue max, Exclude exclude, long skip, long take, CommandFlags flags, Order order) { - RedisValue start = GetLexRange(min, exclude, true), stop = GetLexRange(max, exclude, false); + RedisValue start = GetLexRange(min, exclude, true, order), stop = GetLexRange(max, exclude, false, order); if (skip == 0 && take == -1) return Message.Create(Database, flags, command, key, start, stop); @@ -3548,7 +5413,7 @@ private Message GetLexMessage(RedisCommand command, RedisKey key, RedisValue min public long SortedSetLengthByValue(RedisKey key, RedisValue min, RedisValue max, Exclude exclude = Exclude.None, CommandFlags flags = CommandFlags.None) { - var msg = GetLexMessage(RedisCommand.ZLEXCOUNT, key, min, max, exclude, 0, -1, flags); + var msg = GetLexMessage(RedisCommand.ZLEXCOUNT, key, min, max, exclude, 0, -1, flags, Order.Ascending); return ExecuteSync(msg, ResultProcessor.Int64); } @@ -3557,7 +5422,7 @@ public RedisValue[] SortedSetRangeByValue(RedisKey key, RedisValue min, RedisVal private static void ReverseLimits(Order order, ref 
Exclude exclude, ref RedisValue start, ref RedisValue stop) { - bool reverseLimits = (order == Order.Ascending) == (stop != default(RedisValue) && start.CompareTo(stop) > 0); + bool reverseLimits = (order == Order.Ascending) == (stop != default && start.CompareTo(stop) > 0); if (reverseLimits) { var tmp = start; @@ -3570,63 +5435,96 @@ private static void ReverseLimits(Order order, ref Exclude exclude, ref RedisVal } } } - public RedisValue[] SortedSetRangeByValue(RedisKey key, RedisValue min = default(RedisValue), RedisValue max = default(RedisValue), - Exclude exclude = Exclude.None, Order order = Order.Ascending, long skip = 0, long take = -1, CommandFlags flags = CommandFlags.None) + public RedisValue[] SortedSetRangeByValue( + RedisKey key, + RedisValue min = default, + RedisValue max = default, + Exclude exclude = Exclude.None, + Order order = Order.Ascending, + long skip = 0, + long take = -1, + CommandFlags flags = CommandFlags.None) { ReverseLimits(order, ref exclude, ref min, ref max); - var msg = GetLexMessage(order == Order.Ascending ? RedisCommand.ZRANGEBYLEX : RedisCommand.ZREVRANGEBYLEX, key, min, max, exclude, skip, take, flags); - return ExecuteSync(msg, ResultProcessor.RedisValueArray); + var msg = GetLexMessage(order == Order.Ascending ? 
RedisCommand.ZRANGEBYLEX : RedisCommand.ZREVRANGEBYLEX, key, min, max, exclude, skip, take, flags, order); + return ExecuteSync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); } public long SortedSetRemoveRangeByValue(RedisKey key, RedisValue min, RedisValue max, Exclude exclude = Exclude.None, CommandFlags flags = CommandFlags.None) { - var msg = GetLexMessage(RedisCommand.ZREMRANGEBYLEX, key, min, max, exclude, 0, -1, flags); + var msg = GetLexMessage(RedisCommand.ZREMRANGEBYLEX, key, min, max, exclude, 0, -1, flags, Order.Ascending); return ExecuteSync(msg, ResultProcessor.Int64); } public Task SortedSetLengthByValueAsync(RedisKey key, RedisValue min, RedisValue max, Exclude exclude = Exclude.None, CommandFlags flags = CommandFlags.None) { - var msg = GetLexMessage(RedisCommand.ZLEXCOUNT, key, min, max, exclude, 0, -1, flags); + var msg = GetLexMessage(RedisCommand.ZLEXCOUNT, key, min, max, exclude, 0, -1, flags, Order.Ascending); return ExecuteAsync(msg, ResultProcessor.Int64); } public Task SortedSetRangeByValueAsync(RedisKey key, RedisValue min, RedisValue max, Exclude exclude, long skip, long take, CommandFlags flags) => SortedSetRangeByValueAsync(key, min, max, exclude, Order.Ascending, skip, take, flags); - public Task SortedSetRangeByValueAsync(RedisKey key, RedisValue min = default(RedisValue), RedisValue max = default(RedisValue), - Exclude exclude = Exclude.None, Order order = Order.Ascending, long skip = 0, long take = -1, CommandFlags flags = CommandFlags.None) + public Task SortedSetRangeByValueAsync( + RedisKey key, + RedisValue min = default, + RedisValue max = default, + Exclude exclude = Exclude.None, + Order order = Order.Ascending, + long skip = 0, + long take = -1, + CommandFlags flags = CommandFlags.None) { ReverseLimits(order, ref exclude, ref min, ref max); - var msg = GetLexMessage(order == Order.Ascending ? 
RedisCommand.ZRANGEBYLEX : RedisCommand.ZREVRANGEBYLEX, key, min, max, exclude, skip, take, flags); - return ExecuteAsync(msg, ResultProcessor.RedisValueArray); + var msg = GetLexMessage(order == Order.Ascending ? RedisCommand.ZRANGEBYLEX : RedisCommand.ZREVRANGEBYLEX, key, min, max, exclude, skip, take, flags, order); + return ExecuteAsync(msg, ResultProcessor.RedisValueArray, defaultValue: Array.Empty()); } public Task SortedSetRemoveRangeByValueAsync(RedisKey key, RedisValue min, RedisValue max, Exclude exclude = Exclude.None, CommandFlags flags = CommandFlags.None) { - var msg = GetLexMessage(RedisCommand.ZREMRANGEBYLEX, key, min, max, exclude, 0, -1, flags); + var msg = GetLexMessage(RedisCommand.ZREMRANGEBYLEX, key, min, max, exclude, 0, -1, flags, Order.Ascending); return ExecuteAsync(msg, ResultProcessor.Int64); } - internal class ScanEnumerable : CursorEnumerable + internal sealed class ScanEnumerable : CursorEnumerable { private readonly RedisKey key; private readonly RedisValue pattern; private readonly RedisCommand command; - - public ScanEnumerable(RedisDatabase database, ServerEndPoint server, RedisKey key, in RedisValue pattern, int pageSize, in RedisValue cursor, int pageOffset, CommandFlags flags, - RedisCommand command, ResultProcessor processor) + private readonly bool noValues; + + public ScanEnumerable( + RedisDatabase database, + ServerEndPoint? 
server, + RedisKey key, + in RedisValue pattern, + int pageSize, + in RedisValue cursor, + int pageOffset, + CommandFlags flags, + RedisCommand command, + ResultProcessor processor, + bool noValues) : base(database, server, database.Database, pageSize, cursor, pageOffset, flags) { this.key = key; this.pattern = pattern; this.command = command; Processor = processor; + this.noValues = noValues; } private protected override ResultProcessor.ScanResult> Processor { get; } private protected override Message CreateMessage(in RedisValue cursor) { + if (noValues) + { + if (CursorUtils.IsNil(pattern) && pageSize == CursorUtils.DefaultRedisPageSize) return Message.Create(db, flags, command, key, cursor, RedisLiterals.NOVALUES); + if (CursorUtils.IsNil(pattern)) return Message.Create(db, flags, command, key, cursor, RedisLiterals.COUNT, pageSize, RedisLiterals.NOVALUES); + return Message.Create(db, flags, command, key, cursor, RedisLiterals.MATCH, pattern, RedisLiterals.COUNT, pageSize, RedisLiterals.NOVALUES); + } + if (CursorUtils.IsNil(pattern)) { if (pageSize == CursorUtils.DefaultRedisPageSize) @@ -3646,7 +5544,7 @@ private protected override Message CreateMessage(in RedisValue cursor) } else { - return Message.Create(db, flags, command, key, new RedisValue[] { cursor, RedisLiterals.MATCH, pattern, RedisLiterals.COUNT, pageSize }); + return Message.Create(db, flags, command, key, cursor, RedisLiterals.MATCH, pattern, RedisLiterals.COUNT, pageSize); } } } @@ -3664,7 +5562,7 @@ public ScriptLoadMessage(CommandFlags flags, string script) protected override void WriteImpl(PhysicalConnection physical) { physical.WriteHeader(Command, 2); - physical.WriteBulkString(RedisLiterals.LOAD); + physical.WriteBulkString("LOAD"u8); physical.WriteBulkString((RedisValue)Script); } public override int ArgCount => 2; @@ -3674,26 +5572,26 @@ private sealed class HashScanResultProcessor : ScanResultProcessor { public static readonly ResultProcessor.ScanResult> Default = new 
HashScanResultProcessor(); private HashScanResultProcessor() { } - protected override HashEntry[] Parse(in RawResult result, out int count) - => HashEntryArray.TryParse(result, out HashEntry[] pairs, true, out count) ? pairs : null; + protected override HashEntry[]? Parse(in RawResult result, out int count) + => HashEntryArray.TryParse(result, out HashEntry[]? pairs, true, out count) ? pairs : null; } private abstract class ScanResultProcessor : ResultProcessor.ScanResult> { - protected abstract T[] Parse(in RawResult result, out int count); + protected abstract T[]? Parse(in RawResult result, out int count); protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) { - switch (result.Type) + switch (result.Resp2TypeArray) { - case ResultType.MultiBulk: + case ResultType.Array: var arr = result.GetItems(); if (arr.Length == 2) { ref RawResult inner = ref arr[1]; - if (inner.Type == ResultType.MultiBulk && arr[0].TryGetInt64(out var i64)) + if (inner.Resp2TypeArray == ResultType.Array && arr[0].TryGetInt64(out var i64)) { - T[] oversized = Parse(inner, out int count); + T[]? oversized = Parse(inner, out int count); var sscanResult = new ScanEnumerable.ScanResult(i64, oversized, count, true); SetResult(message, sscanResult); return true; @@ -3710,7 +5608,7 @@ internal sealed class ExecuteMessage : Message private readonly ICollection _args; public new CommandBytes Command { get; } - public ExecuteMessage(CommandMap map, int db, CommandFlags flags, string command, ICollection args) : base(db, flags, RedisCommand.UNKNOWN) + public ExecuteMessage(CommandMap? map, int db, CommandFlags flags, string command, ICollection? 
args) : base(db, flags, RedisCommand.UNKNOWN) { if (args != null && args.Count >= PhysicalConnection.REDIS_MAX_ARGS) // using >= here because we will be adding 1 for the command itself (which is an arg for the purposes of the multi-bulk protocol) { @@ -3735,7 +5633,7 @@ protected override void WriteImpl(PhysicalConnection physical) physical.Write(channel); } else - { // recognises well-known types + { // recognises well-known types var val = RedisValue.TryParse(arg, out var valid); if (!valid) throw new InvalidCastException($"Unable to parse value: '{arg}'"); physical.WriteBulkString(val); @@ -3743,6 +5641,7 @@ protected override void WriteImpl(PhysicalConnection physical) } } + public override string CommandString => Command.ToString(); public override string CommandAndKey => Command.ToString(); public override int GetHashSlot(ServerSelectionStrategy serverSelectionStrategy) @@ -3763,25 +5662,25 @@ public override int GetHashSlot(ServerSelectionStrategy serverSelectionStrategy) private sealed class ScriptEvalMessage : Message, IMultiMessage { private readonly RedisKey[] keys; - private readonly string script; + private readonly string? script; private readonly RedisValue[] values; - private byte[] asciiHash; - private readonly byte[] hexHash; + private byte[]? asciiHash; + private readonly byte[]? hexHash; - public ScriptEvalMessage(int db, CommandFlags flags, string script, RedisKey[] keys, RedisValue[] values) - : this(db, flags, ResultProcessor.ScriptLoadProcessor.IsSHA1(script) ? RedisCommand.EVALSHA : RedisCommand.EVAL, script, null, keys, values) + public ScriptEvalMessage(int db, CommandFlags flags, RedisCommand command, string script, RedisKey[]? keys, RedisValue[]? 
values) + : this(db, flags, command, script, null, keys, values) { if (script == null) throw new ArgumentNullException(nameof(script)); } - public ScriptEvalMessage(int db, CommandFlags flags, byte[] hash, RedisKey[] keys, RedisValue[] values) - : this(db, flags, RedisCommand.EVAL, null, hash, keys, values) + public ScriptEvalMessage(int db, CommandFlags flags, RedisCommand command, byte[] hash, RedisKey[]? keys, RedisValue[]? values) + : this(db, flags, command, null, hash, keys, values) { if (hash == null) throw new ArgumentNullException(nameof(hash)); if (hash.Length != ResultProcessor.ScriptLoadProcessor.Sha1HashLength) throw new ArgumentOutOfRangeException(nameof(hash), "Invalid hash length"); } - private ScriptEvalMessage(int db, CommandFlags flags, RedisCommand command, string script, byte[] hexHash, RedisKey[] keys, RedisValue[] values) + private ScriptEvalMessage(int db, CommandFlags flags, RedisCommand command, string? script, byte[]? hexHash, RedisKey[]? keys, RedisValue[]? values) : base(db, flags, command) { this.script = script; @@ -3807,7 +5706,7 @@ public override int GetHashSlot(ServerSelectionStrategy serverSelectionStrategy) public IEnumerable GetMessages(PhysicalConnection connection) { - PhysicalBridge bridge; + PhysicalBridge? bridge; if (script != null && (bridge = connection.BridgeCouldBeNull) != null && bridge.Multiplexer.CommandMap.IsAvailable(RedisCommand.SCRIPT) && (Flags & CommandFlags.NoScriptCache) == 0) @@ -3871,6 +5770,71 @@ protected override RedisValue[] Parse(in RawResult result, out int count) } } + private static Message CreateListPositionMessage(int db, CommandFlags flags, RedisKey key, RedisValue element, long rank, long maxLen, long? count = null) => + count != null + ? 
Message.Create(db, flags, RedisCommand.LPOS, key, element, RedisLiterals.RANK, rank, RedisLiterals.MAXLEN, maxLen, RedisLiterals.COUNT, count) + : Message.Create(db, flags, RedisCommand.LPOS, key, element, RedisLiterals.RANK, rank, RedisLiterals.MAXLEN, maxLen); + + private static Message CreateSortedSetRangeStoreMessage( + int db, + CommandFlags flags, + RedisKey sourceKey, + RedisKey destinationKey, + RedisValue start, + RedisValue stop, + SortedSetOrder sortedSetOrder, + Order order, + Exclude exclude, + long skip, + long? take) + { + if (sortedSetOrder == SortedSetOrder.ByRank) + { + if (take > 0) + { + throw new ArgumentException("take argument is not valid when sortedSetOrder is ByRank you may want to try setting the SortedSetOrder to ByLex or ByScore", nameof(take)); + } + if (exclude != Exclude.None) + { + throw new ArgumentException("exclude argument is not valid when sortedSetOrder is ByRank, you may want to try setting the sortedSetOrder to ByLex or ByScore", nameof(exclude)); + } + + return order switch + { + Order.Ascending => Message.Create(db, flags, RedisCommand.ZRANGESTORE, destinationKey, sourceKey, start, stop), + Order.Descending => Message.Create(db, flags, RedisCommand.ZRANGESTORE, destinationKey, sourceKey, start, stop, RedisLiterals.REV), + _ => throw new ArgumentOutOfRangeException(nameof(order)), + }; + } + + RedisValue formattedStart = exclude switch + { + Exclude.Both or Exclude.Start => $"({start}", + _ when sortedSetOrder == SortedSetOrder.ByLex => $"[{start}", + _ => start, + }; + + RedisValue formattedStop = exclude switch + { + Exclude.Both or Exclude.Stop => $"({stop}", + _ when sortedSetOrder == SortedSetOrder.ByLex => $"[{stop}", + _ => stop, + }; + + return order switch + { + Order.Ascending when take != null && take > 0 => + Message.Create(db, flags, RedisCommand.ZRANGESTORE, destinationKey, sourceKey, formattedStart, formattedStop, sortedSetOrder.GetLiteral(), RedisLiterals.LIMIT, skip, take), + Order.Ascending => + 
Message.Create(db, flags, RedisCommand.ZRANGESTORE, destinationKey, sourceKey, formattedStart, formattedStop, sortedSetOrder.GetLiteral()), + Order.Descending when take != null && take > 0 => + Message.Create(db, flags, RedisCommand.ZRANGESTORE, destinationKey, sourceKey, formattedStart, formattedStop, sortedSetOrder.GetLiteral(), RedisLiterals.REV, RedisLiterals.LIMIT, skip, take), + Order.Descending => + Message.Create(db, flags, RedisCommand.ZRANGESTORE, destinationKey, sourceKey, formattedStart, formattedStop, sortedSetOrder.GetLiteral(), RedisLiterals.REV), + _ => throw new ArgumentOutOfRangeException(nameof(order)), + }; + } + private sealed class SortedSetCombineAndStoreCommandMessage : Message.CommandKeyBase // ZINTERSTORE and ZUNIONSTORE have a very unusual signature { private readonly RedisKey[] keys; @@ -3911,14 +5875,14 @@ private sealed class SortedSetScanResultProcessor : ScanResultProcessor.ScanResult> Default = new SortedSetScanResultProcessor(); private SortedSetScanResultProcessor() { } - protected override SortedSetEntry[] Parse(in RawResult result, out int count) - => SortedSetWithScores.TryParse(result, out SortedSetEntry[] pairs, true, out count) ? pairs : null; + protected override SortedSetEntry[]? Parse(in RawResult result, out int count) + => SortedSetWithScores.TryParse(result, out SortedSetEntry[]? pairs, true, out count) ? pairs : null; } - private class StringGetWithExpiryMessage : Message.CommandKeyBase, IMultiMessage + private sealed class StringGetWithExpiryMessage : Message.CommandKeyBase, IMultiMessage { private readonly RedisCommand ttlCommand; - private IResultBox box; + private IResultBox? 
box; public StringGetWithExpiryMessage(int db, CommandFlags flags, RedisCommand ttlCommand, in RedisKey key) : base(db, flags, RedisCommand.GET, key) @@ -3926,7 +5890,7 @@ public StringGetWithExpiryMessage(int db, CommandFlags flags, RedisCommand ttlCo this.ttlCommand = ttlCommand; } - public override string CommandAndKey => ttlCommand + "+" + RedisCommand.GET + " " + (string)Key; + public override string CommandAndKey => ttlCommand + "+" + RedisCommand.GET + " " + (string?)Key; public IEnumerable GetMessages(PhysicalConnection connection) { @@ -3938,7 +5902,7 @@ public IEnumerable GetMessages(PhysicalConnection connection) yield return this; } - public bool UnwrapValue(out TimeSpan? value, out Exception ex) + public bool UnwrapValue(out TimeSpan? value, out Exception? ex) { if (box != null) { @@ -3959,13 +5923,13 @@ protected override void WriteImpl(PhysicalConnection physical) public override int ArgCount => 1; } - private class StringGetWithExpiryProcessor : ResultProcessor + private sealed class StringGetWithExpiryProcessor : ResultProcessor { public static readonly ResultProcessor Default = new StringGetWithExpiryProcessor(); private StringGetWithExpiryProcessor() { } protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) { - switch (result.Type) + switch (result.Resp2TypeBulkString) { case ResultType.Integer: case ResultType.SimpleString: diff --git a/src/StackExchange.Redis/RedisErrorEventArgs.cs b/src/StackExchange.Redis/RedisErrorEventArgs.cs index 4b34b7a5f..2213baf1c 100644 --- a/src/StackExchange.Redis/RedisErrorEventArgs.cs +++ b/src/StackExchange.Redis/RedisErrorEventArgs.cs @@ -5,15 +5,17 @@ namespace StackExchange.Redis { /// - /// Notification of errors from the redis server + /// Notification of errors from the redis server. /// public class RedisErrorEventArgs : EventArgs, ICompletable { - private readonly EventHandler handler; + private readonly EventHandler? 
handler; private readonly object sender; internal RedisErrorEventArgs( - EventHandler handler, object sender, - EndPoint endpoint, string message) + EventHandler? handler, + object sender, + EndPoint endpoint, + string message) { this.handler = handler; this.sender = sender; @@ -28,24 +30,21 @@ internal RedisErrorEventArgs( /// Redis endpoint. /// Error message. public RedisErrorEventArgs(object sender, EndPoint endpoint, string message) - : this (null, sender, endpoint, message) + : this(null, sender, endpoint, message) { } /// - /// The origin of the message + /// The origin of the message. /// public EndPoint EndPoint { get; } /// - /// The message from the server + /// The message from the server. /// public string Message { get; } - void ICompletable.AppendStormLog(StringBuilder sb) - { - sb.Append("event, error: ").Append(Message); - } + void ICompletable.AppendStormLog(StringBuilder sb) => sb.Append("event, error: ").Append(Message); bool ICompletable.TryComplete(bool isAsync) => ConnectionMultiplexer.TryCompleteHandler(handler, sender, this, isAsync); } diff --git a/src/StackExchange.Redis/RedisFeatures.cs b/src/StackExchange.Redis/RedisFeatures.cs index 37251e93f..d185089e6 100644 --- a/src/StackExchange.Redis/RedisFeatures.cs +++ b/src/StackExchange.Redis/RedisFeatures.cs @@ -7,10 +7,12 @@ namespace StackExchange.Redis { /// - /// Provides basic information about the features available on a particular version of Redis + /// Provides basic information about the features available on a particular version of Redis. 
/// - public readonly struct RedisFeatures + public readonly struct RedisFeatures : IEquatable { +#pragma warning disable SA1310 // Field names should not contain underscore +#pragma warning disable SA1311 // Static readonly fields should begin with upper-case letter internal static readonly Version v2_0_0 = new Version(2, 0, 0), v2_1_0 = new Version(2, 1, 0), v2_1_1 = new Version(2, 1, 1), @@ -35,12 +37,27 @@ public readonly struct RedisFeatures v3_2_1 = new Version(3, 2, 1), v4_0_0 = new Version(4, 0, 0), v4_9_1 = new Version(4, 9, 1), // 5.0 RC1 is version 4.9.1; // 5.0 RC1 is version 4.9.1 - v5_0_0 = new Version(5, 0, 0); + v5_0_0 = new Version(5, 0, 0), + v6_0_0 = new Version(6, 0, 0), + v6_0_6 = new Version(6, 0, 6), + v6_2_0 = new Version(6, 2, 0), + v7_0_0_rc1 = new Version(6, 9, 240), // 7.0 RC1 is version 6.9.240 + v7_2_0_rc1 = new Version(7, 1, 240), // 7.2 RC1 is version 7.1.240 + v7_4_0_rc1 = new Version(7, 3, 240), // 7.4 RC1 is version 7.3.240 + v7_4_0_rc2 = new Version(7, 3, 241), // 7.4 RC2 is version 7.3.241 + v7_4_0 = new Version(7, 4, 0), + v8_0_0_M04 = new Version(7, 9, 227), // 8.0 M04 is version 7.9.227 + v8_2_0_rc1 = new Version(8, 1, 240), // 8.2 RC1 is version 8.1.240 + v8_4_0_rc1 = new Version(8, 3, 224), // 8.4 RC1 is version 8.3.224 + v8_6_0 = new Version(8, 6, 0); + +#pragma warning restore SA1310 // Field names should not contain underscore +#pragma warning restore SA1311 // Static readonly fields should begin with upper-case letter private readonly Version version; /// - /// Create a new RedisFeatures instance for the given version + /// Create a new RedisFeatures instance for the given version. /// /// The version of redis to base the feature set on. public RedisFeatures(Version version) @@ -48,190 +65,247 @@ public RedisFeatures(Version version) this.version = version ?? 
throw new ArgumentNullException(nameof(version)); } +#pragma warning disable SA1629 // Documentation should end with a period + /// - /// Does BITOP / BITCOUNT exist? + /// Are BITOP and BITCOUNT available? /// - public bool BitwiseOperations => Version >= v2_5_10; + public bool BitwiseOperations => Version.IsAtLeast(v2_6_0); /// - /// Is CLIENT SETNAME available? + /// Is CLIENT SETNAME available? /// - public bool ClientName => Version >= v2_6_9; + public bool ClientName => Version.IsAtLeast(v2_6_9); /// - /// Does EXEC support EXECABORT if there are errors? + /// Is CLIENT ID available? /// - public bool ExecAbort => Version >= v2_6_5 && Version != v2_9_5; + public bool ClientId => Version.IsAtLeast(v5_0_0); /// - /// Can EXPIRE be used to set expiration on a key that is already volatile (i.e. has an expiration)? + /// Does EXEC support EXECABORT if there are errors? /// - public bool ExpireOverwrite => Version >= v2_1_3; + public bool ExecAbort => Version.IsAtLeast(v2_6_5) && !Version.IsEqual(v2_9_5); /// - /// Is HSTRLEN available? + /// Can EXPIRE be used to set expiration on a key that is already volatile (i.e. has an expiration)? /// - public bool HashStringLength => Version >= v3_2_0; + public bool ExpireOverwrite => Version.IsAtLeast(v2_1_3); /// - /// Does HDEL support varadic usage? + /// Is GETDEL available? /// - public bool HashVaradicDelete => Version >= v2_4_0; + public bool GetDelete => Version.IsAtLeast(v6_2_0); /// - /// Does INCRBYFLOAT / HINCRBYFLOAT exist? + /// Is HSTRLEN available? /// - public bool IncrementFloat => Version >= v2_5_7; + public bool HashStringLength => Version.IsAtLeast(v3_2_0); /// - /// Does INFO support sections? + /// Does HDEL support variadic usage? /// - public bool InfoSections => Version >= v2_8_0; + public bool HashVaradicDelete => Version.IsAtLeast(v2_4_0); /// - /// Is LINSERT available? + /// Are INCRBYFLOAT and HINCRBYFLOAT available? 
/// - public bool ListInsert => Version >= v2_1_1; + public bool IncrementFloat => Version.IsAtLeast(v2_6_0); /// - /// Is MEMORY available? + /// Does INFO support sections? /// - public bool Memory => Version >= v4_0_0; + public bool InfoSections => Version.IsAtLeast(v2_8_0); /// - /// Indicates whether PEXPIRE and PTTL are supported + /// Is LINSERT available? /// - public bool MillisecondExpiry => Version >= v2_6_0; + public bool ListInsert => Version.IsAtLeast(v2_1_1); /// - /// Is MODULE available? + /// Is MEMORY available? /// - public bool Module => Version >= v4_0_0; + public bool Memory => Version.IsAtLeast(v4_0_0); /// - /// Does SRANDMEMBER support "count"? + /// Are PEXPIRE and PTTL available? /// - public bool MultipleRandom => Version >= v2_5_14; + public bool MillisecondExpiry => Version.IsAtLeast(v2_6_0); /// - /// Is the PERSIST operation supported? + /// Is MODULE available? /// - public bool Persist => Version >= v2_1_2; + public bool Module => Version.IsAtLeast(v4_0_0); /// - /// Is RPUSHX and LPUSHX available? + /// Does SRANDMEMBER support the "count" option? /// - public bool PushIfNotExists => Version >= v2_1_1; + public bool MultipleRandom => Version.IsAtLeast(v2_5_14); /// - /// Are cursor-based scans available? + /// Is PERSIST available? /// - public bool Scan => Version >= v2_8_0; + public bool Persist => Version.IsAtLeast(v2_1_2); /// - /// Does EVAL / EVALSHA / etc exist? + /// Are LPUSHX and RPUSHX available? /// - public bool Scripting => Version >= v2_5_7; + public bool PushIfNotExists => Version.IsAtLeast(v2_1_1); /// - /// Does SET have the EX|PX|NX|XX extensions? + /// Does this support SORT_RO? /// - public bool SetConditional => Version >= v2_6_12; + internal bool ReadOnlySort => Version.IsAtLeast(v7_0_0_rc1); /// - /// Does SADD support varadic usage? + /// Is SCAN (cursor-based scanning) available? 
/// - public bool SetVaradicAddRemove => Version >= v2_4_0; + public bool Scan => Version.IsAtLeast(v2_8_0); /// - /// Is ZPOPMAX and ZPOPMIN available? + /// Are EVAL, EVALSHA, and other script commands available? /// - public bool SortedSetPop => Version >= v4_9_1; + public bool Scripting => Version.IsAtLeast(v2_6_0); /// - /// Are Redis Streams available? + /// Does SET support the GET option? /// - public bool Streams => Version >= v4_9_1; + public bool SetAndGet => Version.IsAtLeast(v6_2_0); /// - /// Is STRLEN available? + /// Does SET support the EX, PX, NX, and XX options? /// - public bool StringLength => Version >= v2_1_2; + public bool SetConditional => Version.IsAtLeast(v2_6_12); /// - /// Is SETRANGE available? + /// Does SET have the KEEPTTL option? /// - public bool StringSetRange => Version >= v2_1_8; + public bool SetKeepTtl => Version.IsAtLeast(v6_0_0); /// - /// Is SWAPDB available? + /// Does SET allow the NX and GET options to be used together? /// - public bool SwapDB => Version >= v4_0_0; + public bool SetNotExistsAndGet => Version.IsAtLeast(v7_0_0_rc1); /// - /// Does TIME exist? + /// Does SADD support variadic usage? /// - public bool Time => Version >= v2_6_0; + public bool SetVaradicAddRemove => Version.IsAtLeast(v2_4_0); /// - /// Does UNLINK exist? + /// Are SSUBSCRIBE and SPUBLISH available? /// - public bool Unlink => Version >= v4_0_0; + public bool ShardedPubSub => Version.IsAtLeast(v7_0_0_rc1); /// - /// Are Lua changes to the calling database transparent to the calling client? + /// Are ZPOPMIN and ZPOPMAX available? + /// + public bool SortedSetPop => Version.IsAtLeast(v5_0_0); + + /// + /// Is ZRANGESTORE available? + /// + public bool SortedSetRangeStore => Version.IsAtLeast(v6_2_0); + + /// + /// Are Redis Streams available? /// - public bool ScriptingDatabaseSafe => Version >= v2_8_12; + public bool Streams => Version.IsAtLeast(v4_9_1); /// - /// Is PFCOUNT supported on replicas? + /// Is STRLEN available? 
/// - [Obsolete("Starting with Redis version 5, Redis has moved to 'replica' terminology. Please use " + nameof(HyperLogLogCountReplicaSafe) + " instead.")] + public bool StringLength => Version.IsAtLeast(v2_1_2); + + /// + /// Is SETRANGE available? + /// + public bool StringSetRange => Version.IsAtLeast(v2_1_8); + + /// + /// Is SWAPDB available? + /// + public bool SwapDB => Version.IsAtLeast(v4_0_0); + + /// + /// Is TIME available? + /// + public bool Time => Version.IsAtLeast(v2_6_0); + + /// + /// Is UNLINK available? + /// + public bool Unlink => Version.IsAtLeast(v4_0_0); + + /// + /// Are Lua changes to the calling database transparent to the calling client? + /// + public bool ScriptingDatabaseSafe => Version.IsAtLeast(v2_8_12); + + /// + [Obsolete("Starting with Redis version 5, Redis has moved to 'replica' terminology. Please use " + nameof(HyperLogLogCountReplicaSafe) + " instead, this will be removed in 3.0.")] [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] public bool HyperLogLogCountSlaveSafe => HyperLogLogCountReplicaSafe; /// - /// Is PFCOUNT supported on replicas? + /// Is PFCOUNT available on replicas? /// - public bool HyperLogLogCountReplicaSafe => Version >= v2_8_18; + public bool HyperLogLogCountReplicaSafe => Version.IsAtLeast(v2_8_18); /// - /// Are the GEO commands available? + /// Are geospatial commands available? /// - public bool Geo => Version >= v3_2_0; + public bool Geo => Version.IsAtLeast(v3_2_0); /// - /// Can PING be used on a subscription connection? + /// Can PING be used on a subscription connection? /// - internal bool PingOnSubscriber => Version >= v3_0_0; + internal bool PingOnSubscriber => Version.IsAtLeast(v3_0_0); /// - /// Does SetPop support popping multiple items? + /// Does SPOP support popping multiple items? /// - public bool SetPopMultiple => Version >= v3_2_0; + public bool SetPopMultiple => Version.IsAtLeast(v3_2_0); /// - /// The Redis version of the server + /// Is TOUCH available? 
/// - public Version Version => version ?? v2_0_0; + public bool KeyTouch => Version.IsAtLeast(v3_2_1); /// - /// Are the Touch command available? + /// Does the server prefer 'replica' terminology - 'REPLICAOF', etc? /// - public bool KeyTouch => Version >= v3_2_1; + public bool ReplicaCommands => Version.IsAtLeast(v5_0_0); /// - /// Does the server prefer 'replica' terminology - 'REPLICAOF', etc? + /// Do list-push commands support multiple arguments? /// - public bool ReplicaCommands => Version >= v5_0_0; + public bool PushMultiple => Version.IsAtLeast(v4_0_0); /// - /// Do list-push commands support multiple arguments? + /// Is the RESP3 protocol available? + /// + public bool Resp3 => Version.IsAtLeast(v6_0_0); + + /// + /// Are the IF* modifiers on SET available? /// - public bool PushMultiple => Version >= v4_0_0; + public bool SetWithValueCheck => Version.IsAtLeast(v8_4_0_rc1); /// - /// Create a string representation of the available features + /// Are the IF* modifiers on DEL available? + /// + public bool DeleteWithValueCheck => Version.IsAtLeast(v8_4_0_rc1); + +#pragma warning restore 1629 // Documentation text should end with a period. + + /// + /// The Redis version of the server. + /// + public Version Version => version ?? v2_0_0; + + /// + /// Create a string representation of the available features. /// public override string ToString() { @@ -241,7 +315,7 @@ public override string ToString() if (v.Build >= 0) sb.Append('.').Append(v.Build); sb.AppendLine(); object boxed = this; - foreach(var prop in s_props) + foreach (var prop in s_props) { sb.Append(prop.Name).Append(": ").Append(prop.GetValue(boxed)).AppendLine(); } @@ -258,10 +332,65 @@ orderby prop.Name /// Returns the hash code for this instance. /// A 32-bit signed integer that is the hash code for this instance. - public override int GetHashCode() => Version.GetHashCode(); - /// Indicates whether this instance and a specified object are equal. 
- /// true if and this instance are the same type and represent the same value; otherwise, false. - /// The object to compare with the current instance. - public override bool Equals(object obj) => obj is RedisFeatures f && f.Version == Version; + public override int GetHashCode() => Version.GetNormalizedHashCode(); + + /// + /// Indicates whether this instance and a specified object are equal. + /// + /// + /// if and this instance are the same type and represent the same value, otherwise. + /// + /// The object to compare with the current instance. + public override bool Equals(object? obj) => obj is RedisFeatures f && f.Version.IsEqual(Version); + + /// + /// Indicates whether this instance and a specified object are equal. + /// + /// + /// if and this instance are the same type and represent the same value, otherwise. + /// + /// The object to compare with the current instance. + public bool Equals(RedisFeatures other) => other.Version.IsEqual(Version); + + /// + /// Checks if 2 are .Equal(). + /// + public static bool operator ==(RedisFeatures left, RedisFeatures right) => left.Version.IsEqual(right.Version); + + /// + /// Checks if 2 are not .Equal(). + /// + public static bool operator !=(RedisFeatures left, RedisFeatures right) => !left.Version.IsEqual(right.Version); + } +} + +internal static class VersionExtensions +{ + // normalize two version parts and smash them together into a long; if either part is -ve, + // zero is used instead; this gives us consistent ordering following "long" rules + private static long ComposeMajorMinor(Version version) // always specified + => (((long)version.Major) << 32) | (long)version.Minor; + + private static long ComposeBuildRevision(Version version) // can be -ve for "not specified" + { + int build = version.Build, revision = version.Revision; + return (((long)(build < 0 ? 0 : build)) << 32) | (long)(revision < 0 ? 
0 : revision); + } + + internal static int GetNormalizedHashCode(this Version value) + => (ComposeMajorMinor(value) * ComposeBuildRevision(value)).GetHashCode(); + + internal static bool IsEqual(this Version x, Version y) + => ComposeMajorMinor(x) == ComposeMajorMinor(y) + && ComposeBuildRevision(x) == ComposeBuildRevision(y); + + internal static bool IsAtLeast(this Version x, Version y) + { + // think >=, but: without the... "unusual behaviour" in how Version's >= operator + // compares values with different part lengths, i.e. "6.0" **is not** >= "6.0.0" + // under the inbuilt operator + var delta = ComposeMajorMinor(x) - ComposeMajorMinor(y); + if (delta > 0) return true; + return delta < 0 ? false : ComposeBuildRevision(x) >= ComposeBuildRevision(y); } } diff --git a/src/StackExchange.Redis/RedisKey.cs b/src/StackExchange.Redis/RedisKey.cs index cd409caa9..2d192c244 100644 --- a/src/StackExchange.Redis/RedisKey.cs +++ b/src/StackExchange.Redis/RedisKey.cs @@ -1,14 +1,17 @@ using System; +using System.Buffers; +using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; using System.Text; namespace StackExchange.Redis { /// - /// Represents a key that can be stored in redis + /// Represents a key that can be stored in redis. /// public readonly struct RedisKey : IEquatable { - internal RedisKey(byte[] keyPrefix, object keyValue) + internal RedisKey(byte[]? keyPrefix, object? keyValue) { KeyPrefix = keyPrefix?.Length == 0 ? null : keyPrefix; KeyValue = keyValue; @@ -17,12 +20,14 @@ internal RedisKey(byte[] keyPrefix, object keyValue) /// /// Creates a from a string. /// - public RedisKey(string key) : this(null, key) { } + public RedisKey(string? 
key) : this(null, key) { } - internal RedisKey AsPrefix() => new RedisKey((byte[])this, null); + internal RedisKey AsPrefix() => new RedisKey((byte[]?)this, null); internal bool IsNull => KeyPrefix == null && KeyValue == null; + internal static RedisKey Null { get; } = new RedisKey(null, null); + internal bool IsEmpty { get @@ -34,136 +39,190 @@ internal bool IsEmpty } } - internal byte[] KeyPrefix { get; } - internal object KeyValue { get; } + internal byte[]? KeyPrefix { get; } + internal object? KeyValue { get; } /// - /// Indicate whether two keys are not equal + /// Indicate whether two keys are not equal. /// /// The first to compare. /// The second to compare. - public static bool operator !=(RedisKey x, RedisKey y) => !(x == y); + public static bool operator !=(RedisKey x, RedisKey y) => !x.EqualsImpl(in y); /// - /// Indicate whether two keys are not equal + /// Indicate whether two keys are not equal. /// /// The first to compare. /// The second to compare. - public static bool operator !=(string x, RedisKey y) => !(x == y); + public static bool operator !=(string x, RedisKey y) => !y.EqualsImpl(new RedisKey(x)); /// - /// Indicate whether two keys are not equal + /// Indicate whether two keys are not equal. /// /// The first to compare. /// The second to compare. - public static bool operator !=(byte[] x, RedisKey y) => !(x == y); + public static bool operator !=(byte[] x, RedisKey y) => !y.EqualsImpl(new RedisKey(null, x)); /// - /// Indicate whether two keys are not equal + /// Indicate whether two keys are not equal. /// /// The first to compare. /// The second to compare. - public static bool operator !=(RedisKey x, string y) => !(x == y); + public static bool operator !=(RedisKey x, string y) => !x.EqualsImpl(new RedisKey(y)); /// - /// Indicate whether two keys are not equal + /// Indicate whether two keys are not equal. /// /// The first to compare. /// The second to compare. 
- public static bool operator !=(RedisKey x, byte[] y) => !(x == y); + public static bool operator !=(RedisKey x, byte[] y) => !x.EqualsImpl(new RedisKey(null, y)); /// - /// Indicate whether two keys are equal + /// Indicate whether two keys are equal. /// /// The first to compare. /// The second to compare. - public static bool operator ==(RedisKey x, RedisKey y) => CompositeEquals(x.KeyPrefix, x.KeyValue, y.KeyPrefix, y.KeyValue); + public static bool operator ==(RedisKey x, RedisKey y) => x.EqualsImpl(in y); /// - /// Indicate whether two keys are equal + /// Indicate whether two keys are equal. /// /// The first to compare. /// The second to compare. - public static bool operator ==(string x, RedisKey y) => CompositeEquals(null, x, y.KeyPrefix, y.KeyValue); + public static bool operator ==(string x, RedisKey y) => y.EqualsImpl(new RedisKey(x)); /// - /// Indicate whether two keys are equal + /// Indicate whether two keys are equal. /// /// The first to compare. /// The second to compare. - public static bool operator ==(byte[] x, RedisKey y) => CompositeEquals(null, x, y.KeyPrefix, y.KeyValue); + public static bool operator ==(byte[] x, RedisKey y) => y.EqualsImpl(new RedisKey(null, x)); /// - /// Indicate whether two keys are equal + /// Indicate whether two keys are equal. /// /// The first to compare. /// The second to compare. - public static bool operator ==(RedisKey x, string y) => CompositeEquals(x.KeyPrefix, x.KeyValue, null, y); + public static bool operator ==(RedisKey x, string y) => x.EqualsImpl(new RedisKey(y)); /// - /// Indicate whether two keys are equal + /// Indicate whether two keys are equal. /// /// The first to compare. /// The second to compare. - public static bool operator ==(RedisKey x, byte[] y) => CompositeEquals(x.KeyPrefix, x.KeyValue, null, y); + public static bool operator ==(RedisKey x, byte[] y) => x.EqualsImpl(new RedisKey(null, y)); /// - /// See Object.Equals + /// See . /// /// The to compare to. 
- public override bool Equals(object obj) + public override bool Equals(object? obj) => obj switch { - if (obj is RedisKey other) - { - return CompositeEquals(KeyPrefix, KeyValue, other.KeyPrefix, other.KeyValue); - } - if (obj is string || obj is byte[]) - { - return CompositeEquals(KeyPrefix, KeyValue, null, obj); - } - return false; - } + null => IsNull, + RedisKey key => EqualsImpl(in key), + string s => EqualsImpl(new RedisKey(s)), + byte[] b => EqualsImpl(new RedisKey(null, b)), + _ => false, + }; /// - /// Indicate whether two keys are equal + /// Indicate whether two keys are equal. /// /// The to compare to. - public bool Equals(RedisKey other) => CompositeEquals(KeyPrefix, KeyValue, other.KeyPrefix, other.KeyValue); + public bool Equals(RedisKey other) => EqualsImpl(in other); - private static bool CompositeEquals(byte[] keyPrefix0, object keyValue0, byte[] keyPrefix1, object keyValue1) + private bool EqualsImpl(in RedisKey other) { - if (RedisValue.Equals(keyPrefix0, keyPrefix1)) + if (IsNull) + { + return other.IsNull; + } + else if (other.IsNull) { - if (keyValue0 == keyValue1) return true; // ref equal - if (keyValue0 == null || keyValue1 == null) return false; // null vs non-null + return false; + } + + // if there's no prefix, we might be able to do a simple compare + if (RedisValue.Equals(KeyPrefix, other.KeyPrefix)) + { + if ((object?)KeyValue == (object?)other.KeyValue) return true; // ref equal + + if (KeyValue is string keyString1 && other.KeyValue is string keyString2) return keyString1 == keyString2; + if (KeyValue is byte[] keyBytes1 && other.KeyValue is byte[] keyBytes2) return RedisValue.Equals(keyBytes1, keyBytes2); + } - if (keyValue0 is string && keyValue1 is string) return ((string)keyValue0) == ((string)keyValue1); - if (keyValue0 is byte[] && keyValue1 is byte[]) return RedisValue.Equals((byte[])keyValue0, (byte[])keyValue1); + int len = TotalLength(); + if (len != other.TotalLength()) + { + return false; // different length; can't 
be equal + } + if (len == 0) + { + return true; // both empty + } + if (len <= 128) + { + return CopyCompare(in this, in other, len, stackalloc byte[len * 2]); + } + else + { + byte[] arr = ArrayPool.Shared.Rent(len * 2); + var result = CopyCompare(in this, in other, len, arr); + ArrayPool.Shared.Return(arr); + return result; } - return RedisValue.Equals(ConcatenateBytes(keyPrefix0, keyValue0, null), ConcatenateBytes(keyPrefix1, keyValue1, null)); + static bool CopyCompare(in RedisKey x, in RedisKey y, int length, Span span) + { + Span span1 = span.Slice(0, length), span2 = span.Slice(length, length); + var written = x.CopyTo(span1); + Debug.Assert(written == length, "length error (1)"); + written = y.CopyTo(span2); + Debug.Assert(written == length, "length error (2)"); + return span1.SequenceEqual(span2); + } } - /// - /// See Object.GetHashCode - /// + /// public override int GetHashCode() { - int chk0 = KeyPrefix == null ? 0 : RedisValue.GetHashCode(KeyPrefix), - chk1 = KeyValue is string ? 
KeyValue.GetHashCode() : RedisValue.GetHashCode((byte[])KeyValue); + // note that we need need eaulity-like behavior, regardless of whether the + // parts look like bytes or strings, and with/without prefix + + // the simplest way to do this is to use the CopyTo version, which normalizes that + if (IsNull) return -1; + if (TryGetSimpleBuffer(out var buffer)) return RedisValue.GetHashCode(buffer); + var len = TotalLength(); + if (len == 0) return 0; - return unchecked((17 * chk0) + chk1); + if (len <= 256) + { + Span span = stackalloc byte[len]; + var written = CopyTo(span); + Debug.Assert(written == len); + return RedisValue.GetHashCode(span); + } + else + { + var arr = ArrayPool.Shared.Rent(len); + var span = new Span(arr, 0, len); + var written = CopyTo(span); + Debug.Assert(written == len); + var result = RedisValue.GetHashCode(span); + ArrayPool.Shared.Return(arr); + return result; + } } /// - /// Obtains a string representation of the key + /// Obtains a string representation of the key. /// - public override string ToString() => ((string)this) ?? "(null)"; + public override string ToString() => ((string?)this) ?? "(null)"; internal RedisValue AsRedisValue() { - if (KeyPrefix == null && KeyValue is string) return (string)KeyValue; - return (byte[])this; + if (KeyPrefix == null && KeyValue is string keyString) return keyString; + return (byte[]?)this; } internal void AssertNotNull() @@ -175,18 +234,19 @@ internal void AssertNotNull() /// Create a from a . /// /// The string to get a key from. - public static implicit operator RedisKey(string key) + public static implicit operator RedisKey(string? key) { - if (key == null) return default(RedisKey); + if (key == null) return default; return new RedisKey(null, key); } + /// /// Create a from a . /// /// The byte array to get a key from. - public static implicit operator RedisKey(byte[] key) + public static implicit operator RedisKey(byte[]? 
key) { - if (key == null) return default(RedisKey); + if (key == null) return default; return new RedisKey(null, key); } @@ -194,50 +254,71 @@ public static implicit operator RedisKey(byte[] key) /// Obtain the as a . /// /// The key to get a byte array for. - public static implicit operator byte[] (RedisKey key) => ConcatenateBytes(key.KeyPrefix, key.KeyValue, null); + public static implicit operator byte[]?(RedisKey key) + { + if (key.IsNull) return null; + if (key.TryGetSimpleBuffer(out var arr)) return arr; + + var len = key.TotalLength(); + if (len == 0) return Array.Empty(); + arr = new byte[len]; + var written = key.CopyTo(arr); + Debug.Assert(written == len, "length/copyto error"); + return arr; + } /// /// Obtain the key as a . /// /// The key to get a string for. - public static implicit operator string(RedisKey key) + public static implicit operator string?(RedisKey key) { - byte[] arr; - if (key.KeyPrefix == null) + if (key.KeyPrefix is null) { - if (key.KeyValue == null) return null; + return key.KeyValue switch + { + null => null, + string s => s, + object o => Get((byte[])o, -1), + }; + } - if (key.KeyValue is string) return (string)key.KeyValue; + var len = key.TotalLength(); + var arr = ArrayPool.Shared.Rent(len); + var written = key.CopyTo(arr); + Debug.Assert(written == len, "length error"); + var result = Get(arr, len); + ArrayPool.Shared.Return(arr); + return result; - arr = (byte[])key.KeyValue; - } - else - { - arr = (byte[])key; - } - if (arr == null) return null; - try - { - return Encoding.UTF8.GetString(arr); - } - catch + static string? 
Get(byte[] arr, int length) { - return BitConverter.ToString(arr); + if (length == -1) length = arr.Length; + if (length == 0) return ""; + try + { + return Encoding.UTF8.GetString(arr, 0, length); + } + catch (Exception e) when // Only catch exception throwed by Encoding.UTF8.GetString + (e is DecoderFallbackException + || e is ArgumentException + || e is ArgumentNullException) + { + return BitConverter.ToString(arr, 0, length); + } } } /// - /// Concatenate two keys + /// Concatenate two keys. /// /// The first to add. /// The second to add. - [Obsolete] - public static RedisKey operator +(RedisKey x, RedisKey y) - { - return new RedisKey(ConcatenateBytes(x.KeyPrefix, x.KeyValue, y.KeyPrefix), y.KeyValue); - } + [Obsolete("Prefer WithPrefix")] + public static RedisKey operator +(RedisKey x, RedisKey y) => + new RedisKey(ConcatenateBytes(x.KeyPrefix, x.KeyValue, y.KeyPrefix), y.KeyValue); - internal static RedisKey WithPrefix(byte[] prefix, RedisKey value) + internal static RedisKey WithPrefix(byte[]? prefix, RedisKey value) { if (prefix == null || prefix.Length == 0) return value; if (value.KeyPrefix == null) return new RedisKey(prefix, value.KeyValue); @@ -250,7 +331,7 @@ internal static RedisKey WithPrefix(byte[] prefix, RedisKey value) return new RedisKey(copy, value.KeyValue); } - internal static byte[] ConcatenateBytes(byte[] a, object b, byte[] c) + internal static byte[]? ConcatenateBytes(byte[]? a, object? b, byte[]? c) { if ((a == null || a.Length == 0) && (c == null || c.Length == 0)) { @@ -260,13 +341,13 @@ internal static byte[] ConcatenateBytes(byte[] a, object b, byte[] c) } int aLen = a?.Length ?? 0, - bLen = b == null ? 0 : (b is string - ? Encoding.UTF8.GetByteCount((string)b) + bLen = b == null ? 0 : (b is string bString + ? Encoding.UTF8.GetByteCount(bString) : ((byte[])b).Length), cLen = c?.Length ?? 
0; var result = new byte[aLen + bLen + cLen]; - if (aLen != 0) Buffer.BlockCopy(a, 0, result, 0, aLen); + if (aLen != 0) Buffer.BlockCopy(a!, 0, result, 0, aLen); if (bLen != 0) { if (b is string s) @@ -275,18 +356,17 @@ internal static byte[] ConcatenateBytes(byte[] a, object b, byte[] c) } else { - Buffer.BlockCopy((byte[])b, 0, result, aLen, bLen); + Buffer.BlockCopy((byte[])b!, 0, result, aLen, bLen); } } - if (cLen != 0) Buffer.BlockCopy(c, 0, result, aLen + bLen, cLen); + if (cLen != 0) Buffer.BlockCopy(c!, 0, result, aLen + bLen, cLen); return result; } /// /// Prepends p to this RedisKey, returning a new RedisKey. /// - /// Avoids some allocations if possible, repeated Prepend/Appends make - /// it less possible. + /// Avoids some allocations if possible, repeated Prepend/Appends make it less possible. /// /// /// The prefix to prepend. @@ -295,11 +375,73 @@ internal static byte[] ConcatenateBytes(byte[] a, object b, byte[] c) /// /// Appends p to this RedisKey, returning a new RedisKey. /// - /// Avoids some allocations if possible, repeated Prepend/Appends make - /// it less possible. + /// Avoids some allocations if possible, repeated Prepend/Appends make it less possible. /// /// /// The suffix to append. public RedisKey Append(RedisKey suffix) => WithPrefix(this, suffix); + + internal bool TryGetSimpleBuffer([NotNullWhen(true)] out byte[]? arr) + { + arr = KeyValue is null ? Array.Empty() : KeyValue as byte[]; + return arr is not null && (KeyPrefix is null || KeyPrefix.Length == 0); + } + + internal int TotalLength() => + (KeyPrefix is null ? 0 : KeyPrefix.Length) + KeyValue switch + { + null => 0, + string s => Encoding.UTF8.GetByteCount(s), + _ => ((byte[])KeyValue).Length, + }; + + internal int MaxByteCount() => + (KeyPrefix is null ? 
0 : KeyPrefix.Length) + KeyValue switch + { + null => 0, + string s => Encoding.UTF8.GetMaxByteCount(s.Length), + _ => ((byte[])KeyValue).Length, + }; + + internal int CopyTo(Span destination) + { + int written = 0; + if (KeyPrefix is not null && KeyPrefix.Length != 0) + { + KeyPrefix.CopyTo(destination); + written += KeyPrefix.Length; + destination = destination.Slice(KeyPrefix.Length); + } + switch (KeyValue) + { + case null: + break; // nothing to do + case string s: + if (s.Length != 0) + { +#if NET + written += Encoding.UTF8.GetBytes(s, destination); +#else + unsafe + { + fixed (byte* bPtr = destination) + { + fixed (char* cPtr = s) + { + written += Encoding.UTF8.GetBytes(cPtr, s.Length, bPtr, destination.Length); + } + } + } +#endif + } + break; + default: + var arr = (byte[])KeyValue; + arr.CopyTo(destination); + written += arr.Length; + break; + } + return written; + } } } diff --git a/src/StackExchange.Redis/RedisLiterals.cs b/src/StackExchange.Redis/RedisLiterals.cs index d0806bd9c..9a8f54971 100644 --- a/src/StackExchange.Redis/RedisLiterals.cs +++ b/src/StackExchange.Redis/RedisLiterals.cs @@ -1,14 +1,17 @@ using System; -using System.Text; namespace StackExchange.Redis { - internal static class CommonReplies +#pragma warning disable SA1310 // Field names should not contain underscore +#pragma warning disable SA1311 // Static readonly fields should begin with upper-case letter + internal static partial class CommonReplies { public static readonly CommandBytes ASK = "ASK ", authFail_trimmed = CommandBytes.TrimToFit("ERR operation not permitted"), backgroundSavingStarted_trimmed = CommandBytes.TrimToFit("Background saving started"), + backgroundSavingAOFStarted_trimmed = + CommandBytes.TrimToFit("Background append only file rewriting started"), databases = "databases", loading = "LOADING ", MOVED = "MOVED ", @@ -24,94 +27,145 @@ public static readonly CommandBytes slave_read_only = "slave-read-only", timeout = "timeout", wildcard = "*", + WRONGPASS = 
"WRONGPASS", yes = "yes", zero = "0", - // streams - length = "length", - radixTreeKeys = "radix-tree-keys", - radixTreeNodes = "radix-tree-nodes", - groups = "groups", - lastGeneratedId = "last-generated-id", - firstEntry = "first-entry", - lastEntry = "last-entry"; + // HELLO + version = "version", + proto = "proto", + role = "role", + mode = "mode", + id = "id"; } + internal static class RedisLiterals { // unlike primary commands, these do not get altered by the command-map; we may as // well compute the bytes once and share them public static readonly RedisValue + ACLCAT = "ACLCAT", ADDR = "ADDR", AFTER = "AFTER", AGGREGATE = "AGGREGATE", ALPHA = "ALPHA", AND = "AND", + ANDOR = "ANDOR", + ANY = "ANY", + ASC = "ASC", BEFORE = "BEFORE", + BIT = "BIT", BY = "BY", + BYLEX = "BYLEX", + BYSCORE = "BYSCORE", + BYTE = "BYTE", + CH = "CH", CHANNELS = "CHANNELS", - COPY = "COPY", COUNT = "COUNT", + DB = "DB", + @default = "default", DESC = "DESC", + DIFF = "DIFF", + DIFF1 = "DIFF1", DOCTOR = "DOCTOR", + ENCODING = "ENCODING", EX = "EX", + EXAT = "EXAT", EXISTS = "EXISTS", + FIELDS = "FIELDS", + FILTERBY = "FILTERBY", FLUSH = "FLUSH", + FNX = "FNX", + FREQ = "FREQ", + FXX = "FXX", GET = "GET", + GETKEYS = "GETKEYS", GETNAME = "GETNAME", + GT = "GT", HISTORY = "HISTORY", ID = "ID", + IDX = "IDX", IDLETIME = "IDLETIME", + IDMP = "IDMP", + IDMPAUTO = "IDMPAUTO", + IDMP_DURATION = "IDMP-DURATION", + IDMP_MAXSIZE = "IDMP-MAXSIZE", + KEEPTTL = "KEEPTTL", KILL = "KILL", + LADDR = "LADDR", LATEST = "LATEST", + LEFT = "LEFT", + LEN = "LEN", + lib_name = "lib-name", + lib_ver = "lib-ver", LIMIT = "LIMIT", LIST = "LIST", - LOAD = "LOAD", + LT = "LT", MATCH = "MATCH", MALLOC_STATS = "MALLOC-STATS", MAX = "MAX", + MAXAGE = "MAXAGE", + MAXLEN = "MAXLEN", MIN = "MIN", + MINMATCHLEN = "MINMATCHLEN", + MODULE = "MODULE", NODES = "NODES", NOSAVE = "NOSAVE", NOT = "NOT", + NOVALUES = "NOVALUES", NUMPAT = "NUMPAT", NUMSUB = "NUMSUB", NX = "NX", OBJECT = "OBJECT", + ONE = "ONE", OR = "OR", + 
PATTERN = "PATTERN", PAUSE = "PAUSE", + PERSIST = "PERSIST", PING = "PING", PURGE = "PURGE", PX = "PX", + PXAT = "PXAT", + RANK = "RANK", + REFCOUNT = "REFCOUNT", REPLACE = "REPLACE", RESET = "RESET", RESETSTAT = "RESETSTAT", + REV = "REV", REWRITE = "REWRITE", + RIGHT = "RIGHT", SAVE = "SAVE", SEGFAULT = "SEGFAULT", SET = "SET", + SETINFO = "SETINFO", SETNAME = "SETNAME", SKIPME = "SKIPME", STATS = "STATS", + STOP = "STOP", STORE = "STORE", TYPE = "TYPE", + USERNAME = "USERNAME", WEIGHTS = "WEIGHTS", + WITHMATCHLEN = "WITHMATCHLEN", WITHSCORES = "WITHSCORES", + WITHVALUES = "WITHVALUES", XOR = "XOR", XX = "XX", // Sentinel Literals MASTERS = "MASTERS", MASTER = "MASTER", + REPLICAS = "REPLICAS", SLAVES = "SLAVES", GETMASTERADDRBYNAME = "GET-MASTER-ADDR-BY-NAME", - // RESET = "RESET", + // RESET = "RESET", FAILOVER = "FAILOVER", SENTINELS = "SENTINELS", // Sentinel Literals as of 2.8.4 MONITOR = "MONITOR", REMOVE = "REMOVE", - // SET = "SET", + // SET = "SET", // replication states connect = "connect", @@ -122,9 +176,25 @@ public static readonly RedisValue sync = "sync", MinusSymbol = "-", - PlusSumbol = "+", + PlusSymbol = "+", Wildcard = "*", + // Geo Radius/Search Literals + BYBOX = "BYBOX", + BYRADIUS = "BYRADIUS", + FROMMEMBER = "FROMMEMBER", + FROMLONLAT = "FROMLONLAT", + STOREDIST = "STOREDIST", + WITHCOORD = "WITHCOORD", + WITHDIST = "WITHDIST", + WITHHASH = "WITHHASH", + + // geo units + ft = "ft", + km = "km", + m = "m", + mi = "mi", + // misc (config, etc) databases = "databases", master = "master", @@ -141,16 +211,19 @@ public static readonly RedisValue timeout = "timeout", yes = "yes"; - internal static RedisValue Get(Bitwise operation) + internal static RedisValue Get(Bitwise operation) => operation switch { - switch (operation) - { - case Bitwise.And: return AND; - case Bitwise.Or: return OR; - case Bitwise.Xor: return XOR; - case Bitwise.Not: return NOT; - default: throw new ArgumentOutOfRangeException(nameof(operation)); - } - } + Bitwise.And => 
AND, + Bitwise.Or => OR, + Bitwise.Xor => XOR, + Bitwise.Not => NOT, + Bitwise.Diff => DIFF, + Bitwise.Diff1 => DIFF1, + Bitwise.AndOr => ANDOR, + Bitwise.One => ONE, + _ => throw new ArgumentOutOfRangeException(nameof(operation)), + }; } +#pragma warning restore SA1310 // Field names should not contain underscore +#pragma warning restore SA1311 // Static readonly fields should begin with upper-case letter } diff --git a/src/StackExchange.Redis/RedisProtocol.cs b/src/StackExchange.Redis/RedisProtocol.cs new file mode 100644 index 000000000..077671bd6 --- /dev/null +++ b/src/StackExchange.Redis/RedisProtocol.cs @@ -0,0 +1,22 @@ +namespace StackExchange.Redis; + +/// +/// Indicates the protocol for communicating with the server. +/// +public enum RedisProtocol +{ + // note: the non-binary safe protocol is not supported by the client, although the parser does support it (it is used in the toy server) + + // important: please use "major_minor_revision" numbers (two digit minor/revision), to allow for possible scenarios like + // "hey, we've added RESP 3.1; oops, we've added RESP 3.1.1" + + /// + /// The protocol used by all redis server versions since 1.2, as defined by https://github.com/redis/redis-specifications/blob/master/protocol/RESP2.md. + /// + Resp2 = 2_00_00, // major__minor__revision + + /// + /// Opt-in variant introduced in server version 6, as defined by https://github.com/redis/redis-specifications/blob/master/protocol/RESP3.md. 
+ /// + Resp3 = 3_00_00, // major__minor__revision +} diff --git a/src/StackExchange.Redis/RedisResult.cs b/src/StackExchange.Redis/RedisResult.cs index 5ac1735e2..4a1644c36 100644 --- a/src/StackExchange.Redis/RedisResult.cs +++ b/src/StackExchange.Redis/RedisResult.cs @@ -1,20 +1,31 @@ using System; using System.Collections.Generic; +using System.ComponentModel; +using System.Diagnostics.CodeAnalysis; using System.Runtime.CompilerServices; namespace StackExchange.Redis { /// - /// Represents a general-purpose result from redis, that may be cast into various anticipated types + /// Represents a general-purpose result from redis, that may be cast into various anticipated types. /// public abstract class RedisResult { + /// + /// Do not use. + /// + [Obsolete("Please specify a result type", true)] // retained purely for binary compat + public RedisResult() : this(default) { } + + internal RedisResult(ResultType resultType) => Resp3Type = resultType; + /// /// Create a new RedisResult representing a single value. /// /// The to create a result from. - /// The type of result being represented + /// The type of result being represented. /// new . + [SuppressMessage("ApiDesign", "RS0027:Public API with optional parameter(s) should have the most parameters amongst its public overloads", Justification = "Legacy compat.")] public static RedisResult Create(RedisValue value, ResultType? resultType = null) => new SingleRedisResult(value, resultType); /// @@ -22,9 +33,18 @@ public abstract class RedisResult /// /// The s to create a result from. /// new . - public static RedisResult Create(RedisValue[] values) => - values == null ? NullArray : values.Length == 0 ? EmptyArray : - new ArrayRedisResult(Array.ConvertAll(values, value => new SingleRedisResult(value, null))); + public static RedisResult Create(RedisValue[] values) + => Create(values, ResultType.Array); + + /// + /// Create a new RedisResult representing an array of values. + /// + /// The s to create a result from. 
+ /// The explicit data type. + /// new . + public static RedisResult Create(RedisValue[] values, ResultType resultType) => + values == null ? NullArray : values.Length == 0 ? EmptyArray(resultType) : + new ArrayRedisResult(Array.ConvertAll(values, value => new SingleRedisResult(value, null)), resultType); /// /// Create a new RedisResult representing an array of values. @@ -32,64 +52,135 @@ public static RedisResult Create(RedisValue[] values) => /// The s to create a result from. /// new . public static RedisResult Create(RedisResult[] values) - => values == null ? NullArray : values.Length == 0 ? EmptyArray : new ArrayRedisResult(values); + => Create(values, ResultType.Array); + + /// + /// Create a new RedisResult representing an array of values. + /// + /// The s to create a result from. + /// The explicit data type. + /// new . + public static RedisResult Create(RedisResult[] values, ResultType resultType) + => values == null ? NullArray : values.Length == 0 ? EmptyArray(resultType) : new ArrayRedisResult(values, resultType); + + /// + /// An empty array result. + /// + internal static RedisResult EmptyArray(ResultType type) => type switch + { + ResultType.Array => s_EmptyArray ??= new ArrayRedisResult(Array.Empty(), type), + ResultType.Set => s_EmptySet ??= new ArrayRedisResult(Array.Empty(), type), + ResultType.Map => s_EmptyMap ??= new ArrayRedisResult(Array.Empty(), type), + _ => new ArrayRedisResult(Array.Empty(), type), + }; + + private static RedisResult? s_EmptyArray, s_EmptySet, s_EmptyMap; + + /// + /// A null array result. + /// + internal static RedisResult NullArray { get; } = new ArrayRedisResult(null, ResultType.Null); /// - /// An empty array result + /// A null single result, to use as a default for invalid returns. 
/// - internal static RedisResult EmptyArray { get; } = new ArrayRedisResult(Array.Empty()); + internal static RedisResult NullSingle { get; } = new SingleRedisResult(RedisValue.Null, ResultType.Null); /// - /// A null array result + /// Gets the number of elements in this item if it is a valid array, or -1 otherwise. /// - internal static RedisResult NullArray { get; } = new ArrayRedisResult(null); + public virtual int Length => -1; + + /// + public sealed override string ToString() => ToString(out _) ?? ""; - // internally, this is very similar to RawResult, except it is designed to be usable - // outside of the IO-processing pipeline: the buffers are standalone, etc + /// + /// Gets the string content as per , but also obtains the declared type from verbatim strings (for example LATENCY DOCTOR). + /// + /// The type of the returned string. + /// The content. + public abstract string? ToString(out string? type); - internal static RedisResult TryCreate(PhysicalConnection connection, in RawResult result) + /// + /// Internally, this is very similar to RawResult, except it is designed to be usable, + /// outside of the IO-processing pipeline: the buffers are standalone, etc. + /// + internal static bool TryCreate(PhysicalConnection? connection, in RawResult result, [NotNullWhen(true)] out RedisResult? 
redisResult) { try { - switch (result.Type) + switch (result.Resp2TypeBulkString) { case ResultType.Integer: case ResultType.SimpleString: case ResultType.BulkString: - return new SingleRedisResult(result.AsRedisValue(), result.Type); - case ResultType.MultiBulk: - if (result.IsNull) return NullArray; + redisResult = new SingleRedisResult(result.AsRedisValue(), result.Resp3Type); + return true; + case ResultType.Array: + if (result.IsNull) + { + redisResult = NullArray; + return true; + } var items = result.GetItems(); - if (items.Length == 0) return EmptyArray; + if (items.Length == 0) + { + redisResult = EmptyArray(result.Resp3Type); + return true; + } var arr = new RedisResult[items.Length]; int i = 0; foreach (ref RawResult item in items) { - var next = TryCreate(connection, in item); - if (next == null) return null; // means we didn't understand - arr[i++] = next; + if (TryCreate(connection, in item, out var next)) + { + arr[i++] = next; + } + else + { + redisResult = null; + return false; + } } - return new ArrayRedisResult(arr); + redisResult = new ArrayRedisResult(arr, result.Resp3Type); + return true; case ResultType.Error: - return new ErrorRedisResult(result.GetString()); + redisResult = new ErrorRedisResult(result.GetString(), result.Resp3Type); + return true; default: - return null; + redisResult = null; + return false; } } catch (Exception ex) { connection?.OnInternalError(ex); - return null; // will be logged as a protocol fail by the processor + redisResult = null; + return false; // will be logged as a protocol fail by the processor } } /// - /// Indicate the type of result that was received from redis + /// Indicate the type of result that was received from redis, in RESP2 terms. 
/// - public abstract ResultType Type { get; } + [Obsolete($"Please use either {nameof(Resp2Type)} (simplified) or {nameof(Resp3Type)} (full)")] + [Browsable(false), EditorBrowsable(EditorBrowsableState.Never)] + public ResultType Type => Resp2Type; /// - /// Indicates whether this result was a null result + /// Indicate the type of result that was received from redis, in RESP3 terms. + /// + public ResultType Resp3Type { get; } + + /// + /// Indicate the type of result that was received from redis, in RESP2 terms. + /// + public ResultType Resp2Type => Resp3Type == ResultType.Null ? Resp2NullType : Resp3Type.ToResp2(); + + internal virtual ResultType Resp2NullType => ResultType.BulkString; + + /// + /// Indicates whether this result was a null result. /// public abstract bool IsNull { get; } @@ -97,306 +188,349 @@ internal static RedisResult TryCreate(PhysicalConnection connection, in RawResul /// Interprets the result as a . /// /// The result to convert to a . - public static explicit operator string(RedisResult result) => result?.AsString(); + public static explicit operator string?(RedisResult? result) => result?.AsString(); + /// /// Interprets the result as a . /// /// The result to convert to a . - public static explicit operator byte[](RedisResult result) => result?.AsByteArray(); + public static explicit operator byte[]?(RedisResult? result) => result?.AsByteArray(); + /// /// Interprets the result as a . /// /// The result to convert to a . public static explicit operator double(RedisResult result) => result.AsDouble(); + /// /// Interprets the result as an . /// /// The result to convert to a . public static explicit operator long(RedisResult result) => result.AsInt64(); + /// /// Interprets the result as an . /// /// The result to convert to a . [CLSCompliant(false)] public static explicit operator ulong(RedisResult result) => result.AsUInt64(); + /// /// Interprets the result as an . /// /// The result to convert to a . 
public static explicit operator int(RedisResult result) => result.AsInt32(); + /// - /// Interprets the result as a + /// Interprets the result as a . /// /// The result to convert to a . public static explicit operator bool(RedisResult result) => result.AsBoolean(); + /// /// Interprets the result as a . /// /// The result to convert to a . - public static explicit operator RedisValue(RedisResult result) => result?.AsRedisValue() ?? RedisValue.Null; + public static explicit operator RedisValue(RedisResult? result) => result?.AsRedisValue() ?? RedisValue.Null; + /// /// Interprets the result as a . /// /// The result to convert to a . - public static explicit operator RedisKey(RedisResult result) => result?.AsRedisKey() ?? default; + public static explicit operator RedisKey(RedisResult? result) => result?.AsRedisKey() ?? default; + /// /// Interprets the result as a . /// /// The result to convert to a . - public static explicit operator double?(RedisResult result) => result?.AsNullableDouble(); + public static explicit operator double?(RedisResult? result) => result?.AsNullableDouble(); + /// /// Interprets the result as a . /// /// The result to convert to a . - public static explicit operator long?(RedisResult result) => result?.AsNullableInt64(); + public static explicit operator long?(RedisResult? result) => result?.AsNullableInt64(); + /// /// Interprets the result as a . /// /// The result to convert to a . [CLSCompliant(false)] - public static explicit operator ulong?(RedisResult result) => result?.AsNullableUInt64(); + public static explicit operator ulong?(RedisResult? result) => result?.AsNullableUInt64(); + /// /// Interprets the result as a . /// /// The result to convert to a . - public static explicit operator int?(RedisResult result) => result?.AsNullableInt32(); + public static explicit operator int?(RedisResult? result) => result?.AsNullableInt32(); + /// /// Interprets the result as a . /// /// The result to convert to a . 
- public static explicit operator bool?(RedisResult result) => result?.AsNullableBoolean(); + public static explicit operator bool?(RedisResult? result) => result?.AsNullableBoolean(); + /// /// Interprets the result as a . /// /// The result to convert to a . - public static explicit operator string[](RedisResult result) => result?.AsStringArray(); + public static explicit operator string?[]?(RedisResult? result) => result?.AsStringArray(); + /// /// Interprets the result as a . /// /// The result to convert to a . - public static explicit operator byte[][](RedisResult result) => result?.AsByteArrayArray(); + public static explicit operator byte[]?[]?(RedisResult? result) => result?.AsByteArrayArray(); + /// /// Interprets the result as a . /// /// The result to convert to a . - public static explicit operator double[](RedisResult result) => result?.AsDoubleArray(); + public static explicit operator double[]?(RedisResult? result) => result?.AsDoubleArray(); + /// /// Interprets the result as a . /// /// The result to convert to a . - public static explicit operator long[](RedisResult result) => result?.AsInt64Array(); + public static explicit operator long[]?(RedisResult? result) => result?.AsInt64Array(); + /// /// Interprets the result as a . /// /// The result to convert to a . [CLSCompliant(false)] - public static explicit operator ulong[](RedisResult result) => result?.AsUInt64Array(); + public static explicit operator ulong[]?(RedisResult? result) => result?.AsUInt64Array(); + /// /// Interprets the result as a . /// /// The result to convert to a . - public static explicit operator int[](RedisResult result) => result?.AsInt32Array(); + public static explicit operator int[]?(RedisResult? result) => result?.AsInt32Array(); + /// /// Interprets the result as a . /// /// The result to convert to a . - public static explicit operator bool[](RedisResult result) => result?.AsBooleanArray(); + public static explicit operator bool[]?(RedisResult? 
result) => result?.AsBooleanArray(); + /// /// Interprets the result as a . /// /// The result to convert to a . - public static explicit operator RedisValue[](RedisResult result) => result?.AsRedisValueArray(); + public static explicit operator RedisValue[]?(RedisResult? result) => result?.AsRedisValueArray(); + /// /// Interprets the result as a . /// /// The result to convert to a . - public static explicit operator RedisKey[](RedisResult result) => result?.AsRedisKeyArray(); + public static explicit operator RedisKey[]?(RedisResult? result) => result?.AsRedisKeyArray(); + /// /// Interprets the result as a . /// /// The result to convert to a . - public static explicit operator RedisResult[](RedisResult result) => result?.AsRedisResultArray(); + public static explicit operator RedisResult[]?(RedisResult? result) => result?.AsRedisResultArray(); /// - /// Interprets a multi-bulk result with successive key/name values as a dictionary keyed by name + /// Interprets a multi-bulk result with successive key/name values as a dictionary keyed by name. /// - /// The key comparator to use, or by default - public Dictionary ToDictionary(IEqualityComparer comparer = null) + /// The key comparator to use, or by default. + public Dictionary ToDictionary(IEqualityComparer? comparer = null) { var arr = AsRedisResultArray(); + if (arr is null) + { + return new Dictionary(); + } int len = arr.Length / 2; var result = new Dictionary(len, comparer ?? StringComparer.InvariantCultureIgnoreCase); for (int i = 0; i < arr.Length; i += 2) { - result.Add(arr[i].AsString(), arr[i + 1]); + result.Add(arr[i].AsString()!, arr[i + 1]); } return result; } + /// + /// Get a sub-item by index. 
+ /// + public virtual RedisResult this[int index] => throw new InvalidOperationException("Indexers can only be used on array results"); + internal abstract bool AsBoolean(); - internal abstract bool[] AsBooleanArray(); - internal abstract byte[] AsByteArray(); - internal abstract byte[][] AsByteArrayArray(); + internal abstract bool[]? AsBooleanArray(); + internal abstract byte[]? AsByteArray(); + internal abstract byte[][]? AsByteArrayArray(); internal abstract double AsDouble(); - internal abstract double[] AsDoubleArray(); + internal abstract double[]? AsDoubleArray(); internal abstract int AsInt32(); - internal abstract int[] AsInt32Array(); + internal abstract int[]? AsInt32Array(); internal abstract long AsInt64(); internal abstract ulong AsUInt64(); - internal abstract long[] AsInt64Array(); - internal abstract ulong[] AsUInt64Array(); + internal abstract long[]? AsInt64Array(); + internal abstract ulong[]? AsUInt64Array(); internal abstract bool? AsNullableBoolean(); internal abstract double? AsNullableDouble(); internal abstract int? AsNullableInt32(); internal abstract long? AsNullableInt64(); internal abstract ulong? AsNullableUInt64(); internal abstract RedisKey AsRedisKey(); - internal abstract RedisKey[] AsRedisKeyArray(); - internal abstract RedisResult[] AsRedisResultArray(); + internal abstract RedisKey[]? AsRedisKeyArray(); + internal abstract RedisResult[]? AsRedisResultArray(); internal abstract RedisValue AsRedisValue(); - internal abstract RedisValue[] AsRedisValueArray(); - internal abstract string AsString(); - internal abstract string[] AsStringArray(); + internal abstract RedisValue[]? AsRedisValueArray(); + internal abstract string? AsString(); + internal abstract string?[]? AsStringArray(); + private sealed class ArrayRedisResult : RedisResult { - public override bool IsNull => _value == null; - private readonly RedisResult[] _value; + public override bool IsNull => _value is null; + private readonly RedisResult[]? 
_value; - public override ResultType Type => ResultType.MultiBulk; - public ArrayRedisResult(RedisResult[] value) + internal override ResultType Resp2NullType => ResultType.Array; + + public ArrayRedisResult(RedisResult[]? value, ResultType resultType) : base(value is null ? ResultType.Null : resultType) { _value = value; } - public override string ToString() => _value == null ? "(nil)" : (_value.Length + " element(s)"); + public override int Length => _value is null ? -1 : _value.Length; + + public override string? ToString(out string? type) + { + type = null; + return _value == null ? "(nil)" : (_value.Length + " element(s)"); + } internal override bool AsBoolean() { - if (IsSingleton) return _value[0].AsBoolean(); + if (IsSingleton) return _value![0].AsBoolean(); throw new InvalidCastException(); } - internal override bool[] AsBooleanArray() => IsNull ? null : Array.ConvertAll(_value, x => x.AsBoolean()); + public override RedisResult this[int index] => _value![index]; + + internal override bool[]? AsBooleanArray() => IsNull ? null : Array.ConvertAll(_value!, x => x.AsBoolean()); - internal override byte[] AsByteArray() + internal override byte[]? AsByteArray() { - if (IsSingleton) return _value[0].AsByteArray(); + if (IsSingleton) return _value![0].AsByteArray(); throw new InvalidCastException(); } - internal override byte[][] AsByteArrayArray() + internal override byte[][]? AsByteArrayArray() => IsNull ? null - : _value.Length == 0 ? Array.Empty() - : Array.ConvertAll(_value, x => x.AsByteArray()); + : _value!.Length == 0 + ? Array.Empty() + : Array.ConvertAll(_value, x => x.AsByteArray()!); private bool IsSingleton => _value?.Length == 1; private bool IsEmpty => _value?.Length == 0; internal override double AsDouble() { - if (IsSingleton) return _value[0].AsDouble(); + if (IsSingleton) return _value![0].AsDouble(); throw new InvalidCastException(); } - internal override double[] AsDoubleArray() + internal override double[]? AsDoubleArray() => IsNull ? 
null : IsEmpty ? Array.Empty() - : Array.ConvertAll(_value, x => x.AsDouble()); + : Array.ConvertAll(_value!, x => x.AsDouble()); internal override int AsInt32() { - if (IsSingleton) return _value[0].AsInt32(); + if (IsSingleton) return _value![0].AsInt32(); throw new InvalidCastException(); } - internal override int[] AsInt32Array() + internal override int[]? AsInt32Array() => IsNull ? null : IsEmpty ? Array.Empty() - : Array.ConvertAll(_value, x => x.AsInt32()); + : Array.ConvertAll(_value!, x => x.AsInt32()); internal override long AsInt64() { - if (IsSingleton) return _value[0].AsInt64(); + if (IsSingleton) return _value![0].AsInt64(); throw new InvalidCastException(); } internal override ulong AsUInt64() { - if (IsSingleton) return _value[0].AsUInt64(); + if (IsSingleton) return _value![0].AsUInt64(); throw new InvalidCastException(); } - internal override long[] AsInt64Array() + internal override long[]? AsInt64Array() => IsNull ? null : IsEmpty ? Array.Empty() - : Array.ConvertAll(_value, x => x.AsInt64()); + : Array.ConvertAll(_value!, x => x.AsInt64()); - internal override ulong[] AsUInt64Array() + internal override ulong[]? AsUInt64Array() => IsNull ? null : IsEmpty ? Array.Empty() - : Array.ConvertAll(_value, x => x.AsUInt64()); + : Array.ConvertAll(_value!, x => x.AsUInt64()); internal override bool? AsNullableBoolean() { - if (IsSingleton) return _value[0].AsNullableBoolean(); + if (IsSingleton) return _value![0].AsNullableBoolean(); throw new InvalidCastException(); } internal override double? AsNullableDouble() { - if (IsSingleton) return _value[0].AsNullableDouble(); + if (IsSingleton) return _value![0].AsNullableDouble(); throw new InvalidCastException(); } internal override int? AsNullableInt32() { - if (IsSingleton) return _value[0].AsNullableInt32(); + if (IsSingleton) return _value![0].AsNullableInt32(); throw new InvalidCastException(); } internal override long? 
AsNullableInt64() { - if (IsSingleton) return _value[0].AsNullableInt64(); + if (IsSingleton) return _value![0].AsNullableInt64(); throw new InvalidCastException(); } internal override ulong? AsNullableUInt64() { - if (IsSingleton) return _value[0].AsNullableUInt64(); + if (IsSingleton) return _value![0].AsNullableUInt64(); throw new InvalidCastException(); } internal override RedisKey AsRedisKey() { - if (IsSingleton) return _value[0].AsRedisKey(); + if (IsSingleton) return _value![0].AsRedisKey(); throw new InvalidCastException(); } - internal override RedisKey[] AsRedisKeyArray() + internal override RedisKey[]? AsRedisKeyArray() => IsNull ? null : IsEmpty ? Array.Empty() - : Array.ConvertAll(_value, x => x.AsRedisKey()); + : Array.ConvertAll(_value!, x => x.AsRedisKey()); - internal override RedisResult[] AsRedisResultArray() => _value; + internal override RedisResult[]? AsRedisResultArray() => _value; internal override RedisValue AsRedisValue() { - if (IsSingleton) return _value[0].AsRedisValue(); + if (IsSingleton) return _value![0].AsRedisValue(); throw new InvalidCastException(); } - internal override RedisValue[] AsRedisValueArray() + internal override RedisValue[]? AsRedisValueArray() => IsNull ? null : IsEmpty ? Array.Empty() - : Array.ConvertAll(_value, x => x.AsRedisValue()); + : Array.ConvertAll(_value!, x => x.AsRedisValue()); - internal override string AsString() + internal override string? AsString() { - if (IsSingleton) return _value[0].AsString(); + if (IsSingleton) return _value![0].AsString(); throw new InvalidCastException(); } - internal override string[] AsStringArray() + internal override string?[]? AsStringArray() => IsNull ? null : IsEmpty ? Array.Empty() - : Array.ConvertAll(_value, x => x.AsString()); + : Array.ConvertAll(_value!, x => x.AsString()); } /// @@ -409,20 +543,23 @@ internal override string[] AsStringArray() /// Create a from a channel. /// /// The to create a from. 
- public static RedisResult Create(RedisChannel channel) => Create((byte[])channel, ResultType.BulkString); + public static RedisResult Create(RedisChannel channel) => Create((byte[]?)channel, ResultType.BulkString); private sealed class ErrorRedisResult : RedisResult { private readonly string value; - public override ResultType Type => ResultType.Error; - public ErrorRedisResult(string value) + public ErrorRedisResult(string? value, ResultType type) : base(type) { this.value = value ?? throw new ArgumentNullException(nameof(value)); } public override bool IsNull => value == null; - public override string ToString() => value; + public override string? ToString(out string? type) + { + type = null; + return value; + } internal override bool AsBoolean() => throw new RedisServerException(value); internal override bool[] AsBooleanArray() => throw new RedisServerException(value); internal override byte[] AsByteArray() => throw new RedisServerException(value); @@ -445,28 +582,38 @@ public ErrorRedisResult(string value) internal override RedisResult[] AsRedisResultArray() => throw new RedisServerException(value); internal override RedisValue AsRedisValue() => throw new RedisServerException(value); internal override RedisValue[] AsRedisValueArray() => throw new RedisServerException(value); - internal override string AsString() => throw new RedisServerException(value); - internal override string[] AsStringArray() => throw new RedisServerException(value); + internal override string? AsString() => throw new RedisServerException(value); + internal override string?[]? AsStringArray() => throw new RedisServerException(value); } private sealed class SingleRedisResult : RedisResult, IConvertible { private readonly RedisValue _value; - public override ResultType Type { get; } - public SingleRedisResult(RedisValue value, ResultType? resultType) + public SingleRedisResult(RedisValue value, ResultType? resultType) : base(value.IsNull ? ResultType.Null : resultType ?? (value.IsInteger ? 
ResultType.Integer : ResultType.BulkString)) { _value = value; - Type = resultType ?? (value.IsInteger ? ResultType.Integer : ResultType.BulkString); } - public override bool IsNull => _value.IsNull; + public override bool IsNull => Resp3Type == ResultType.Null || _value.IsNull; + + public override string? ToString(out string? type) + { + type = null; + string? s = _value; + if (Resp3Type == ResultType.VerbatimString && s is not null && s.Length >= 4 && s[3] == ':') + { + // remove the prefix + type = s.Substring(0, 3); + s = s.Substring(4); + } + return s; + } - public override string ToString() => _value.ToString(); internal override bool AsBoolean() => (bool)_value; internal override bool[] AsBooleanArray() => new[] { AsBoolean() }; - internal override byte[] AsByteArray() => (byte[])_value; - internal override byte[][] AsByteArrayArray() => new[] { AsByteArray() }; + internal override byte[]? AsByteArray() => (byte[]?)_value; + internal override byte[][] AsByteArrayArray() => new[] { AsByteArray()! }; internal override double AsDouble() => (double)_value; internal override double[] AsDoubleArray() => new[] { AsDouble() }; internal override int AsInt32() => (int)_value; @@ -480,96 +627,87 @@ public SingleRedisResult(RedisValue value, ResultType? resultType) internal override int? AsNullableInt32() => (int?)_value; internal override long? AsNullableInt64() => (long?)_value; internal override ulong? 
AsNullableUInt64() => (ulong?)_value; - internal override RedisKey AsRedisKey() => (byte[])_value; + internal override RedisKey AsRedisKey() => (byte[]?)_value; internal override RedisKey[] AsRedisKeyArray() => new[] { AsRedisKey() }; internal override RedisResult[] AsRedisResultArray() => throw new InvalidCastException(); internal override RedisValue AsRedisValue() => _value; internal override RedisValue[] AsRedisValueArray() => new[] { AsRedisValue() }; - internal override string AsString() => (string)_value; - internal override string[] AsStringArray() => new[] { AsString() }; + internal override string? AsString() => (string?)_value; + internal override string?[]? AsStringArray() => new[] { AsString() }; TypeCode IConvertible.GetTypeCode() => TypeCode.Object; - bool IConvertible.ToBoolean(IFormatProvider provider) => AsBoolean(); - char IConvertible.ToChar(IFormatProvider provider) + bool IConvertible.ToBoolean(IFormatProvider? provider) => AsBoolean(); + char IConvertible.ToChar(IFormatProvider? provider) { checked { return (char)AsInt32(); } } - sbyte IConvertible.ToSByte(IFormatProvider provider) + sbyte IConvertible.ToSByte(IFormatProvider? provider) { checked { return (sbyte)AsInt32(); } } - byte IConvertible.ToByte(IFormatProvider provider) + byte IConvertible.ToByte(IFormatProvider? provider) { checked { return (byte)AsInt32(); } } - short IConvertible.ToInt16(IFormatProvider provider) + short IConvertible.ToInt16(IFormatProvider? provider) { checked { return (short)AsInt32(); } } - ushort IConvertible.ToUInt16(IFormatProvider provider) + ushort IConvertible.ToUInt16(IFormatProvider? provider) { checked { return (ushort)AsInt32(); } } - int IConvertible.ToInt32(IFormatProvider provider) => AsInt32(); - uint IConvertible.ToUInt32(IFormatProvider provider) + int IConvertible.ToInt32(IFormatProvider? provider) => AsInt32(); + uint IConvertible.ToUInt32(IFormatProvider? 
provider) { checked { return (uint)AsInt64(); } } - long IConvertible.ToInt64(IFormatProvider provider) => AsInt64(); - ulong IConvertible.ToUInt64(IFormatProvider provider) + long IConvertible.ToInt64(IFormatProvider? provider) => AsInt64(); + ulong IConvertible.ToUInt64(IFormatProvider? provider) { checked { return (ulong)AsInt64(); } } - float IConvertible.ToSingle(IFormatProvider provider) => (float)AsDouble(); - double IConvertible.ToDouble(IFormatProvider provider) => AsDouble(); - decimal IConvertible.ToDecimal(IFormatProvider provider) + float IConvertible.ToSingle(IFormatProvider? provider) => (float)AsDouble(); + double IConvertible.ToDouble(IFormatProvider? provider) => AsDouble(); + decimal IConvertible.ToDecimal(IFormatProvider? provider) { // we can do this safely *sometimes* - if (Type == ResultType.Integer) return AsInt64(); + if (Resp2Type == ResultType.Integer) return AsInt64(); // but not always ThrowNotSupported(); return default; } - DateTime IConvertible.ToDateTime(IFormatProvider provider) { ThrowNotSupported(); return default; } - string IConvertible.ToString(IFormatProvider provider) => AsString(); - object IConvertible.ToType(Type conversionType, IFormatProvider provider) + DateTime IConvertible.ToDateTime(IFormatProvider? provider) + { + ThrowNotSupported(); + return default; + } + string IConvertible.ToString(IFormatProvider? provider) => AsString()!; + object IConvertible.ToType(Type conversionType, IFormatProvider? 
provider) { switch (System.Type.GetTypeCode(conversionType)) { - case TypeCode.Boolean: - return AsBoolean(); - case TypeCode.Char: - checked { return (char)AsInt32(); } - case TypeCode.SByte: - checked { return (sbyte)AsInt32(); } - case TypeCode.Byte: - checked { return (byte)AsInt32(); } - case TypeCode.Int16: - checked { return (short)AsInt32(); } - case TypeCode.UInt16: - checked { return (ushort)AsInt32(); } - case TypeCode.Int32: - return AsInt32(); - case TypeCode.UInt32: - checked { return (uint)AsInt64(); } - case TypeCode.Int64: - return AsInt64(); - case TypeCode.UInt64: - checked { return (ulong)AsInt64(); } - case TypeCode.Single: - return (float)AsDouble(); - case TypeCode.Double: - return AsDouble(); - case TypeCode.Decimal: - if (Type == ResultType.Integer) return AsInt64(); - break; - case TypeCode.String: - return AsString(); + case TypeCode.Boolean: return AsBoolean(); + case TypeCode.Char: checked { return (char)AsInt32(); } + case TypeCode.SByte: checked { return (sbyte)AsInt32(); } + case TypeCode.Byte: checked { return (byte)AsInt32(); } + case TypeCode.Int16: checked { return (short)AsInt32(); } + case TypeCode.UInt16: checked { return (ushort)AsInt32(); } + case TypeCode.Int32: return AsInt32(); + case TypeCode.UInt32: checked { return (uint)AsInt64(); } + case TypeCode.Int64: return AsInt64(); + case TypeCode.UInt64: checked { return (ulong)AsInt64(); } + case TypeCode.Single: return (float)AsDouble(); + case TypeCode.Double: return AsDouble(); + case TypeCode.Decimal when Resp2Type == ResultType.Integer: return AsInt64(); + case TypeCode.String: return AsString()!; + default: + ThrowNotSupported(); + return default; } - ThrowNotSupported(); - return default; } - void ThrowNotSupported([CallerMemberName] string caller = null) + [DoesNotReturn] + private void ThrowNotSupported([CallerMemberName] string? 
caller = null) => throw new NotSupportedException($"{typeof(SingleRedisResult).FullName} does not support {nameof(IConvertible)}.{caller} with value '{AsString()}'"); } } diff --git a/src/StackExchange.Redis/RedisServer.cs b/src/StackExchange.Redis/RedisServer.cs index 271562106..2d7e184ad 100644 --- a/src/StackExchange.Redis/RedisServer.cs +++ b/src/StackExchange.Redis/RedisServer.cs @@ -1,6 +1,7 @@ using System; using System.Buffers; using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; using System.IO; using System.Linq; using System.Net; @@ -8,24 +9,21 @@ using System.Text; using System.Threading.Tasks; using Pipelines.Sockets.Unofficial.Arenas; -using static StackExchange.Redis.ConnectionMultiplexer; - -#pragma warning disable RCS1231 // Make parameter ref read-only. namespace StackExchange.Redis { - internal sealed class RedisServer : RedisBase, IServer + internal sealed partial class RedisServer : RedisBase, IServer { private readonly ServerEndPoint server; - internal RedisServer(ConnectionMultiplexer multiplexer, ServerEndPoint server, object asyncState) : base(multiplexer, asyncState) + internal RedisServer(ServerEndPoint server, object? asyncState) : base(server.Multiplexer, asyncState) { - this.server = server ?? throw new ArgumentNullException(nameof(server)); + this.server = server; // definitely can't be null because .Multiplexer in base call } int IServer.DatabaseCount => server.Databases; - public ClusterConfiguration ClusterConfiguration => server.ClusterConfiguration; + public ClusterConfiguration? ClusterConfiguration => server.ClusterConfiguration; public EndPoint EndPoint => server.EndPoint; @@ -36,6 +34,8 @@ internal RedisServer(ConnectionMultiplexer multiplexer, ServerEndPoint server, o bool IServer.IsSlave => IsReplica; public bool IsReplica => server.IsReplica; + public RedisProtocol Protocol => server.Protocol ?? (multiplexer.RawConfig.TryResp3() ? 
RedisProtocol.Resp3 : RedisProtocol.Resp2); + bool IServer.AllowSlaveWrites { get => AllowReplicaWrites; @@ -63,109 +63,84 @@ public Task ClientKillAsync(EndPoint endpoint, CommandFlags flags = CommandFlags return ExecuteAsync(msg, ResultProcessor.DemandOK); } - public long ClientKill(long? id = null, ClientType? clientType = null, EndPoint endpoint = null, bool skipMe = true, CommandFlags flags = CommandFlags.None) + public long ClientKill(long? id = null, ClientType? clientType = null, EndPoint? endpoint = null, bool skipMe = true, CommandFlags flags = CommandFlags.None) { var msg = GetClientKillMessage(endpoint, id, clientType, skipMe, flags); return ExecuteSync(msg, ResultProcessor.Int64); } - public Task ClientKillAsync(long? id = null, ClientType? clientType = null, EndPoint endpoint = null, bool skipMe = true, CommandFlags flags = CommandFlags.None) + public Task ClientKillAsync(long? id = null, ClientType? clientType = null, EndPoint? endpoint = null, bool skipMe = true, CommandFlags flags = CommandFlags.None) { var msg = GetClientKillMessage(endpoint, id, clientType, skipMe, flags); return ExecuteAsync(msg, ResultProcessor.Int64); } - private Message GetClientKillMessage(EndPoint endpoint, long? id, ClientType? clientType, bool skipMe, CommandFlags flags) + public long ClientKill(ClientKillFilter filter, CommandFlags flags = CommandFlags.None) { - var parts = new List(9) - { - RedisLiterals.KILL - }; - if (id != null) - { - parts.Add(RedisLiterals.ID); - parts.Add(id.Value); - } - if (clientType != null) - { - parts.Add(RedisLiterals.TYPE); - switch (clientType.Value) - { - case ClientType.Normal: - parts.Add(RedisLiterals.normal); - break; - case ClientType.Replica: - parts.Add(Features.ReplicaCommands ? 
RedisLiterals.replica : RedisLiterals.slave); - break; - case ClientType.PubSub: - parts.Add(RedisLiterals.pubsub); - break; - default: - throw new ArgumentOutOfRangeException(nameof(clientType)); - } - parts.Add(id.Value); - } - if (endpoint != null) - { - parts.Add(RedisLiterals.ADDR); - parts.Add((RedisValue)Format.ToString(endpoint)); - } - if (!skipMe) - { - parts.Add(RedisLiterals.SKIPME); - parts.Add(RedisLiterals.no); - } - return Message.Create(-1, flags, RedisCommand.CLIENT, parts); + var msg = Message.Create(-1, flags, RedisCommand.CLIENT, filter.ToList(Features.ReplicaCommands)); + return ExecuteSync(msg, ResultProcessor.Int64); + } + + public Task ClientKillAsync(ClientKillFilter filter, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(-1, flags, RedisCommand.CLIENT, filter.ToList(Features.ReplicaCommands)); + return ExecuteAsync(msg, ResultProcessor.Int64); + } + + private Message GetClientKillMessage(EndPoint? endpoint, long? id, ClientType? clientType, bool? skipMe, CommandFlags flags) + { + var args = new ClientKillFilter().WithId(id).WithClientType(clientType).WithEndpoint(endpoint).WithSkipMe(skipMe).ToList(Features.ReplicaCommands); + return Message.Create(-1, flags, RedisCommand.CLIENT, args); } public ClientInfo[] ClientList(CommandFlags flags = CommandFlags.None) { var msg = Message.Create(-1, flags, RedisCommand.CLIENT, RedisLiterals.LIST); - return ExecuteSync(msg, ClientInfo.Processor); + return ExecuteSync(msg, ClientInfo.Processor, defaultValue: Array.Empty()); } public Task ClientListAsync(CommandFlags flags = CommandFlags.None) { var msg = Message.Create(-1, flags, RedisCommand.CLIENT, RedisLiterals.LIST); - return ExecuteAsync(msg, ClientInfo.Processor); + return ExecuteAsync(msg, ClientInfo.Processor, defaultValue: Array.Empty()); } - public ClusterConfiguration ClusterNodes(CommandFlags flags = CommandFlags.None) + public ClusterConfiguration? 
ClusterNodes(CommandFlags flags = CommandFlags.None) { var msg = Message.Create(-1, flags, RedisCommand.CLUSTER, RedisLiterals.NODES); return ExecuteSync(msg, ResultProcessor.ClusterNodes); } - public Task ClusterNodesAsync(CommandFlags flags = CommandFlags.None) + public Task ClusterNodesAsync(CommandFlags flags = CommandFlags.None) { var msg = Message.Create(-1, flags, RedisCommand.CLUSTER, RedisLiterals.NODES); return ExecuteAsync(msg, ResultProcessor.ClusterNodes); } - public string ClusterNodesRaw(CommandFlags flags = CommandFlags.None) + public string? ClusterNodesRaw(CommandFlags flags = CommandFlags.None) { var msg = Message.Create(-1, flags, RedisCommand.CLUSTER, RedisLiterals.NODES); return ExecuteSync(msg, ResultProcessor.ClusterNodesRaw); } - public Task ClusterNodesRawAsync(CommandFlags flags = CommandFlags.None) + public Task ClusterNodesRawAsync(CommandFlags flags = CommandFlags.None) { var msg = Message.Create(-1, flags, RedisCommand.CLUSTER, RedisLiterals.NODES); return ExecuteAsync(msg, ResultProcessor.ClusterNodesRaw); } - public KeyValuePair[] ConfigGet(RedisValue pattern = default(RedisValue), CommandFlags flags = CommandFlags.None) + public KeyValuePair[] ConfigGet(RedisValue pattern = default, CommandFlags flags = CommandFlags.None) { if (pattern.IsNullOrEmpty) pattern = RedisLiterals.Wildcard; var msg = Message.Create(-1, flags, RedisCommand.CONFIG, RedisLiterals.GET, pattern); - return ExecuteSync(msg, ResultProcessor.StringPairInterleaved); + return ExecuteSync(msg, ResultProcessor.StringPairInterleaved, defaultValue: Array.Empty>()); } - public Task[]> ConfigGetAsync(RedisValue pattern = default(RedisValue), CommandFlags flags = CommandFlags.None) + public Task[]> ConfigGetAsync(RedisValue pattern = default, CommandFlags flags = CommandFlags.None) { if (pattern.IsNullOrEmpty) pattern = RedisLiterals.Wildcard; var msg = Message.Create(-1, flags, RedisCommand.CONFIG, RedisLiterals.GET, pattern); - return ExecuteAsync(msg, 
ResultProcessor.StringPairInterleaved); + return ExecuteAsync(msg, ResultProcessor.StringPairInterleaved, defaultValue: Array.Empty>()); } public void ConfigResetStatistics(CommandFlags flags = CommandFlags.None) @@ -207,6 +182,77 @@ public Task ConfigSetAsync(RedisValue setting, RedisValue value, CommandFlags fl return task; } + public long CommandCount(CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(-1, flags, RedisCommand.COMMAND, RedisLiterals.COUNT); + return ExecuteSync(msg, ResultProcessor.Int64); + } + + public Task CommandCountAsync(CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(-1, flags, RedisCommand.COMMAND, RedisLiterals.COUNT); + return ExecuteAsync(msg, ResultProcessor.Int64); + } + + public RedisKey[] CommandGetKeys(RedisValue[] command, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(-1, flags, RedisCommand.COMMAND, AddValueToArray(RedisLiterals.GETKEYS, command)); + return ExecuteSync(msg, ResultProcessor.RedisKeyArray, defaultValue: Array.Empty()); + } + + public Task CommandGetKeysAsync(RedisValue[] command, CommandFlags flags = CommandFlags.None) + { + var msg = Message.Create(-1, flags, RedisCommand.COMMAND, AddValueToArray(RedisLiterals.GETKEYS, command)); + return ExecuteAsync(msg, ResultProcessor.RedisKeyArray, defaultValue: Array.Empty()); + } + + public string[] CommandList(RedisValue? moduleName = null, RedisValue? category = null, RedisValue? pattern = null, CommandFlags flags = CommandFlags.None) + { + var msg = GetCommandListMessage(moduleName, category, pattern, flags); + return ExecuteSync(msg, ResultProcessor.StringArray, defaultValue: Array.Empty()); + } + + public Task CommandListAsync(RedisValue? moduleName = null, RedisValue? category = null, RedisValue? 
pattern = null, CommandFlags flags = CommandFlags.None) + { + var msg = GetCommandListMessage(moduleName, category, pattern, flags); + return ExecuteAsync(msg, ResultProcessor.StringArray, defaultValue: Array.Empty()); + } + + private Message GetCommandListMessage(RedisValue? moduleName = null, RedisValue? category = null, RedisValue? pattern = null, CommandFlags flags = CommandFlags.None) + { + if (moduleName == null && category == null && pattern == null) + { + return Message.Create(-1, flags, RedisCommand.COMMAND, RedisLiterals.LIST); + } + else if (moduleName != null && category == null && pattern == null) + { + return Message.Create(-1, flags, RedisCommand.COMMAND, MakeArray(RedisLiterals.LIST, RedisLiterals.FILTERBY, RedisLiterals.MODULE, (RedisValue)moduleName)); + } + else if (moduleName == null && category != null && pattern == null) + { + return Message.Create(-1, flags, RedisCommand.COMMAND, MakeArray(RedisLiterals.LIST, RedisLiterals.FILTERBY, RedisLiterals.ACLCAT, (RedisValue)category)); + } + else if (moduleName == null && category == null && pattern != null) + { + return Message.Create(-1, flags, RedisCommand.COMMAND, MakeArray(RedisLiterals.LIST, RedisLiterals.FILTERBY, RedisLiterals.PATTERN, (RedisValue)pattern)); + } + else + { + throw new ArgumentException("More then one filter is not allowed"); + } + } + + private RedisValue[] AddValueToArray(RedisValue val, RedisValue[] arr) + { + var result = new RedisValue[arr.Length + 1]; + var i = 0; + result[i++] = val; + foreach (var item in arr) result[i++] = item; + return result; + } + + private RedisValue[] MakeArray(params RedisValue[] redisValues) => redisValues; + public long DatabaseSize(int database = -1, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(multiplexer.ApplyDefaultDatabase(database), flags, RedisCommand.DBSIZE); @@ -257,25 +303,28 @@ public Task FlushDatabaseAsync(int database = -1, CommandFlags flags = CommandFl public ServerCounters GetCounters() => 
server.GetCounters(); - public IGrouping>[] Info(RedisValue section = default(RedisValue), CommandFlags flags = CommandFlags.None) + private static IGrouping>[] InfoDefault => + Array.Empty>>(); + + public IGrouping>[] Info(RedisValue section = default, CommandFlags flags = CommandFlags.None) { var msg = section.IsNullOrEmpty ? Message.Create(-1, flags, RedisCommand.INFO) : Message.Create(-1, flags, RedisCommand.INFO, section); - return ExecuteSync(msg, ResultProcessor.Info); + return ExecuteSync(msg, ResultProcessor.Info, defaultValue: InfoDefault); } - public Task>[]> InfoAsync(RedisValue section = default(RedisValue), CommandFlags flags = CommandFlags.None) + public Task>[]> InfoAsync(RedisValue section = default, CommandFlags flags = CommandFlags.None) { var msg = section.IsNullOrEmpty ? Message.Create(-1, flags, RedisCommand.INFO) : Message.Create(-1, flags, RedisCommand.INFO, section); - return ExecuteAsync(msg, ResultProcessor.Info); + return ExecuteAsync(msg, ResultProcessor.Info, defaultValue: InfoDefault); } - public string InfoRaw(RedisValue section = default(RedisValue), CommandFlags flags = CommandFlags.None) + public string? InfoRaw(RedisValue section = default, CommandFlags flags = CommandFlags.None) { var msg = section.IsNullOrEmpty ? Message.Create(-1, flags, RedisCommand.INFO) @@ -284,7 +333,7 @@ public Task FlushDatabaseAsync(int database = -1, CommandFlags flags = CommandFl return ExecuteSync(msg, ResultProcessor.String); } - public Task InfoRawAsync(RedisValue section = default(RedisValue), CommandFlags flags = CommandFlags.None) + public Task InfoRawAsync(RedisValue section = default, CommandFlags flags = CommandFlags.None) { var msg = section.IsNullOrEmpty ? 
Message.Create(-1, flags, RedisCommand.INFO) @@ -317,7 +366,7 @@ private CursorEnumerable KeysAsync(int database, RedisValue pattern, i if (cursor != 0) throw ExceptionFactory.NoCursor(RedisCommand.KEYS); Message msg = Message.Create(database, flags, RedisCommand.KEYS, pattern); - return CursorEnumerable.From(this, server, ExecuteAsync(msg, ResultProcessor.RedisKeyArray), pageOffset); + return CursorEnumerable.From(this, server, ExecuteAsync(msg, ResultProcessor.RedisKeyArray, defaultValue: Array.Empty()), pageOffset); } public DateTime LastSave(CommandFlags flags = CommandFlags.None) @@ -332,24 +381,27 @@ public Task LastSaveAsync(CommandFlags flags = CommandFlags.None) return ExecuteAsync(msg, ResultProcessor.DateTime); } - public void MakeMaster(ReplicationChangeOptions options, TextWriter log = null) + public void MakeMaster(ReplicationChangeOptions options, TextWriter? log = null) { - using (var proxy = LogProxy.TryCreate(log)) - { - multiplexer.MakeMaster(server, options, proxy); - } + // Do you believe in magic? + multiplexer.MakePrimaryAsync(server, options, log).Wait(60000); + } + + public async Task MakePrimaryAsync(ReplicationChangeOptions options, TextWriter? 
log = null) + { + await multiplexer.MakePrimaryAsync(server, options, log).ForAwait(); } public Role Role(CommandFlags flags = CommandFlags.None) { var msg = Message.Create(-1, flags, RedisCommand.ROLE); - return ExecuteSync(msg, ResultProcessor.Role); + return ExecuteSync(msg, ResultProcessor.Role, defaultValue: Redis.Role.Null); } public Task RoleAsync(CommandFlags flags = CommandFlags.None) { var msg = Message.Create(-1, flags, RedisCommand.ROLE); - return ExecuteAsync(msg, ResultProcessor.Role); + return ExecuteAsync(msg, ResultProcessor.Role, defaultValue: Redis.Role.Null); } public void Save(SaveType type, CommandFlags flags = CommandFlags.None) @@ -390,14 +442,14 @@ public Task ScriptExistsAsync(byte[] sha1, CommandFlags flags = CommandFla public void ScriptFlush(CommandFlags flags = CommandFlags.None) { - if (!multiplexer.RawConfig.AllowAdmin) throw ExceptionFactory.AdminModeNotEnabled(multiplexer.IncludeDetailInExceptions, RedisCommand.SCRIPT, null, server); + if (!multiplexer.RawConfig.AllowAdmin) throw ExceptionFactory.AdminModeNotEnabled(multiplexer.RawConfig.IncludeDetailInExceptions, RedisCommand.SCRIPT, null, server); var msg = Message.Create(-1, flags, RedisCommand.SCRIPT, RedisLiterals.FLUSH); ExecuteSync(msg, ResultProcessor.DemandOK); } public Task ScriptFlushAsync(CommandFlags flags = CommandFlags.None) { - if (!multiplexer.RawConfig.AllowAdmin) throw ExceptionFactory.AdminModeNotEnabled(multiplexer.IncludeDetailInExceptions, RedisCommand.SCRIPT, null, server); + if (!multiplexer.RawConfig.AllowAdmin) throw ExceptionFactory.AdminModeNotEnabled(multiplexer.RawConfig.IncludeDetailInExceptions, RedisCommand.SCRIPT, null, server); var msg = Message.Create(-1, flags, RedisCommand.SCRIPT, RedisLiterals.FLUSH); return ExecuteAsync(msg, ResultProcessor.DemandOK); } @@ -405,13 +457,13 @@ public Task ScriptFlushAsync(CommandFlags flags = CommandFlags.None) public byte[] ScriptLoad(string script, CommandFlags flags = CommandFlags.None) { var msg = new 
RedisDatabase.ScriptLoadMessage(flags, script); - return ExecuteSync(msg, ResultProcessor.ScriptLoad); + return ExecuteSync(msg, ResultProcessor.ScriptLoad, defaultValue: Array.Empty()); // Note: default isn't used on failure - we'll throw } public Task ScriptLoadAsync(string script, CommandFlags flags = CommandFlags.None) { var msg = new RedisDatabase.ScriptLoadMessage(flags, script); - return ExecuteAsync(msg, ResultProcessor.ScriptLoad); + return ExecuteAsync(msg, ResultProcessor.ScriptLoad, defaultValue: Array.Empty()); // Note: default isn't used on failure - we'll throw } public LoadedLuaScript ScriptLoad(LuaScript script, CommandFlags flags = CommandFlags.None) @@ -426,22 +478,13 @@ public Task ScriptLoadAsync(LuaScript script, CommandFlags flag public void Shutdown(ShutdownMode shutdownMode = ShutdownMode.Default, CommandFlags flags = CommandFlags.None) { - Message msg; - switch (shutdownMode) + Message msg = shutdownMode switch { - case ShutdownMode.Default: - msg = Message.Create(-1, flags, RedisCommand.SHUTDOWN); - break; - case ShutdownMode.Always: - msg = Message.Create(-1, flags, RedisCommand.SHUTDOWN, RedisLiterals.SAVE); - break; - case ShutdownMode.Never: - msg = Message.Create(-1, flags, RedisCommand.SHUTDOWN, RedisLiterals.NOSAVE); - break; - default: - throw new ArgumentOutOfRangeException(nameof(shutdownMode)); - } - + ShutdownMode.Default => Message.Create(-1, flags, RedisCommand.SHUTDOWN), + ShutdownMode.Always => Message.Create(-1, flags, RedisCommand.SHUTDOWN, RedisLiterals.SAVE), + ShutdownMode.Never => Message.Create(-1, flags, RedisCommand.SHUTDOWN, RedisLiterals.NOSAVE), + _ => throw new ArgumentOutOfRangeException(nameof(shutdownMode)), + }; try { ExecuteSync(msg, ResultProcessor.DemandOK); @@ -459,7 +502,7 @@ public CommandTrace[] SlowlogGet(int count = 0, CommandFlags flags = CommandFlag ? 
Message.Create(-1, flags, RedisCommand.SLOWLOG, RedisLiterals.GET, count) : Message.Create(-1, flags, RedisCommand.SLOWLOG, RedisLiterals.GET); - return ExecuteSync(msg, CommandTrace.Processor); + return ExecuteSync(msg, CommandTrace.Processor, defaultValue: Array.Empty()); } public Task SlowlogGetAsync(int count = 0, CommandFlags flags = CommandFlags.None) @@ -468,7 +511,7 @@ public Task SlowlogGetAsync(int count = 0, CommandFlags flags = ? Message.Create(-1, flags, RedisCommand.SLOWLOG, RedisLiterals.GET, count) : Message.Create(-1, flags, RedisCommand.SLOWLOG, RedisLiterals.GET); - return ExecuteAsync(msg, CommandTrace.Processor); + return ExecuteAsync(msg, CommandTrace.Processor, defaultValue: Array.Empty()); } public void SlowlogReset(CommandFlags flags = CommandFlags.None) @@ -495,18 +538,18 @@ public Task StringGetAsync(int db, RedisKey key, CommandFlags flags return ExecuteAsync(msg, ResultProcessor.RedisValue); } - public RedisChannel[] SubscriptionChannels(RedisChannel pattern = default(RedisChannel), CommandFlags flags = CommandFlags.None) + public RedisChannel[] SubscriptionChannels(RedisChannel pattern = default, CommandFlags flags = CommandFlags.None) { var msg = pattern.IsNullOrEmpty ? Message.Create(-1, flags, RedisCommand.PUBSUB, RedisLiterals.CHANNELS) : Message.Create(-1, flags, RedisCommand.PUBSUB, RedisLiterals.CHANNELS, pattern); - return ExecuteSync(msg, ResultProcessor.RedisChannelArrayLiteral); + return ExecuteSync(msg, ResultProcessor.RedisChannelArrayLiteral, defaultValue: Array.Empty()); } - public Task SubscriptionChannelsAsync(RedisChannel pattern = default(RedisChannel), CommandFlags flags = CommandFlags.None) + public Task SubscriptionChannelsAsync(RedisChannel pattern = default, CommandFlags flags = CommandFlags.None) { var msg = pattern.IsNullOrEmpty ? 
Message.Create(-1, flags, RedisCommand.PUBSUB, RedisLiterals.CHANNELS) : Message.Create(-1, flags, RedisCommand.PUBSUB, RedisLiterals.CHANNELS, pattern); - return ExecuteAsync(msg, ResultProcessor.RedisChannelArrayLiteral); + return ExecuteAsync(msg, ResultProcessor.RedisChannelArrayLiteral, defaultValue: Array.Empty()); } public long SubscriptionPatternCount(CommandFlags flags = CommandFlags.None) @@ -557,59 +600,119 @@ public Task TimeAsync(CommandFlags flags = CommandFlags.None) return ExecuteAsync(msg, ResultProcessor.DateTime); } - internal static Message CreateReplicaOfMessage(ServerEndPoint sendMessageTo, EndPoint masterEndpoint, CommandFlags flags = CommandFlags.None) + internal static Message CreateReplicaOfMessage(ServerEndPoint sendMessageTo, EndPoint? primaryEndpoint, CommandFlags flags = CommandFlags.None) { RedisValue host, port; - if (masterEndpoint == null) + if (primaryEndpoint == null) { host = "NO"; port = "ONE"; } else { - if (Format.TryGetHostPort(masterEndpoint, out string hostRaw, out int portRaw)) + if (Format.TryGetHostPort(primaryEndpoint, out string? hostRaw, out int? portRaw)) { host = hostRaw; port = portRaw; } else { - throw new NotSupportedException("Unknown endpoint type: " + masterEndpoint.GetType().Name); + throw new NotSupportedException("Unknown endpoint type: " + primaryEndpoint.GetType().Name); } } return Message.Create(-1, flags, sendMessageTo.GetFeatures().ReplicaCommands ? RedisCommand.REPLICAOF : RedisCommand.SLAVEOF, host, port); } - internal override Task ExecuteAsync(Message message, ResultProcessor processor, ServerEndPoint server = null) - { // inject our expected server automatically - if (server == null) server = this.server; + private Message? 
GetTiebreakerRemovalMessage() + { + var configuration = multiplexer.RawConfig; + + if (configuration.TryGetTieBreaker(out var tieBreakerKey) && multiplexer.CommandMap.IsAvailable(RedisCommand.DEL)) + { + var msg = Message.Create(0, CommandFlags.FireAndForget | CommandFlags.NoRedirect, RedisCommand.DEL, tieBreakerKey); + msg.SetInternalCall(); + return msg; + } + return null; + } + + private Message? GetConfigChangeMessage() + { + // attempt to broadcast a reconfigure message to anybody listening to this server + var channel = multiplexer.ConfigurationChangedChannel; + if (channel != null && multiplexer.CommandMap.IsAvailable(RedisCommand.PUBLISH)) + { + var msg = Message.Create(-1, CommandFlags.FireAndForget | CommandFlags.NoRedirect, RedisCommand.PUBLISH, (RedisValue)channel, RedisLiterals.Wildcard); + msg.SetInternalCall(); + return msg; + } + return null; + } + + internal override Task ExecuteAsync(Message? message, ResultProcessor? processor, T defaultValue, ServerEndPoint? server = null) + { + // inject our expected server automatically + server ??= this.server; + FixFlags(message, server); + if (!server.IsConnected) + { + if (message == null) return CompletedTask.FromDefault(defaultValue, asyncState); + if (message.IsFireAndForget) return CompletedTask.FromDefault(defaultValue, null); // F+F explicitly does not get async-state + + // After the "don't care" cases above, if we can't queue then it's time to error - otherwise call through to queuing. + if (!multiplexer.RawConfig.BacklogPolicy.QueueWhileDisconnected) + { + // no need to deny exec-sync here; will be complete before they see if + var tcs = TaskSource.Create(asyncState); + ConnectionMultiplexer.ThrowFailed(tcs, ExceptionFactory.NoConnectionAvailable(multiplexer, message, server)); + return tcs.Task; + } + } + return base.ExecuteAsync(message, processor, defaultValue, server); + } + + internal override Task ExecuteAsync(Message? message, ResultProcessor? processor, ServerEndPoint? 
server = null) where T : default + { + // inject our expected server automatically + server ??= this.server; FixFlags(message, server); if (!server.IsConnected) { if (message == null) return CompletedTask.Default(asyncState); if (message.IsFireAndForget) return CompletedTask.Default(null); // F+F explicitly does not get async-state - // no need to deny exec-sync here; will be complete before they see if - var tcs = TaskSource.Create(asyncState); - ConnectionMultiplexer.ThrowFailed(tcs, ExceptionFactory.NoConnectionAvailable(multiplexer, message, server)); - return tcs.Task; + // After the "don't care" cases above, if we can't queue then it's time to error - otherwise call through to queuing. + if (!multiplexer.RawConfig.BacklogPolicy.QueueWhileDisconnected) + { + // no need to deny exec-sync here; will be complete before they see if + var tcs = TaskSource.Create(asyncState); + ConnectionMultiplexer.ThrowFailed(tcs, ExceptionFactory.NoConnectionAvailable(multiplexer, message, server)); + return tcs.Task; + } } - return base.ExecuteAsync(message, processor, server); + return base.ExecuteAsync(message, processor, server); } - internal override T ExecuteSync(Message message, ResultProcessor processor, ServerEndPoint server = null) - { // inject our expected server automatically + [return: NotNullIfNotNull("defaultValue")] + internal override T? ExecuteSync(Message? message, ResultProcessor? processor, ServerEndPoint? server = null, T? defaultValue = default) where T : default + { + // inject our expected server automatically if (server == null) server = this.server; FixFlags(message, server); if (!server.IsConnected) { - if (message == null || message.IsFireAndForget) return default(T); - throw ExceptionFactory.NoConnectionAvailable(multiplexer, message, server); + if (message == null || message.IsFireAndForget) return defaultValue; + + // After the "don't care" cases above, if we can't queue then it's time to error - otherwise call through to queuing. 
+ if (!multiplexer.RawConfig.BacklogPolicy.QueueWhileDisconnected) + { + throw ExceptionFactory.NoConnectionAvailable(multiplexer, message, server); + } } - return base.ExecuteSync(message, processor, server); + return base.ExecuteSync(message, processor, server, defaultValue); } - internal override RedisFeatures GetFeatures(in RedisKey key, CommandFlags flags, out ServerEndPoint server) + internal override RedisFeatures GetFeatures(in RedisKey key, CommandFlags flags, RedisCommand command, out ServerEndPoint server) { server = this.server; return server.GetFeatures(); @@ -623,96 +726,108 @@ public void ReplicaOf(EndPoint master, CommandFlags flags = CommandFlags.None) { throw new ArgumentException("Cannot replicate to self"); } - // prepare the actual replicaof message (not sent yet) - var replicaOfMsg = CreateReplicaOfMessage(server, master, flags); - - var configuration = multiplexer.RawConfig; +#pragma warning disable CS0618 // Type or member is obsolete // attempt to cease having an opinion on the master; will resume that when replication completes // (note that this may fail; we aren't depending on it) - if (!string.IsNullOrWhiteSpace(configuration.TieBreaker) - && multiplexer.CommandMap.IsAvailable(RedisCommand.DEL)) + if (GetTiebreakerRemovalMessage() is Message tieBreakerRemoval) { - var del = Message.Create(0, CommandFlags.FireAndForget | CommandFlags.NoRedirect, RedisCommand.DEL, (RedisKey)configuration.TieBreaker); - del.SetInternalCall(); -#pragma warning disable CS0618 - server.WriteDirectFireAndForgetSync(del, ResultProcessor.Boolean); -#pragma warning restore CS0618 + tieBreakerRemoval.SetSource(ResultProcessor.Boolean, null); + server.GetBridge(tieBreakerRemoval)?.TryWriteSync(tieBreakerRemoval, server.IsReplica); } + + var replicaOfMsg = CreateReplicaOfMessage(server, master, flags); ExecuteSync(replicaOfMsg, ResultProcessor.DemandOK); // attempt to broadcast a reconfigure message to anybody listening to this server - var channel = 
multiplexer.ConfigurationChangedChannel; - if (channel != null && multiplexer.CommandMap.IsAvailable(RedisCommand.PUBLISH)) + if (GetConfigChangeMessage() is Message configChangeMessage) { - var pub = Message.Create(-1, CommandFlags.FireAndForget | CommandFlags.NoRedirect, RedisCommand.PUBLISH, (RedisValue)channel, RedisLiterals.Wildcard); - pub.SetInternalCall(); -#pragma warning disable CS0618 - server.WriteDirectFireAndForgetSync(pub, ResultProcessor.Int64); -#pragma warning restore CS0618 + configChangeMessage.SetSource(ResultProcessor.Int64, null); + server.GetBridge(configChangeMessage)?.TryWriteSync(configChangeMessage, server.IsReplica); } +#pragma warning restore CS0618 } Task IServer.SlaveOfAsync(EndPoint master, CommandFlags flags) => ReplicaOfAsync(master, flags); - public Task ReplicaOfAsync(EndPoint master, CommandFlags flags = CommandFlags.None) + public async Task ReplicaOfAsync(EndPoint? master, CommandFlags flags = CommandFlags.None) { - var msg = CreateReplicaOfMessage(server, master, flags); if (master == server.EndPoint) { throw new ArgumentException("Cannot replicate to self"); } - return ExecuteAsync(msg, ResultProcessor.DemandOK); + + // Attempt to cease having an opinion on the primary - will resume that when replication completes + // (note that this may fail - we aren't depending on it) + if (GetTiebreakerRemovalMessage() is Message tieBreakerRemoval && !server.IsReplica) + { + try + { + await server.WriteDirectAsync(tieBreakerRemoval, ResultProcessor.Boolean).ForAwait(); + } + catch { } + } + + var msg = CreateReplicaOfMessage(server, master, flags); + await ExecuteAsync(msg, ResultProcessor.DemandOK).ForAwait(); + + // attempt to broadcast a reconfigure message to anybody listening to this server + if (GetConfigChangeMessage() is Message configChangeMessage) + { + await server.WriteDirectAsync(configChangeMessage, ResultProcessor.Int64).ForAwait(); + } } - private void FixFlags(Message message, ServerEndPoint server) + private static 
void FixFlags(Message? message, ServerEndPoint server) { + if (message is null) + { + return; + } + // since the server is specified explicitly, we don't want defaults // to make the "non-preferred-endpoint" counters look artificially // inflated; note we only change *prefer* options - switch (Message.GetMasterReplicaFlags(message.Flags)) + switch (Message.GetPrimaryReplicaFlags(message.Flags)) { case CommandFlags.PreferMaster: if (server.IsReplica) message.SetPreferReplica(); break; case CommandFlags.PreferReplica: - if (!server.IsReplica) message.SetPreferMaster(); + if (!server.IsReplica) message.SetPreferPrimary(); break; } } - private Message GetSaveMessage(SaveType type, CommandFlags flags = CommandFlags.None) + private static Message GetSaveMessage(SaveType type, CommandFlags flags = CommandFlags.None) => type switch { - switch (type) - { - case SaveType.BackgroundRewriteAppendOnlyFile: return Message.Create(-1, flags, RedisCommand.BGREWRITEAOF); - case SaveType.BackgroundSave: return Message.Create(-1, flags, RedisCommand.BGSAVE); -#pragma warning disable 0618 - case SaveType.ForegroundSave: return Message.Create(-1, flags, RedisCommand.SAVE); -#pragma warning restore 0618 - default: throw new ArgumentOutOfRangeException(nameof(type)); - } - } + SaveType.BackgroundRewriteAppendOnlyFile => Message.Create(-1, flags, RedisCommand.BGREWRITEAOF), + SaveType.BackgroundSave => Message.Create(-1, flags, RedisCommand.BGSAVE), +#pragma warning disable CS0618 // Type or member is obsolete + SaveType.ForegroundSave => Message.Create(-1, flags, RedisCommand.SAVE), +#pragma warning restore CS0618 + _ => throw new ArgumentOutOfRangeException(nameof(type)), + }; - private ResultProcessor GetSaveResultProcessor(SaveType type) + private static ResultProcessor GetSaveResultProcessor(SaveType type) => type switch { - switch (type) - { - case SaveType.BackgroundRewriteAppendOnlyFile: return ResultProcessor.DemandOK; - case SaveType.BackgroundSave: return 
ResultProcessor.BackgroundSaveStarted; -#pragma warning disable 0618 - case SaveType.ForegroundSave: return ResultProcessor.DemandOK; -#pragma warning restore 0618 - default: throw new ArgumentOutOfRangeException(nameof(type)); - } - } + SaveType.BackgroundRewriteAppendOnlyFile => ResultProcessor.BackgroundSaveAOFStarted, + SaveType.BackgroundSave => ResultProcessor.BackgroundSaveStarted, +#pragma warning disable CS0618 // Type or member is obsolete + SaveType.ForegroundSave => ResultProcessor.DemandOK, +#pragma warning restore CS0618 + _ => throw new ArgumentOutOfRangeException(nameof(type)), + }; private static class ScriptHash { public static RedisValue Encode(byte[] value) { const string hex = "0123456789abcdef"; - if (value == null) return default(RedisValue); + if (value == null) + { + return default; + } var result = new byte[value.Length * 2]; int offset = 0; for (int i = 0; i < value.Length; i++) @@ -726,7 +841,7 @@ public static RedisValue Encode(byte[] value) public static RedisValue Hash(string value) { - if (value == null) return default(RedisValue); + if (value is null) return default; using (var sha1 = SHA1.Create()) { var bytes = sha1.ComputeHash(Encoding.UTF8.GetBytes(value)); @@ -774,16 +889,16 @@ private protected override Message CreateMessage(in RedisValue cursor) private protected override ResultProcessor Processor => processor; public static readonly ResultProcessor processor = new ScanResultProcessor(); - private class ScanResultProcessor : ResultProcessor + private sealed class ScanResultProcessor : ResultProcessor { protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) { - switch (result.Type) + switch (result.Resp2TypeArray) { - case ResultType.MultiBulk: + case ResultType.Array: var arr = result.GetItems(); RawResult inner; - if (arr.Length == 2 && (inner = arr[1]).Type == ResultType.MultiBulk) + if (arr.Length == 2 && (inner = arr[1]).Resp2TypeArray == ResultType.Array) { var items = 
inner.GetItems(); RedisKey[] keys; @@ -810,54 +925,52 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes } } - #region Sentinel - - public EndPoint SentinelGetMasterAddressByName(string serviceName, CommandFlags flags = CommandFlags.None) + public EndPoint? SentinelGetMasterAddressByName(string serviceName, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(-1, flags, RedisCommand.SENTINEL, RedisLiterals.GETMASTERADDRBYNAME, (RedisValue)serviceName); - return ExecuteSync(msg, ResultProcessor.SentinelMasterEndpoint); + return ExecuteSync(msg, ResultProcessor.SentinelPrimaryEndpoint); } - public Task SentinelGetMasterAddressByNameAsync(string serviceName, CommandFlags flags = CommandFlags.None) + public Task SentinelGetMasterAddressByNameAsync(string serviceName, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(-1, flags, RedisCommand.SENTINEL, RedisLiterals.GETMASTERADDRBYNAME, (RedisValue)serviceName); - return ExecuteAsync(msg, ResultProcessor.SentinelMasterEndpoint); + return ExecuteAsync(msg, ResultProcessor.SentinelPrimaryEndpoint); } public EndPoint[] SentinelGetSentinelAddresses(string serviceName, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(-1, flags, RedisCommand.SENTINEL, RedisLiterals.SENTINELS, (RedisValue)serviceName); - return ExecuteSync(msg, ResultProcessor.SentinelAddressesEndPoints); + return ExecuteSync(msg, ResultProcessor.SentinelAddressesEndPoints, defaultValue: Array.Empty()); } public Task SentinelGetSentinelAddressesAsync(string serviceName, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(-1, flags, RedisCommand.SENTINEL, RedisLiterals.SENTINELS, (RedisValue)serviceName); - return ExecuteAsync(msg, ResultProcessor.SentinelAddressesEndPoints); + return ExecuteAsync(msg, ResultProcessor.SentinelAddressesEndPoints, defaultValue: Array.Empty()); } public EndPoint[] SentinelGetReplicaAddresses(string serviceName, CommandFlags flags = 
CommandFlags.None) { - var msg = Message.Create(-1, flags, RedisCommand.SENTINEL, RedisLiterals.SLAVES, (RedisValue)serviceName); - return ExecuteSync(msg, ResultProcessor.SentinelAddressesEndPoints); + var msg = Message.Create(-1, flags, RedisCommand.SENTINEL, Features.ReplicaCommands ? RedisLiterals.REPLICAS : RedisLiterals.SLAVES, (RedisValue)serviceName); + return ExecuteSync(msg, ResultProcessor.SentinelAddressesEndPoints, defaultValue: Array.Empty()); } public Task SentinelGetReplicaAddressesAsync(string serviceName, CommandFlags flags = CommandFlags.None) { - var msg = Message.Create(-1, flags, RedisCommand.SENTINEL, RedisLiterals.SLAVES, (RedisValue)serviceName); - return ExecuteAsync(msg, ResultProcessor.SentinelAddressesEndPoints); + var msg = Message.Create(-1, flags, RedisCommand.SENTINEL, Features.ReplicaCommands ? RedisLiterals.REPLICAS : RedisLiterals.SLAVES, (RedisValue)serviceName); + return ExecuteAsync(msg, ResultProcessor.SentinelAddressesEndPoints, defaultValue: Array.Empty()); } public KeyValuePair[] SentinelMaster(string serviceName, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(-1, flags, RedisCommand.SENTINEL, RedisLiterals.MASTER, (RedisValue)serviceName); - return ExecuteSync(msg, ResultProcessor.StringPairInterleaved); + return ExecuteSync(msg, ResultProcessor.StringPairInterleaved, defaultValue: Array.Empty>()); } public Task[]> SentinelMasterAsync(string serviceName, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(-1, flags, RedisCommand.SENTINEL, RedisLiterals.MASTER, (RedisValue)serviceName); - return ExecuteAsync(msg, ResultProcessor.StringPairInterleaved); + return ExecuteAsync(msg, ResultProcessor.StringPairInterleaved, defaultValue: Array.Empty>()); } public void SentinelFailover(string serviceName, CommandFlags flags = CommandFlags.None) @@ -875,13 +988,13 @@ public Task SentinelFailoverAsync(string serviceName, CommandFlags flags = Comma public KeyValuePair[][] 
SentinelMasters(CommandFlags flags = CommandFlags.None) { var msg = Message.Create(-1, flags, RedisCommand.SENTINEL, RedisLiterals.MASTERS); - return ExecuteSync(msg, ResultProcessor.SentinelArrayOfArrays); + return ExecuteSync(msg, ResultProcessor.SentinelArrayOfArrays, defaultValue: Array.Empty[]>()); } public Task[][]> SentinelMastersAsync(CommandFlags flags = CommandFlags.None) { var msg = Message.Create(-1, flags, RedisCommand.SENTINEL, RedisLiterals.MASTERS); - return ExecuteAsync(msg, ResultProcessor.SentinelArrayOfArrays); + return ExecuteAsync(msg, ResultProcessor.SentinelArrayOfArrays, defaultValue: Array.Empty[]>()); } // For previous compat only @@ -890,9 +1003,8 @@ KeyValuePair[][] IServer.SentinelSlaves(string serviceName, Comm public KeyValuePair[][] SentinelReplicas(string serviceName, CommandFlags flags = CommandFlags.None) { - // note: sentinel does not have "replicas" terminology at the current time - var msg = Message.Create(-1, flags, RedisCommand.SENTINEL, RedisLiterals.SLAVES, (RedisValue)serviceName); - return ExecuteSync(msg, ResultProcessor.SentinelArrayOfArrays); + var msg = Message.Create(-1, flags, RedisCommand.SENTINEL, Features.ReplicaCommands ? RedisLiterals.REPLICAS : RedisLiterals.SLAVES, (RedisValue)serviceName); + return ExecuteSync(msg, ResultProcessor.SentinelArrayOfArrays, defaultValue: Array.Empty[]>()); } // For previous compat only @@ -901,31 +1013,28 @@ Task[][]> IServer.SentinelSlavesAsync(string servic public Task[][]> SentinelReplicasAsync(string serviceName, CommandFlags flags = CommandFlags.None) { - // note: sentinel does not have "replicas" terminology at the current time - var msg = Message.Create(-1, flags, RedisCommand.SENTINEL, RedisLiterals.SLAVES, (RedisValue)serviceName); - return ExecuteAsync(msg, ResultProcessor.SentinelArrayOfArrays); + var msg = Message.Create(-1, flags, RedisCommand.SENTINEL, Features.ReplicaCommands ? 
RedisLiterals.REPLICAS : RedisLiterals.SLAVES, (RedisValue)serviceName); + return ExecuteAsync(msg, ResultProcessor.SentinelArrayOfArrays, defaultValue: Array.Empty[]>()); } public KeyValuePair[][] SentinelSentinels(string serviceName, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(-1, flags, RedisCommand.SENTINEL, RedisLiterals.SENTINELS, (RedisValue)serviceName); - return ExecuteSync(msg, ResultProcessor.SentinelArrayOfArrays); + return ExecuteSync(msg, ResultProcessor.SentinelArrayOfArrays, defaultValue: Array.Empty[]>()); } public Task[][]> SentinelSentinelsAsync(string serviceName, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(-1, flags, RedisCommand.SENTINEL, RedisLiterals.SENTINELS, (RedisValue)serviceName); - return ExecuteAsync(msg, ResultProcessor.SentinelArrayOfArrays); + return ExecuteAsync(msg, ResultProcessor.SentinelArrayOfArrays, defaultValue: Array.Empty[]>()); } - #endregion - public RedisResult Execute(string command, params object[] args) => Execute(command, args, CommandFlags.None); public RedisResult Execute(string command, ICollection args, CommandFlags flags = CommandFlags.None) { var msg = new RedisDatabase.ExecuteMessage(multiplexer?.CommandMap, -1, flags, command, args); - return ExecuteSync(msg, ResultProcessor.ScriptResult); + return ExecuteSync(msg, ResultProcessor.ScriptResult, defaultValue: RedisResult.NullSingle); } public Task ExecuteAsync(string command, params object[] args) => ExecuteAsync(command, args, CommandFlags.None); @@ -933,27 +1042,41 @@ public RedisResult Execute(string command, ICollection args, CommandFlag public Task ExecuteAsync(string command, ICollection args, CommandFlags flags = CommandFlags.None) { var msg = new RedisDatabase.ExecuteMessage(multiplexer?.CommandMap, -1, flags, command, args); - return ExecuteAsync(msg, ResultProcessor.ScriptResult); + return ExecuteAsync(msg, ResultProcessor.ScriptResult, defaultValue: RedisResult.NullSingle); + } + + public RedisResult 
Execute(int? database, string command, ICollection args, CommandFlags flags = CommandFlags.None) + { + var db = multiplexer.ApplyDefaultDatabase(database ?? -1); + var msg = new RedisDatabase.ExecuteMessage(multiplexer?.CommandMap, db, flags, command, args); + return ExecuteSync(msg, ResultProcessor.ScriptResult, defaultValue: RedisResult.NullSingle); + } + + public Task ExecuteAsync(int? database, string command, ICollection args, CommandFlags flags = CommandFlags.None) + { + var db = multiplexer.ApplyDefaultDatabase(database ?? -1); + var msg = new RedisDatabase.ExecuteMessage(multiplexer?.CommandMap, db, flags, command, args); + return ExecuteAsync(msg, ResultProcessor.ScriptResult, defaultValue: RedisResult.NullSingle); } /// - /// For testing only + /// For testing only. /// - internal void SimulateConnectionFailure() => server.SimulateConnectionFailure(); + internal void SimulateConnectionFailure(SimulatedFailureType failureType) => server.SimulateConnectionFailure(failureType); public Task LatencyDoctorAsync(CommandFlags flags = CommandFlags.None) { var msg = Message.Create(-1, flags, RedisCommand.LATENCY, RedisLiterals.DOCTOR); - return ExecuteAsync(msg, ResultProcessor.String); + return ExecuteAsync(msg, ResultProcessor.String!, defaultValue: string.Empty); } public string LatencyDoctor(CommandFlags flags = CommandFlags.None) { var msg = Message.Create(-1, flags, RedisCommand.LATENCY, RedisLiterals.DOCTOR); - return ExecuteSync(msg, ResultProcessor.String); + return ExecuteSync(msg, ResultProcessor.String, defaultValue: string.Empty); } - private static Message LatencyResetCommand(string[] eventNames, CommandFlags flags) + private static Message LatencyResetCommand(string[]? 
eventNames, CommandFlags flags) { if (eventNames == null) eventNames = Array.Empty(); switch (eventNames.Length) @@ -968,16 +1091,15 @@ private static Message LatencyResetCommand(string[] eventNames, CommandFlags fla for (int i = 0; i < eventNames.Length; i++) arr[i + 1] = eventNames[i]; return Message.Create(-1, flags, RedisCommand.LATENCY, arr); - } } - public Task LatencyResetAsync(string[] eventNames = null, CommandFlags flags = CommandFlags.None) + public Task LatencyResetAsync(string[]? eventNames = null, CommandFlags flags = CommandFlags.None) { var msg = LatencyResetCommand(eventNames, flags); return ExecuteAsync(msg, ResultProcessor.Int64); } - public long LatencyReset(string[] eventNames = null, CommandFlags flags = CommandFlags.None) + public long LatencyReset(string[]? eventNames = null, CommandFlags flags = CommandFlags.None) { var msg = LatencyResetCommand(eventNames, flags); return ExecuteSync(msg, ResultProcessor.Int64); @@ -986,37 +1108,37 @@ public long LatencyReset(string[] eventNames = null, CommandFlags flags = Comman public Task LatencyHistoryAsync(string eventName, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(-1, flags, RedisCommand.LATENCY, RedisLiterals.HISTORY, (RedisValue)eventName); - return ExecuteAsync(msg, LatencyHistoryEntry.ToArray); + return ExecuteAsync(msg, LatencyHistoryEntry.ToArray, defaultValue: Array.Empty()); } public LatencyHistoryEntry[] LatencyHistory(string eventName, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(-1, flags, RedisCommand.LATENCY, RedisLiterals.HISTORY, (RedisValue)eventName); - return ExecuteSync(msg, LatencyHistoryEntry.ToArray); + return ExecuteSync(msg, LatencyHistoryEntry.ToArray, defaultValue: Array.Empty()); } public Task LatencyLatestAsync(CommandFlags flags = CommandFlags.None) { var msg = Message.Create(-1, flags, RedisCommand.LATENCY, RedisLiterals.LATEST); - return ExecuteAsync(msg, LatencyLatestEntry.ToArray); + return ExecuteAsync(msg, 
LatencyLatestEntry.ToArray, defaultValue: Array.Empty()); } public LatencyLatestEntry[] LatencyLatest(CommandFlags flags = CommandFlags.None) { var msg = Message.Create(-1, flags, RedisCommand.LATENCY, RedisLiterals.LATEST); - return ExecuteSync(msg, LatencyLatestEntry.ToArray); + return ExecuteSync(msg, LatencyLatestEntry.ToArray, defaultValue: Array.Empty()); } public Task MemoryDoctorAsync(CommandFlags flags = CommandFlags.None) { var msg = Message.Create(-1, flags, RedisCommand.MEMORY, RedisLiterals.DOCTOR); - return ExecuteAsync(msg, ResultProcessor.String); + return ExecuteAsync(msg, ResultProcessor.String!, defaultValue: string.Empty); } public string MemoryDoctor(CommandFlags flags = CommandFlags.None) { var msg = Message.Create(-1, flags, RedisCommand.MEMORY, RedisLiterals.DOCTOR); - return ExecuteSync(msg, ResultProcessor.String); + return ExecuteSync(msg, ResultProcessor.String, defaultValue: string.Empty); } public Task MemoryPurgeAsync(CommandFlags flags = CommandFlags.None) @@ -1031,13 +1153,13 @@ public void MemoryPurge(CommandFlags flags = CommandFlags.None) ExecuteSync(msg, ResultProcessor.DemandOK); } - public Task MemoryAllocatorStatsAsync(CommandFlags flags = CommandFlags.None) + public Task MemoryAllocatorStatsAsync(CommandFlags flags = CommandFlags.None) { var msg = Message.Create(-1, flags, RedisCommand.MEMORY, RedisLiterals.MALLOC_STATS); return ExecuteAsync(msg, ResultProcessor.String); } - public string MemoryAllocatorStats(CommandFlags flags = CommandFlags.None) + public string? 
MemoryAllocatorStats(CommandFlags flags = CommandFlags.None) { var msg = Message.Create(-1, flags, RedisCommand.MEMORY, RedisLiterals.MALLOC_STATS); return ExecuteSync(msg, ResultProcessor.String); @@ -1046,13 +1168,13 @@ public string MemoryAllocatorStats(CommandFlags flags = CommandFlags.None) public Task MemoryStatsAsync(CommandFlags flags = CommandFlags.None) { var msg = Message.Create(-1, flags, RedisCommand.MEMORY, RedisLiterals.STATS); - return ExecuteAsync(msg, ResultProcessor.ScriptResult); + return ExecuteAsync(msg, ResultProcessor.ScriptResult, defaultValue: RedisResult.NullArray); } public RedisResult MemoryStats(CommandFlags flags = CommandFlags.None) { var msg = Message.Create(-1, flags, RedisCommand.MEMORY, RedisLiterals.STATS); - return ExecuteSync(msg, ResultProcessor.ScriptResult); + return ExecuteSync(msg, ResultProcessor.ScriptResult, defaultValue: RedisResult.NullArray); } } } diff --git a/src/StackExchange.Redis/RedisStream.cs b/src/StackExchange.Redis/RedisStream.cs deleted file mode 100644 index 6dc6ac1e5..000000000 --- a/src/StackExchange.Redis/RedisStream.cs +++ /dev/null @@ -1,24 +0,0 @@ -namespace StackExchange.Redis -{ - /// - /// Describes a Redis Stream with an associated array of entries. - /// - public readonly struct RedisStream - { - internal RedisStream(RedisKey key, StreamEntry[] entries) - { - Key = key; - Entries = entries; - } - - /// - /// The key for the stream. - /// - public RedisKey Key { get; } - - /// - /// An arry of entries contained within the stream. 
- /// - public StreamEntry[] Entries { get; } - } -} diff --git a/src/StackExchange.Redis/RedisSubscriber.cs b/src/StackExchange.Redis/RedisSubscriber.cs index 40684f105..bd2434771 100644 --- a/src/StackExchange.Redis/RedisSubscriber.cs +++ b/src/StackExchange.Redis/RedisSubscriber.cs @@ -1,57 +1,54 @@ using System; -using System.Collections.Generic; -using System.Diagnostics; +using System.Collections.Concurrent; +using System.Diagnostics.CodeAnalysis; using System.Net; -using System.Runtime.CompilerServices; -using System.Threading; using System.Threading.Tasks; -using Pipelines.Sockets.Unofficial; +using Pipelines.Sockets.Unofficial.Arenas; +using static StackExchange.Redis.ConnectionMultiplexer; namespace StackExchange.Redis { public partial class ConnectionMultiplexer { - private readonly Dictionary subscriptions = new Dictionary(); + private RedisSubscriber? _defaultSubscriber; + internal RedisSubscriber DefaultSubscriber => _defaultSubscriber ??= new RedisSubscriber(this, null); - internal static void CompleteAsWorker(ICompletable completable) - { - if (completable != null) ThreadPool.QueueUserWorkItem(s_CompleteAsWorker, completable); - } + private readonly ConcurrentDictionary subscriptions = new(); + + internal ConcurrentDictionary GetSubscriptions() => subscriptions; + ConcurrentDictionary IInternalConnectionMultiplexer.GetSubscriptions() => GetSubscriptions(); - private static readonly WaitCallback s_CompleteAsWorker = s => ((ICompletable)s).TryComplete(true); + internal int GetSubscriptionsCount() => subscriptions.Count; + int IInternalConnectionMultiplexer.GetSubscriptionsCount() => GetSubscriptionsCount(); - internal static bool TryCompleteHandler(EventHandler handler, object sender, T args, bool isAsync) where T : EventArgs, ICompletable + internal Subscription GetOrAddSubscription(in RedisChannel channel, CommandFlags flags) { - if (handler == null) return true; - if (isAsync) + lock (subscriptions) { - if (handler.IsSingle()) + if 
(!subscriptions.TryGetValue(channel, out var sub)) { - try { handler(sender, args); } catch { } + sub = channel.IsMultiNode ? new MultiNodeSubscription(flags) : new SingleNodeSubscription(flags); + subscriptions.TryAdd(channel, sub); } - else - { - foreach (EventHandler sub in handler.AsEnumerable()) - { - try { sub(sender, args); } catch { } - } - } - return true; + return sub; } - else + } + internal bool TryGetSubscription(in RedisChannel channel, [NotNullWhen(true)] out Subscription? sub) => subscriptions.TryGetValue(channel, out sub); + internal bool TryRemoveSubscription(in RedisChannel channel, [NotNullWhen(true)] out Subscription? sub) + { + lock (subscriptions) { - return false; + return subscriptions.TryRemove(channel, out sub); } } + /// + /// Gets the subscriber counts for a channel. + /// + /// if there's a subscription registered at all. internal bool GetSubscriberCounts(in RedisChannel channel, out int handlers, out int queues) { - Subscription sub; - lock (subscriptions) - { - if (!subscriptions.TryGetValue(channel, out sub)) sub = null; - } - if (sub != null) + if (subscriptions.TryGetValue(channel, out var sub)) { sub.GetSubscriberCounts(out handlers, out queues); return true; @@ -60,422 +57,204 @@ internal bool GetSubscriberCounts(in RedisChannel channel, out int handlers, out return false; } - internal Task AddSubscription(in RedisChannel channel, Action handler, ChannelMessageQueue queue, CommandFlags flags, object asyncState) + /// + /// Gets which server, if any, there's a registered subscription to for this channel. + /// + /// + /// This may be null if there is a subscription, but we don't have a connected server at the moment. + /// This behavior is fine but IsConnected checks, but is a subtle difference in . + /// + internal ServerEndPoint? GetSubscribedServer(in RedisChannel channel) { - Task task = null; - if (handler != null | queue != null) + if (!channel.IsNullOrEmpty && subscriptions.TryGetValue(channel, out Subscription? 
sub)) { - lock (subscriptions) - { - if (!subscriptions.TryGetValue(channel, out Subscription sub)) - { - sub = new Subscription(); - subscriptions.Add(channel, sub); - task = sub.SubscribeToServer(this, channel, flags, asyncState, false); - } - sub.Add(handler, queue); - } - } - return task ?? CompletedTask.Default(asyncState); - } - - internal ServerEndPoint GetSubscribedServer(in RedisChannel channel) - { - if (!channel.IsNullOrEmpty) - { - lock (subscriptions) - { - if (subscriptions.TryGetValue(channel, out Subscription sub)) - { - return sub.GetOwner(); - } - } + return sub.GetAnyCurrentServer(); } return null; } + /// + /// Handler that executes whenever a message comes in, this doles out messages to any registered handlers. + /// internal void OnMessage(in RedisChannel subscription, in RedisChannel channel, in RedisValue payload) { - ICompletable completable = null; - ChannelMessageQueue queues = null; - Subscription sub; - lock (subscriptions) + ICompletable? completable = null; + ChannelMessageQueue? queues = null; + if (subscriptions.TryGetValue(subscription, out Subscription? sub)) { - if (subscriptions.TryGetValue(subscription, out sub)) - { - completable = sub.ForInvoke(channel, payload, out queues); - } + completable = sub.ForInvoke(channel, payload, out queues); } - if (queues != null) ChannelMessageQueue.WriteAll(ref queues, channel, payload); - if (completable != null && !completable.TryComplete(false)) ConnectionMultiplexer.CompleteAsWorker(completable); - } - - internal Task RemoveAllSubscriptions(CommandFlags flags, object asyncState) - { - Task last = null; - lock (subscriptions) + if (queues != null) { - foreach (var pair in subscriptions) - { - pair.Value.MarkCompleted(); - var task = pair.Value.UnsubscribeFromServer(pair.Key, flags, asyncState, false); - if (task != null) last = task; - } - subscriptions.Clear(); + ChannelMessageQueue.WriteAll(ref queues, channel, payload); } - return last ?? 
CompletedTask.Default(asyncState); - } - - internal Task RemoveSubscription(in RedisChannel channel, Action handler, ChannelMessageQueue queue, CommandFlags flags, object asyncState) - { - Task task = null; - lock (subscriptions) + if (completable != null && !completable.TryComplete(false)) { - if (subscriptions.TryGetValue(channel, out Subscription sub)) - { - bool remove; - if (handler == null & queue == null) // blanket wipe - { - sub.MarkCompleted(); - remove = true; - } - else - { - remove = sub.Remove(handler, queue); - } - if (remove) - { - subscriptions.Remove(channel); - task = sub.UnsubscribeFromServer(channel, flags, asyncState, false); - } - } + CompleteAsWorker(completable); } - return task ?? CompletedTask.Default(asyncState); } - internal void ResendSubscriptions(ServerEndPoint server) + internal void OnMessage(in RedisChannel subscription, in RedisChannel channel, Sequence payload) { - if (server == null) return; - lock (subscriptions) + if (payload.IsSingleSegment) { - foreach (var pair in subscriptions) + foreach (var message in payload.FirstSpan) { - pair.Value.Resubscribe(pair.Key, server); + OnMessage(subscription, channel, message.AsRedisValue()); } } - } - - internal bool SubscriberConnected(in RedisChannel channel = default(RedisChannel)) - { - var server = GetSubscribedServer(channel); - if (server != null) return server.IsConnected; - - server = SelectServer(RedisCommand.SUBSCRIBE, CommandFlags.DemandMaster, default(RedisKey)); - return server?.IsConnected == true; - } - - internal long ValidateSubscriptions() - { - lock (subscriptions) + else { - long count = 0; - foreach (var pair in subscriptions) + foreach (var message in payload) { - if (pair.Value.Validate(this, pair.Key)) count++; + OnMessage(subscription, channel, message.AsRedisValue()); } - return count; } } - internal sealed class Subscription + /// + /// Updates all subscriptions re-evaluating their state. 
+ /// This clears the current server if it's not connected, prepping them to reconnect. + /// + internal void UpdateSubscriptions() { - private Action _handlers; - private ChannelMessageQueue _queues; - private ServerEndPoint owner; - - public void Add(Action handler, ChannelMessageQueue queue) - { - if (handler != null) _handlers += handler; - if (queue != null) ChannelMessageQueue.Combine(ref _queues, queue); - } - - public ICompletable ForInvoke(in RedisChannel channel, in RedisValue message, out ChannelMessageQueue queues) + foreach (var pair in subscriptions) { - var handlers = _handlers; - queues = Volatile.Read(ref _queues); - return handlers == null ? null : new MessageCompletable(channel, message, handlers); - } - - internal void MarkCompleted() - { - _handlers = null; - ChannelMessageQueue.MarkAllCompleted(ref _queues); - } - - public bool Remove(Action handler, ChannelMessageQueue queue) - { - if (handler != null) _handlers -= handler; - if (queue != null) ChannelMessageQueue.Remove(ref _queues, queue); - return _handlers == null & _queues == null; + pair.Value.RemoveDisconnectedEndpoints(); } + } - public Task SubscribeToServer(ConnectionMultiplexer multiplexer, in RedisChannel channel, CommandFlags flags, object asyncState, bool internalCall) + /// + /// Ensures all subscriptions are connected to a server, if possible. + /// + /// The count of subscriptions attempting to reconnect (same as the count currently not connected). 
+ internal long EnsureSubscriptions(CommandFlags flags = CommandFlags.None) + { + // TODO: Subscribe with variadic commands to reduce round trips + long count = 0; + var subscriber = DefaultSubscriber; + foreach (var pair in subscriptions) { - var selected = multiplexer.SelectServer(RedisCommand.SUBSCRIBE, flags, default(RedisKey)); - var bridge = selected?.GetBridge(ConnectionType.Subscription, true); - if (bridge == null) return null; - - // note: check we can create the message validly *before* we swap the owner over (Interlocked) - var state = PendingSubscriptionState.Create(channel, this, flags, true, internalCall, asyncState, selected.IsReplica); - - if (Interlocked.CompareExchange(ref owner, selected, null) != null) return null; try { - if (!bridge.TryEnqueueBackgroundSubscriptionWrite(state)) - { - state.Abort(); - return null; - } - return state.Task; + count += pair.Value.EnsureSubscribedToServer(subscriber, pair.Key, flags, true); } - catch - { - // clear the owner if it is still us - Interlocked.CompareExchange(ref owner, null, selected); - throw; - } - } - - public Task UnsubscribeFromServer(in RedisChannel channel, CommandFlags flags, object asyncState, bool internalCall) - { - var oldOwner = Interlocked.Exchange(ref owner, null); - var bridge = oldOwner?.GetBridge(ConnectionType.Subscription, false); - if (bridge == null) return null; - - var state = PendingSubscriptionState.Create(channel, this, flags, false, internalCall, asyncState, oldOwner.IsReplica); - - if (!bridge.TryEnqueueBackgroundSubscriptionWrite(state)) - { - state.Abort(); - return null; - } - return state.Task; - } - - internal readonly struct PendingSubscriptionState - { - public override string ToString() => Message.ToString(); - public Subscription Subscription { get; } - public Message Message { get; } - public bool IsReplica { get; } - public Task Task => _taskSource.Task; - private readonly TaskCompletionSource _taskSource; - - public static PendingSubscriptionState 
Create(RedisChannel channel, Subscription subscription, CommandFlags flags, bool subscribe, bool internalCall, object asyncState, bool isReplica) - => new PendingSubscriptionState(asyncState, channel, subscription, flags, subscribe, internalCall, isReplica); - - public void Abort() => _taskSource.TrySetCanceled(); - public void Fail(Exception ex) => _taskSource.TrySetException(ex); - - private PendingSubscriptionState(object asyncState, RedisChannel channel, Subscription subscription, CommandFlags flags, bool subscribe, bool internalCall, bool isReplica) - { - var cmd = subscribe - ? (channel.IsPatternBased ? RedisCommand.PSUBSCRIBE : RedisCommand.SUBSCRIBE) - : (channel.IsPatternBased ? RedisCommand.PUNSUBSCRIBE : RedisCommand.UNSUBSCRIBE); - var msg = Message.Create(-1, flags, cmd, channel); - if (internalCall) msg.SetInternalCall(); - - var source = TaskResultBox.Create(out _taskSource, asyncState); - msg.SetSource(ResultProcessor.TrackSubscriptions, source); - - Subscription = subscription; - Message = msg; - IsReplica = isReplica; - } - } - - internal ServerEndPoint GetOwner() => Volatile.Read(ref owner); - - internal void Resubscribe(in RedisChannel channel, ServerEndPoint server) - { - if (server != null && Interlocked.CompareExchange(ref owner, server, server) == server) + catch (Exception ex) { - var cmd = channel.IsPatternBased ? 
RedisCommand.PSUBSCRIBE : RedisCommand.SUBSCRIBE; - var msg = Message.Create(-1, CommandFlags.FireAndForget, cmd, channel); - msg.SetInternalCall(); -#pragma warning disable CS0618 - server.WriteDirectFireAndForgetSync(msg, ResultProcessor.TrackSubscriptions); -#pragma warning restore CS0618 - } - } - - internal bool Validate(ConnectionMultiplexer multiplexer, in RedisChannel channel) - { - bool changed = false; - var oldOwner = Volatile.Read(ref owner); - if (oldOwner != null && !oldOwner.IsSelectable(RedisCommand.PSUBSCRIBE)) - { - if (UnsubscribeFromServer(channel, CommandFlags.FireAndForget, null, true) != null) - { - changed = true; - } - oldOwner = null; - } - if (oldOwner == null && SubscribeToServer(multiplexer, channel, CommandFlags.FireAndForget, null, true) != null) - { - changed = true; - } - return changed; - } - - internal void GetSubscriberCounts(out int handlers, out int queues) - { - queues = ChannelMessageQueue.Count(ref _queues); - var tmp = _handlers; - if (tmp == null) - { - handlers = 0; - } - else if (tmp.IsSingle()) - { - handlers = 1; - } - else - { - handlers = 0; - foreach (var sub in tmp.AsEnumerable()) { handlers++; } + OnInternalError(ex); } } + return count; } - internal string GetConnectionName(EndPoint endPoint, ConnectionType connectionType) - => GetServerEndPoint(endPoint)?.GetBridge(connectionType, false)?.PhysicalName; - - internal event Action MessageFaulted; - internal event Action Closing; - internal event Action PreTransactionExec, TransactionLog, InfoMessage; - internal event Action Connecting; - internal event Action Resurrecting; - - [Conditional("VERBOSE")] - internal void OnMessageFaulted(Message msg, Exception fault, [CallerMemberName] string origin = default, [CallerFilePath] string path = default, [CallerLineNumber] int lineNumber = default) - { - MessageFaulted?.Invoke(msg?.CommandAndKey, fault, $"{origin} ({path}#{lineNumber})"); - } - [Conditional("VERBOSE")] - internal void OnInfoMessage(string message) + 
internal enum SubscriptionAction { - InfoMessage?.Invoke(message); - } - [Conditional("VERBOSE")] - internal void OnClosing(bool complete) - { - Closing?.Invoke(complete); - } - [Conditional("VERBOSE")] - internal void OnConnecting(EndPoint endpoint, ConnectionType connectionType) - { - Connecting?.Invoke(endpoint, connectionType); - } - [Conditional("VERBOSE")] - internal void OnResurrecting(EndPoint endpoint, ConnectionType connectionType) - { - Resurrecting.Invoke(endpoint, connectionType); - } - [Conditional("VERBOSE")] - internal void OnPreTransactionExec(Message message) - { - PreTransactionExec?.Invoke(message.CommandAndKey); - } - [Conditional("VERBOSE")] - internal void OnTransactionLog(string message) - { - TransactionLog?.Invoke(message); + Subscribe, + Unsubscribe, } } + /// + /// A wrapper for subscription actions. + /// + /// + /// By having most functionality here and state on , we can + /// use the baseline execution methods to take the normal message paths. + /// internal sealed class RedisSubscriber : RedisBase, ISubscriber { - internal RedisSubscriber(ConnectionMultiplexer multiplexer, object asyncState) : base(multiplexer, asyncState) + internal RedisSubscriber(ConnectionMultiplexer multiplexer, object? asyncState) : base(multiplexer, asyncState) { } - public EndPoint IdentifyEndpoint(RedisChannel channel, CommandFlags flags = CommandFlags.None) + public EndPoint? 
IdentifyEndpoint(RedisChannel channel, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(-1, flags, RedisCommand.PUBSUB, RedisLiterals.NUMSUB, channel); msg.SetInternalCall(); return ExecuteSync(msg, ResultProcessor.ConnectionIdentity); } - public Task IdentifyEndpointAsync(RedisChannel channel, CommandFlags flags = CommandFlags.None) + public Task IdentifyEndpointAsync(RedisChannel channel, CommandFlags flags = CommandFlags.None) { var msg = Message.Create(-1, flags, RedisCommand.PUBSUB, RedisLiterals.NUMSUB, channel); msg.SetInternalCall(); return ExecuteAsync(msg, ResultProcessor.ConnectionIdentity); } - public bool IsConnected(RedisChannel channel = default(RedisChannel)) + /// + /// This is *could* we be connected, as in "what's the theoretical endpoint for this channel?", + /// rather than if we're actually connected and actually listening on that channel. + /// + public bool IsConnected(RedisChannel channel = default) { - return multiplexer.SubscriberConnected(channel); + var server = multiplexer.GetSubscribedServer(channel) ?? 
multiplexer.SelectServer(RedisCommand.SUBSCRIBE, CommandFlags.DemandMaster, channel); + return server?.IsConnected == true && server.IsSubscriberConnected; } public override TimeSpan Ping(CommandFlags flags = CommandFlags.None) { - var msg = CreatePingMessage(flags, out var server); - return ExecuteSync(msg, ResultProcessor.ResponseTimer, server); + var msg = CreatePingMessage(flags); + return ExecuteSync(msg, ResultProcessor.ResponseTimer); } public override Task PingAsync(CommandFlags flags = CommandFlags.None) { - var msg = CreatePingMessage(flags, out var server); - return ExecuteAsync(msg, ResultProcessor.ResponseTimer, server); + var msg = CreatePingMessage(flags); + return ExecuteAsync(msg, ResultProcessor.ResponseTimer); } - private Message CreatePingMessage(CommandFlags flags, out ServerEndPoint server) + private Message CreatePingMessage(CommandFlags flags) { bool usePing = false; - server = null; if (multiplexer.CommandMap.IsAvailable(RedisCommand.PING)) { - try { usePing = GetFeatures(default, flags, out server).PingOnSubscriber; } + try { usePing = GetFeatures(default, flags, RedisCommand.PING, out _).PingOnSubscriber; } catch { } } + Message msg; if (usePing) { - return ResultProcessor.TimingProcessor.CreateMessage(-1, flags, RedisCommand.PING); + msg = ResultProcessor.TimingProcessor.CreateMessage(-1, flags, RedisCommand.PING); } else { // can't use regular PING, but we can unsubscribe from something random that we weren't even subscribed to... 
RedisValue channel = multiplexer.UniqueId; - return ResultProcessor.TimingProcessor.CreateMessage(-1, flags, RedisCommand.UNSUBSCRIBE, channel); + msg = ResultProcessor.TimingProcessor.CreateMessage(-1, flags, RedisCommand.UNSUBSCRIBE, channel); + } + // Ensure the ping is sent over the intended subscriber connection, which wouldn't happen in GetBridge() by default with PING; + msg.SetForSubscriptionBridge(); + return msg; + } + + private static void ThrowIfNull(in RedisChannel channel) + { + if (channel.IsNullOrEmpty) + { + throw new ArgumentNullException(nameof(channel)); } } public long Publish(RedisChannel channel, RedisValue message, CommandFlags flags = CommandFlags.None) { - if (channel.IsNullOrEmpty) throw new ArgumentNullException(nameof(channel)); - var msg = Message.Create(-1, flags, RedisCommand.PUBLISH, channel, message); - return ExecuteSync(msg, ResultProcessor.Int64); + ThrowIfNull(channel); + var msg = Message.Create(-1, flags, channel.GetPublishCommand(), channel, message); + // if we're actively subscribed: send via that connection (otherwise, follow normal rules) + return ExecuteSync(msg, ResultProcessor.Int64, server: multiplexer.GetSubscribedServer(channel)); } public Task PublishAsync(RedisChannel channel, RedisValue message, CommandFlags flags = CommandFlags.None) { - if (channel.IsNullOrEmpty) throw new ArgumentNullException(nameof(channel)); - var msg = Message.Create(-1, flags, RedisCommand.PUBLISH, channel, message); - return ExecuteAsync(msg, ResultProcessor.Int64); + ThrowIfNull(channel); + var msg = Message.Create(-1, flags, channel.GetPublishCommand(), channel, message); + // if we're actively subscribed: send via that connection (otherwise, follow normal rules) + return ExecuteAsync(msg, ResultProcessor.Int64, server: multiplexer.GetSubscribedServer(channel)); } void ISubscriber.Subscribe(RedisChannel channel, Action handler, CommandFlags flags) => Subscribe(channel, handler, null, flags); - public void Subscribe(RedisChannel 
channel, Action handler, ChannelMessageQueue queue, CommandFlags flags) - { - var task = SubscribeAsync(channel, handler, queue, flags); - if ((flags & CommandFlags.FireAndForget) == 0) Wait(task); - } - public ChannelMessageQueue Subscribe(RedisChannel channel, CommandFlags flags = CommandFlags.None) { var queue = new ChannelMessageQueue(channel, this); @@ -483,56 +262,141 @@ public ChannelMessageQueue Subscribe(RedisChannel channel, CommandFlags flags = return queue; } - Task ISubscriber.SubscribeAsync(RedisChannel channel, Action handler, CommandFlags flags) - => SubscribeAsync(channel, handler, null, flags); + private int Subscribe(RedisChannel channel, Action? handler, ChannelMessageQueue? queue, CommandFlags flags) + { + ThrowIfNull(channel); + if (handler == null && queue == null) { return 0; } - public Task SubscribeAsync(in RedisChannel channel, Action handler, ChannelMessageQueue queue, CommandFlags flags) + var sub = multiplexer.GetOrAddSubscription(channel, flags); + sub.Add(handler, queue); + return sub.EnsureSubscribedToServer(this, channel, flags, false); + } + + internal void ResubscribeToServer(Subscription sub, in RedisChannel channel, ServerEndPoint serverEndPoint, string cause) { - if (channel.IsNullOrEmpty) throw new ArgumentNullException(nameof(channel)); - return multiplexer.AddSubscription(channel, handler, queue, flags, asyncState); + // conditional: only if that's the server we were connected to, or "none"; we don't want to end up duplicated + if (sub.TryRemoveEndpoint(serverEndPoint) || !sub.IsConnectedAny()) + { + if (serverEndPoint.IsSubscriberConnected) + { + // we'll *try* for a simple resubscribe, following any -MOVED etc, but if that fails: fall back + // to full reconfigure; importantly, note that we've already recorded the disconnect + var message = sub.GetSubscriptionMessage(channel, SubscriptionAction.Subscribe, CommandFlags.None, false); + _ = ExecuteAsync(message, sub.Processor, serverEndPoint).ContinueWith( + t => 
multiplexer.ReconfigureIfNeeded(serverEndPoint.EndPoint, false, cause: cause), + TaskContinuationOptions.OnlyOnFaulted); + } + else + { + multiplexer.ReconfigureIfNeeded(serverEndPoint.EndPoint, false, cause: cause); + } + } } - internal bool GetSubscriberCounts(in RedisChannel channel, out int handlers, out int queues) - => multiplexer.GetSubscriberCounts(channel, out handlers, out queues); + Task ISubscriber.SubscribeAsync(RedisChannel channel, Action handler, CommandFlags flags) + => SubscribeAsync(channel, handler, null, flags); + + Task ISubscriber.SubscribeAsync(RedisChannel channel, CommandFlags flags) => SubscribeAsync(channel, flags); - public async Task SubscribeAsync(RedisChannel channel, CommandFlags flags = CommandFlags.None) + public async Task SubscribeAsync(RedisChannel channel, CommandFlags flags = CommandFlags.None, ServerEndPoint? server = null) { var queue = new ChannelMessageQueue(channel, this); - await SubscribeAsync(channel, null, queue, flags).ForAwait(); + await SubscribeAsync(channel, null, queue, flags, server).ForAwait(); return queue; } - public EndPoint SubscribedEndpoint(RedisChannel channel) + private Task SubscribeAsync(RedisChannel channel, Action? handler, ChannelMessageQueue? queue, CommandFlags flags, ServerEndPoint? server = null) { - var server = multiplexer.GetSubscribedServer(channel); - return server?.EndPoint; + ThrowIfNull(channel); + if (handler == null && queue == null) { return CompletedTask.Default(null); } + + var sub = multiplexer.GetOrAddSubscription(channel, flags); + sub.Add(handler, queue); + return sub.EnsureSubscribedToServerAsync(this, channel, flags, false, server); } - void ISubscriber.Unsubscribe(RedisChannel channel, Action handler, CommandFlags flags) + public EndPoint? SubscribedEndpoint(RedisChannel channel) => multiplexer.GetSubscribedServer(channel)?.EndPoint; + + void ISubscriber.Unsubscribe(RedisChannel channel, Action? 
handler, CommandFlags flags) => Unsubscribe(channel, handler, null, flags); - public void Unsubscribe(in RedisChannel channel, Action handler, ChannelMessageQueue queue, CommandFlags flags) + + public bool Unsubscribe(in RedisChannel channel, Action? handler, ChannelMessageQueue? queue, CommandFlags flags) { - var task = UnsubscribeAsync(channel, handler, queue, flags); - if ((flags & CommandFlags.FireAndForget) == 0) Wait(task); + ThrowIfNull(channel); + // Unregister the subscription handler/queue, and if that returns true (last handler removed), also disconnect from the server + // ReSharper disable once SimplifyConditionalTernaryExpression + return UnregisterSubscription(channel, handler, queue, out var sub) + ? sub.UnsubscribeFromServer(this, channel, flags, false) + : true; } - public void UnsubscribeAll(CommandFlags flags = CommandFlags.None) + Task ISubscriber.UnsubscribeAsync(RedisChannel channel, Action? handler, CommandFlags flags) + => UnsubscribeAsync(channel, handler, null, flags); + + public Task UnsubscribeAsync(in RedisChannel channel, Action? handler, ChannelMessageQueue? queue, CommandFlags flags) { - var task = UnsubscribeAllAsync(flags); - if ((flags & CommandFlags.FireAndForget) == 0) Wait(task); + ThrowIfNull(channel); + // Unregister the subscription handler/queue, and if that returns true (last handler removed), also disconnect from the server + return UnregisterSubscription(channel, handler, queue, out var sub) + ? sub.UnsubscribeFromServerAsync(this, channel, flags, asyncState, false) + : CompletedTask.Default(asyncState); } - public Task UnsubscribeAllAsync(CommandFlags flags = CommandFlags.None) + /// + /// Unregisters a handler or queue and returns if we should remove it from the server. + /// + /// if we should remove the subscription from the server, otherwise. + private bool UnregisterSubscription(in RedisChannel channel, Action? handler, ChannelMessageQueue? queue, [NotNullWhen(true)] out Subscription? 
sub) + { + ThrowIfNull(channel); + if (multiplexer.TryGetSubscription(channel, out sub)) + { + if (handler == null & queue == null) + { + // This was a blanket wipe, so clear it completely + sub.MarkCompleted(); + multiplexer.TryRemoveSubscription(channel, out _); + return true; + } + else if (sub.Remove(handler, queue)) + { + // Or this was the last handler and/or queue, which also means unsubscribe + multiplexer.TryRemoveSubscription(channel, out _); + return true; + } + } + return false; + } + + // TODO: We need a new api to support SUNSUBSCRIBE all. Calling this now would unsubscribe both sharded and unsharded channels. + public void UnsubscribeAll(CommandFlags flags = CommandFlags.None) { - return multiplexer.RemoveAllSubscriptions(flags, asyncState); + // TODO: Unsubscribe variadic commands to reduce round trips + var subs = multiplexer.GetSubscriptions(); + foreach (var pair in subs) + { + if (subs.TryRemove(pair.Key, out var sub)) + { + sub.MarkCompleted(); + sub.UnsubscribeFromServer(this, pair.Key, flags, false); + } + } } - Task ISubscriber.UnsubscribeAsync(RedisChannel channel, Action handler, CommandFlags flags) - => UnsubscribeAsync(channel, handler, null, flags); - public Task UnsubscribeAsync(in RedisChannel channel, Action handler, ChannelMessageQueue queue, CommandFlags flags) + public Task UnsubscribeAllAsync(CommandFlags flags = CommandFlags.None) { - if (channel.IsNullOrEmpty) throw new ArgumentNullException(nameof(channel)); - return multiplexer.RemoveSubscription(channel, handler, queue, flags, asyncState); + // TODO: Unsubscribe variadic commands to reduce round trips + Task? last = null; + var subs = multiplexer.GetSubscriptions(); + foreach (var pair in subs) + { + if (subs.TryRemove(pair.Key, out var sub)) + { + sub.MarkCompleted(); + last = sub.UnsubscribeFromServerAsync(this, pair.Key, flags, asyncState, false); + } + } + return last ?? 
CompletedTask.Default(asyncState); } } } diff --git a/src/StackExchange.Redis/RedisTransaction.cs b/src/StackExchange.Redis/RedisTransaction.cs index 08dd03fc2..f0a9600fa 100644 --- a/src/StackExchange.Redis/RedisTransaction.cs +++ b/src/StackExchange.Redis/RedisTransaction.cs @@ -6,13 +6,13 @@ namespace StackExchange.Redis { - internal class RedisTransaction : RedisDatabase, ITransaction + internal sealed class RedisTransaction : RedisDatabase, ITransaction { - private List _conditions; - private List _pending; + private List? _conditions; + private List? _pending; private object SyncLock => this; - public RedisTransaction(RedisDatabase wrapped, object asyncState) : base(wrapped.multiplexer, wrapped.Database, asyncState ?? wrapped.AsyncState) + public RedisTransaction(RedisDatabase wrapped, object? asyncState) : base(wrapped.multiplexer, wrapped.Database, asyncState ?? wrapped.AsyncState) { // need to check we can reliably do this... var commandMap = multiplexer.CommandMap; @@ -42,26 +42,23 @@ public ConditionResult AddCondition(Condition condition) } } - public void Execute() - { - Execute(CommandFlags.FireAndForget); - } + public void Execute() => Execute(CommandFlags.FireAndForget); public bool Execute(CommandFlags flags) { - var msg = CreateMessage(flags, out ResultProcessor proc); + var msg = CreateMessage(flags, out ResultProcessor? proc); return base.ExecuteSync(msg, proc); // need base to avoid our local "not supported" override } public Task ExecuteAsync(CommandFlags flags) { - var msg = CreateMessage(flags, out ResultProcessor proc); + var msg = CreateMessage(flags, out ResultProcessor? proc); return base.ExecuteAsync(msg, proc); // need base to avoid our local wrapping override } - internal override Task ExecuteAsync(Message message, ResultProcessor processor, ServerEndPoint server = null) + internal override Task ExecuteAsync(Message? message, ResultProcessor? processor, T defaultValue, ServerEndPoint? 
server = null) { - if (message == null) return CompletedTask.Default(asyncState); + if (message == null) return CompletedTask.FromDefault(defaultValue, asyncState); multiplexer.CheckMessage(message); multiplexer.Trace("Wrapping " + message.Command, "Transaction"); @@ -69,7 +66,7 @@ internal override Task ExecuteAsync(Message message, ResultProcessor pr Task task; if (message.IsFireAndForget) { - task = CompletedTask.Default(null); // F+F explicitly does not get async-state + task = CompletedTask.FromDefault(defaultValue, null); // F+F explicitly does not get async-state } else { @@ -78,6 +75,37 @@ internal override Task ExecuteAsync(Message message, ResultProcessor pr task = tcs.Task; } + QueueMessage(message); + + return task; + } + + internal override Task ExecuteAsync(Message? message, ResultProcessor? processor, ServerEndPoint? server = null) where T : default + { + if (message == null) return CompletedTask.Default(asyncState); + multiplexer.CheckMessage(message); + + multiplexer.Trace("Wrapping " + message.Command, "Transaction"); + // prepare the inner command as a task + Task task; + if (message.IsFireAndForget) + { + task = CompletedTask.Default(null); // F+F explicitly does not get async-state + } + else + { + var source = TaskResultBox.Create(out var tcs, asyncState); + message.SetSource(source!, processor); + task = tcs.Task; + } + + QueueMessage(message); + + return task; + } + + private void QueueMessage(Message message) + { // prepare an outer-command that decorates that, but expects QUEUED var queued = new QueuedMessage(message); var wasQueued = SimpleResultBox.Create(); @@ -87,36 +115,39 @@ internal override Task ExecuteAsync(Message message, ResultProcessor pr // (there is no task for the inner command) lock (SyncLock) { - (_pending ?? 
(_pending = new List())).Add(queued); - + (_pending ??= new List()).Add(queued); switch (message.Command) { case RedisCommand.UNKNOWN: case RedisCommand.EVAL: case RedisCommand.EVALSHA: - // people can do very naughty things in an EVAL - // including change the DB; change it back to what we - // think it should be! - var sel = PhysicalConnection.GetSelectDatabaseCommand(message.Db); - queued = new QueuedMessage(sel); - wasQueued = SimpleResultBox.Create(); - queued.SetSource(wasQueued, QueuedProcessor.Default); - _pending.Add(queued); + var server = multiplexer.SelectServer(message); + if (server != null && server.SupportsDatabases) + { + // people can do very naughty things in an EVAL + // including change the DB; change it back to what we + // think it should be! + var sel = PhysicalConnection.GetSelectDatabaseCommand(message.Db); + queued = new QueuedMessage(sel); + wasQueued = SimpleResultBox.Create(); + queued.SetSource(wasQueued, QueuedProcessor.Default); + _pending.Add(queued); + } + break; } } - return task; } - internal override T ExecuteSync(Message message, ResultProcessor processor, ServerEndPoint server = null) + internal override T? ExecuteSync(Message? message, ResultProcessor? processor, ServerEndPoint? server = null, T? defaultValue = default) where T : default { throw new NotSupportedException("ExecuteSync cannot be used inside a transaction"); } - private Message CreateMessage(CommandFlags flags, out ResultProcessor processor) + private Message? CreateMessage(CommandFlags flags, out ResultProcessor? processor) { - List cond; - List work; + List? cond; + List? 
work; lock (SyncLock) { work = _pending; @@ -138,7 +169,7 @@ private Message CreateMessage(CommandFlags flags, out ResultProcessor proc return new TransactionMessage(Database, flags, cond, work); } - private class QueuedMessage : Message + private sealed class QueuedMessage : Message { public Message Wrapped { get; } private volatile bool wasQueued; @@ -166,17 +197,17 @@ public override int GetHashSlot(ServerSelectionStrategy serverSelectionStrategy) => Wrapped.GetHashSlot(serverSelectionStrategy); } - private class QueuedProcessor : ResultProcessor + private sealed class QueuedProcessor : ResultProcessor { public static readonly ResultProcessor Default = new QueuedProcessor(); protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) { - if (result.Type == ResultType.SimpleString && result.IsEqual(CommonReplies.QUEUED)) + if (result.Resp2TypeBulkString == ResultType.SimpleString && result.IsEqual(CommonReplies.QUEUED)) { if (message is QueuedMessage q) { - connection?.BridgeCouldBeNull?.Multiplexer?.OnTransactionLog($"observed QUEUED for " + q.Wrapped?.CommandAndKey); + connection?.BridgeCouldBeNull?.Multiplexer?.OnTransactionLog("Observed QUEUED for " + q.Wrapped?.CommandAndKey); q.WasQueued = true; } return true; @@ -185,24 +216,25 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes } } - private class TransactionMessage : Message, IMultiMessage + private sealed class TransactionMessage : Message, IMultiMessage { private readonly ConditionResult[] conditions; + public QueuedMessage[] InnerOperations { get; } - public TransactionMessage(int db, CommandFlags flags, List conditions, List operations) + public TransactionMessage(int db, CommandFlags flags, List? conditions, List? operations) : base(db, flags, RedisCommand.EXEC) { - InnerOperations = (operations == null || operations.Count == 0) ? 
Array.Empty() : operations.ToArray(); - this.conditions = (conditions == null || conditions.Count == 0) ? Array.Empty(): conditions.ToArray(); + InnerOperations = (operations?.Count > 0) ? operations.ToArray() : Array.Empty(); + this.conditions = (conditions?.Count > 0) ? conditions.ToArray() : Array.Empty(); } - internal override void SetExceptionAndComplete(Exception exception, PhysicalBridge bridge) + internal override void SetExceptionAndComplete(Exception exception, PhysicalBridge? bridge) { var inner = InnerOperations; if (inner != null) { - for(int i = 0; i < inner.Length;i++) + for (int i = 0; i < inner.Length; i++) { inner[i]?.Wrapped?.SetExceptionAndComplete(exception, bridge); } @@ -215,7 +247,10 @@ internal override void SetExceptionAndComplete(Exception exception, PhysicalBrid public override void AppendStormLog(StringBuilder sb) { base.AppendStormLog(sb); - if (conditions.Length != 0) sb.Append(", ").Append(conditions.Length).Append(" conditions"); + if (conditions.Length != 0) + { + sb.Append(", ").Append(conditions.Length).Append(" conditions"); + } sb.Append(", ").Append(InnerOperations.Length).Append(" operations"); } @@ -225,13 +260,13 @@ public override int GetHashSlot(ServerSelectionStrategy serverSelectionStrategy) for (int i = 0; i < conditions.Length; i++) { int newSlot = conditions[i].Condition.GetHashSlot(serverSelectionStrategy); - slot = serverSelectionStrategy.CombineSlot(slot, newSlot); + slot = ServerSelectionStrategy.CombineSlot(slot, newSlot); if (slot == ServerSelectionStrategy.MultipleSlots) return slot; } for (int i = 0; i < InnerOperations.Length; i++) { int newSlot = InnerOperations[i].Wrapped.GetHashSlot(serverSelectionStrategy); - slot = serverSelectionStrategy.CombineSlot(slot, newSlot); + slot = ServerSelectionStrategy.CombineSlot(slot, newSlot); if (slot == ServerSelectionStrategy.MultipleSlots) return slot; } return slot; @@ -239,9 +274,8 @@ public override int GetHashSlot(ServerSelectionStrategy serverSelectionStrategy) 
public IEnumerable GetMessages(PhysicalConnection connection) { - IResultBox lastBox = null; - var bridge = connection.BridgeCouldBeNull; - if (bridge == null) throw new ObjectDisposedException(connection.ToString()); + IResultBox? lastBox = null; + var bridge = connection.BridgeCouldBeNull ?? throw new ObjectDisposedException(connection.ToString()); bool explicitCheckForQueued = !bridge.ServerEndPoint.GetFeatures().ExecAbort; var multiplexer = bridge.Multiplexer; @@ -250,15 +284,15 @@ public IEnumerable GetMessages(PhysicalConnection connection) { try { - // Important: if the server supports EXECABORT, then we can check the pre-conditions (pause there), - // which will usually be pretty small and cheap to do - if that passes, we can just isue all the commands + // Important: if the server supports EXECABORT, then we can check the preconditions (pause there), + // which will usually be pretty small and cheap to do - if that passes, we can just issue all the commands // and rely on EXECABORT to kick us if we are being idiotic inside the MULTI. However, if the server does // *not* support EXECABORT, then we need to explicitly check for QUEUED anyway; we might as well defer // checking the preconditions to the same time to avoid having to pause twice. 
This will mean that on - // up-version servers, pre-condition failures exit with UNWATCH; and on down-version servers pre-condition - // failures exit with DISCARD - but that's ok : both work fine + // up-version servers, precondition failures exit with UNWATCH; and on down-version servers precondition + // failures exit with DISCARD - but that's okay : both work fine - // PART 1: issue the pre-conditions + // PART 1: issue the preconditions if (!IsAborted && conditions.Length != 0) { sb.AppendLine("issuing conditions..."); @@ -267,7 +301,7 @@ public IEnumerable GetMessages(PhysicalConnection connection) { // need to have locked them before sending them // to guarantee that we see the pulse - IResultBox latestBox = conditions[i].GetBox(); + IResultBox latestBox = conditions[i].GetBox()!; Monitor.Enter(latestBox); if (lastBox != null) Monitor.Exit(lastBox); lastBox = latestBox; @@ -286,8 +320,8 @@ public IEnumerable GetMessages(PhysicalConnection connection) sb.AppendLine("checking conditions in the *early* path"); // need to get those sent ASAP; if they are stuck in the buffers, we die multiplexer.Trace("Flushing and waiting for precondition responses"); -#pragma warning disable CS0618 - connection.FlushSync(true, multiplexer.TimeoutMilliseconds); // make sure they get sent, so we can check for QUEUED (and the pre-conditions if necessary) +#pragma warning disable CS0618 // Type or member is obsolete + connection.FlushSync(true, multiplexer.TimeoutMilliseconds); // make sure they get sent, so we can check for QUEUED (and the preconditions if necessary) #pragma warning restore CS0618 if (Monitor.Wait(lastBox, multiplexer.TimeoutMilliseconds)) @@ -298,7 +332,7 @@ public IEnumerable GetMessages(PhysicalConnection connection) sb.Append("after condition check, we are ").Append(command).AppendLine(); } else - { // timeout running pre-conditions + { // timeout running preconditions multiplexer.Trace("Timeout checking preconditions"); command = RedisCommand.UNWATCH; @@ 
-312,7 +346,7 @@ public IEnumerable GetMessages(PhysicalConnection connection) // PART 2: begin the transaction if (!IsAborted) { - multiplexer.Trace("Begining transaction"); + multiplexer.Trace("Beginning transaction"); yield return Message.Create(-1, CommandFlags.None, RedisCommand.MULTI); sb.AppendLine("issued MULTI"); } @@ -325,9 +359,10 @@ public IEnumerable GetMessages(PhysicalConnection connection) foreach (var op in InnerOperations) { if (explicitCheckForQueued) - { // need to have locked them before sending them + { + // need to have locked them before sending them // to guarantee that we see the pulse - IResultBox thisBox = op.ResultBox; + IResultBox? thisBox = op.ResultBox; if (thisBox != null) { Monitor.Enter(thisBox); @@ -345,8 +380,8 @@ public IEnumerable GetMessages(PhysicalConnection connection) sb.AppendLine("checking conditions in the *late* path"); multiplexer.Trace("Flushing and waiting for precondition+queued responses"); -#pragma warning disable CS0618 - connection.FlushSync(true, multiplexer.TimeoutMilliseconds); // make sure they get sent, so we can check for QUEUED (and the pre-conditions if necessary) +#pragma warning disable CS0618 // Type or member is obsolete + connection.FlushSync(true, multiplexer.TimeoutMilliseconds); // make sure they get sent, so we can check for QUEUED (and the preconditions if necessary) #pragma warning restore CS0618 if (Monitor.Wait(lastBox, multiplexer.TimeoutMilliseconds)) { @@ -406,10 +441,8 @@ public IEnumerable GetMessages(PhysicalConnection connection) } } - protected override void WriteImpl(PhysicalConnection physical) - { - physical.WriteHeader(Command, 0); - } + protected override void WriteImpl(PhysicalConnection physical) => physical.WriteHeader(Command, 0); + public override int ArgCount => 0; private bool AreAllConditionsSatisfied(ConnectionMultiplexer multiplexer) @@ -432,15 +465,15 @@ private bool AreAllConditionsSatisfied(ConnectionMultiplexer multiplexer) } } - private class 
TransactionProcessor : ResultProcessor + private sealed class TransactionProcessor : ResultProcessor { - public static readonly TransactionProcessor Default = new TransactionProcessor(); + public static readonly TransactionProcessor Default = new(); public override bool SetResult(PhysicalConnection connection, Message message, in RawResult result) { if (result.IsError && message is TransactionMessage tran) { - string error = result.GetString(); + string error = result.GetString()!; foreach (var op in tran.InnerOperations) { var inner = op.Wrapped; @@ -453,12 +486,12 @@ public override bool SetResult(PhysicalConnection connection, Message message, i protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) { - connection?.BridgeCouldBeNull?.Multiplexer?.OnTransactionLog($"got {result} for {message.CommandAndKey}"); + var muxer = connection.BridgeCouldBeNull?.Multiplexer; + muxer?.OnTransactionLog($"got {result} for {message.CommandAndKey}"); if (message is TransactionMessage tran) { - var bridge = connection.BridgeCouldBeNull; var wrapped = tran.InnerOperations; - switch (result.Type) + switch (result.Resp2TypeArray) { case ResultType.SimpleString: if (tran.IsAborted && result.IsEqual(CommonReplies.OK)) @@ -467,11 +500,11 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes SetResult(message, false); return true; } - //EXEC returned with a NULL + // EXEC returned with a NULL if (!tran.IsAborted && result.IsNull) { connection.Trace("Server aborted due to failed EXEC"); - //cancel the commands in the transaction and mark them as complete with the completion manager + // cancel the commands in the transaction and mark them as complete with the completion manager foreach (var op in wrapped) { var inner = op.Wrapped; @@ -482,13 +515,13 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes return true; } break; - case ResultType.MultiBulk: + case ResultType.Array: 
if (!tran.IsAborted) { var arr = result.GetItems(); if (result.IsNull) { - connection?.BridgeCouldBeNull?.Multiplexer?.OnTransactionLog($"aborting wrapped messages (failed watch)"); + muxer?.OnTransactionLog("Aborting wrapped messages (failed watch)"); connection.Trace("Server aborted due to failed WATCH"); foreach (var op in wrapped) { @@ -502,13 +535,13 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes else if (wrapped.Length == arr.Length) { connection.Trace("Server committed; processing nested replies"); - connection?.BridgeCouldBeNull?.Multiplexer?.OnTransactionLog($"processing {arr.Length} wrapped messages"); + muxer?.OnTransactionLog($"Processing {arr.Length} wrapped messages"); int i = 0; - foreach(ref RawResult item in arr) + foreach (ref RawResult item in arr) { var inner = wrapped[i++].Wrapped; - connection?.BridgeCouldBeNull?.Multiplexer?.OnTransactionLog($"> got {item} for {inner.CommandAndKey}"); + muxer?.OnTransactionLog($"> got {item} for {inner.CommandAndKey}"); if (inner.ComputeResult(connection, in item)) { inner.Complete(); @@ -524,10 +557,9 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes // the pending tasks foreach (var op in wrapped) { - var inner = op?.Wrapped; - if(inner != null) + if (op?.Wrapped is Message inner) { - inner.Fail(ConnectionFailureType.ProtocolFailure, null, "transaction failure"); + inner.Fail(ConnectionFailureType.ProtocolFailure, null, "Transaction failure", muxer); inner.Complete(); } } @@ -536,17 +568,4 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes } } } - //internal class RedisDatabaseTransaction : RedisCoreTransaction, ITransaction - //{ - // public IRedisDatabaseAsync Pending { get { return this; } } - - // bool ITransaction.Execute(CommandFlags flags) - // { - // return ExecuteTransaction(flags); - // } - // Task ITransaction.ExecuteAsync(CommandFlags flags) - // { - // return ExecuteTransactionAsync(flags); - 
// } - //} } diff --git a/src/StackExchange.Redis/RedisValue.cs b/src/StackExchange.Redis/RedisValue.cs index fa339b951..5b8bfe58f 100644 --- a/src/StackExchange.Redis/RedisValue.cs +++ b/src/StackExchange.Redis/RedisValue.cs @@ -1,6 +1,9 @@ using System; using System.Buffers; using System.Buffers.Text; +using System.ComponentModel; +using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; using System.IO; using System.Linq; using System.Reflection; @@ -11,18 +14,17 @@ namespace StackExchange.Redis { /// - /// Represents values that can be stored in redis + /// Represents values that can be stored in redis. /// public readonly struct RedisValue : IEquatable, IComparable, IComparable, IConvertible { internal static readonly RedisValue[] EmptyArray = Array.Empty(); - private readonly object _objectOrSentinel; + private readonly object? _objectOrSentinel; private readonly ReadOnlyMemory _memory; private readonly long _overlappedBits64; - // internal bool IsNullOrDefaultValue { get { return (valueBlob == null && valueInt64 == 0L) || ((object)valueBlob == (object)NullSentinel); } } - private RedisValue(long overlappedValue64, ReadOnlyMemory memory, object objectOrSentinel) + private RedisValue(long overlappedValue64, ReadOnlyMemory memory, object? objectOrSentinel) { _overlappedBits64 = overlappedValue64; _memory = memory; @@ -30,7 +32,8 @@ private RedisValue(long overlappedValue64, ReadOnlyMemory memory, object o } internal RedisValue(object obj, long overlappedBits) - { // this creates a bodged RedisValue which should **never** + { + // this creates a bodged RedisValue which should **never** // be seen directly; the contents are ... unexpected _overlappedBits64 = overlappedBits; _objectOrSentinel = obj; @@ -42,20 +45,20 @@ internal RedisValue(object obj, long overlappedBits) /// public RedisValue(string value) : this(0, default, value) { } -#pragma warning disable RCS1085 // Use auto-implemented property. 
- internal object DirectObject => _objectOrSentinel; + [System.Diagnostics.CodeAnalysis.SuppressMessage("Roslynator", "RCS1085:Use auto-implemented property.", Justification = "Intentional field ref")] + internal object? DirectObject => _objectOrSentinel; + [System.Diagnostics.CodeAnalysis.SuppressMessage("Roslynator", "RCS1085:Use auto-implemented property.", Justification = "Intentional field ref")] internal long DirectOverlappedBits64 => _overlappedBits64; -#pragma warning restore RCS1085 // Use auto-implemented property. - private readonly static object Sentinel_SignedInteger = new object(); - private readonly static object Sentinel_UnsignedInteger = new object(); - private readonly static object Sentinel_Raw = new object(); - private readonly static object Sentinel_Double = new object(); + private static readonly object Sentinel_SignedInteger = new(); + private static readonly object Sentinel_UnsignedInteger = new(); + private static readonly object Sentinel_Raw = new(); + private static readonly object Sentinel_Double = new(); /// - /// Obtain this value as an object - to be used alongside Unbox + /// Obtain this value as an object - to be used alongside Unbox. /// - public object Box() + public object? Box() { var obj = _objectOrSentinel; if (obj is null || obj is string || obj is byte[]) return obj; @@ -85,40 +88,43 @@ public object Box() /// Parse this object as a value - to be used alongside Box. /// /// The value to unbox. - public static RedisValue Unbox(object value) + public static RedisValue Unbox(object? value) { var val = TryParse(value, out var valid); - if (!valid) throw new ArgumentException(nameof(value)); + if (!valid) throw new ArgumentException("Could not parse value", nameof(value)); return val; } /// - /// Represents the string "" + /// Represents the string "". 
/// public static RedisValue EmptyString { get; } = new RedisValue(0, default, Sentinel_Raw); // note: it is *really important* that this s_EmptyString assignment happens *after* the EmptyString initializer above! - static readonly object s_DoubleNAN = double.NaN, s_DoublePosInf = double.PositiveInfinity, s_DoubleNegInf = double.NegativeInfinity, + private static readonly object s_DoubleNAN = double.NaN, s_DoublePosInf = double.PositiveInfinity, s_DoubleNegInf = double.NegativeInfinity, s_EmptyString = RedisValue.EmptyString; - static readonly object[] s_CommonInt32 = Enumerable.Range(-1, 22).Select(i => (object)i).ToArray(); // [-1,20] = 22 values + private static readonly object[] s_CommonInt32 = Enumerable.Range(-1, 22).Select(i => (object)i).ToArray(); // [-1,20] = 22 values /// - /// A null value + /// A null value. /// public static RedisValue Null { get; } = new RedisValue(0, default, null); /// - /// Indicates whether the value is a primitive integer (signed or unsigned) + /// Indicates whether the **underlying** value is a primitive integer (signed or unsigned); this is **not** + /// the same as whether the value can be *treated* as an integer - see + /// and , which is usually the more appropriate test. /// + [Browsable(false), EditorBrowsable(EditorBrowsableState.Advanced)] // hide it, because this *probably* isn't what callers need public bool IsInteger => _objectOrSentinel == Sentinel_SignedInteger || _objectOrSentinel == Sentinel_UnsignedInteger; /// - /// Indicates whether the value should be considered a null value + /// Indicates whether the value should be considered a null value. /// public bool IsNull => _objectOrSentinel == null; /// - /// Indicates whether the value is either null or a zero-length value + /// Indicates whether the value is either null or a zero-length value. 
/// public bool IsNullOrEmpty { @@ -133,18 +139,18 @@ public bool IsNullOrEmpty } /// - /// Indicates whether the value is greater than zero-length or has an integer value + /// Indicates whether the value is greater than zero-length or has an integer value. /// public bool HasValue => !IsNullOrEmpty; /// - /// Indicates whether two RedisValue values are equivalent + /// Indicates whether two RedisValue values are equivalent. /// /// The first to compare. /// The second to compare. public static bool operator !=(RedisValue x, RedisValue y) => !(x == y); - private double OverlappedValueDouble + internal double OverlappedValueDouble { [MethodImpl(MethodImplOptions.AggressiveInlining)] get => BitConverter.Int64BitsToDouble(_overlappedBits64); @@ -163,7 +169,7 @@ internal ulong OverlappedValueUInt64 } /// - /// Indicates whether two RedisValue values are equivalent + /// Indicates whether two RedisValue values are equivalent. /// /// The first to compare. /// The second to compare. @@ -174,7 +180,7 @@ internal ulong OverlappedValueUInt64 StorageType xType = x.Type, yType = y.Type; if (xType == StorageType.Null) return yType == StorageType.Null; - if (xType == StorageType.Null) return false; + if (yType == StorageType.Null) return false; if (xType == yType) { @@ -186,7 +192,7 @@ internal ulong OverlappedValueUInt64 case StorageType.UInt64: // as long as xType == yType, only need to check the bits return x._overlappedBits64 == y._overlappedBits64; case StorageType.String: - return (string)x._objectOrSentinel == (string)y._objectOrSentinel; + return (string?)x._objectOrSentinel == (string?)y._objectOrSentinel; case StorageType.Raw: return x._memory.Span.SequenceEqual(y._memory.Span); } @@ -210,14 +216,14 @@ internal ulong OverlappedValueUInt64 } // otherwise, compare as strings - return (string)x == (string)y; + return (string?)x == (string?)y; } /// - /// See Object.Equals() + /// See . /// /// The other to compare. 
- public override bool Equals(object obj) + public override bool Equals(object? obj) { if (obj == null) return IsNull; if (obj is RedisValue typed) return Equals(typed); @@ -226,43 +232,34 @@ public override bool Equals(object obj) } /// - /// Indicates whether two RedisValue values are equivalent + /// Indicates whether two RedisValue values are equivalent. /// /// The to compare to. public bool Equals(RedisValue other) => this == other; - /// - /// See Object.GetHashCode() - /// + /// public override int GetHashCode() => GetHashCode(this); private static int GetHashCode(RedisValue x) { x = x.Simplify(); - switch (x.Type) + return x.Type switch { - case StorageType.Null: - return -1; - case StorageType.Double: - return x.OverlappedValueDouble.GetHashCode(); - case StorageType.Int64: - case StorageType.UInt64: - return x._overlappedBits64.GetHashCode(); - case StorageType.Raw: - return ((string)x).GetHashCode(); // to match equality - case StorageType.String: - default: - return x._objectOrSentinel.GetHashCode(); - } + StorageType.Null => -1, + StorageType.Double => x.OverlappedValueDouble.GetHashCode(), + StorageType.Int64 or StorageType.UInt64 => x._overlappedBits64.GetHashCode(), + StorageType.Raw => ((string)x!).GetHashCode(), // to match equality + _ => x._objectOrSentinel!.GetHashCode(), + }; } /// - /// Returns a string representation of the value + /// Returns a string representation of the value. /// - public override string ToString() => (string)this; + public override string ToString() => (string?)this ?? string.Empty; - internal static unsafe bool Equals(byte[] x, byte[] y) + internal static unsafe bool Equals(byte[]? x, byte[]? 
y) { - if ((object)x == (object)y) return true; // ref equals + if ((object?)x == (object?)y) return true; // ref equals if (x == null || y == null) return false; int len = x.Length; if (len != y.Length) return false; @@ -284,32 +281,30 @@ internal static unsafe bool Equals(byte[] x, byte[] y) return true; } - internal static unsafe int GetHashCode(ReadOnlyMemory memory) + internal static unsafe int GetHashCode(ReadOnlySpan span) { unchecked { - var span8 = memory.Span; - int len = span8.Length; + int len = span.Length; if (len == 0) return 0; int acc = 728271210; - var span64 = MemoryMarshal.Cast(span8); + var span64 = MemoryMarshal.Cast(span); for (int i = 0; i < span64.Length; i++) { var val = span64[i]; - int valHash = (((int)val) ^ ((int)(val >> 32))); - acc = (((acc << 5) + acc) ^ valHash); + int valHash = ((int)val) ^ ((int)(val >> 32)); + acc = ((acc << 5) + acc) ^ valHash; } int spare = len % 8, offset = len - spare; while (spare-- != 0) { - acc = (((acc << 5) + acc) ^ span8[offset++]); + acc = ((acc << 5) + acc) ^ span[offset++]; } return acc; } } - internal void AssertNotNull() { @@ -318,7 +313,12 @@ internal void AssertNotNull() internal enum StorageType { - Null, Int64, UInt64, Double, Raw, String, + Null, + Int64, + UInt64, + Double, + Raw, + String, } internal StorageType Type @@ -338,21 +338,21 @@ internal StorageType Type } /// - /// Get the size of this value in bytes + /// Get the size of this value in bytes. 
/// - public long Length() + public long Length() => Type switch { - switch (Type) - { - case StorageType.Null: return 0; - case StorageType.Raw: return _memory.Length; - case StorageType.String: return Encoding.UTF8.GetByteCount((string)_objectOrSentinel); - default: throw new InvalidOperationException("Unable to compute length of type: " + Type); - } - } + StorageType.Null => 0, + StorageType.Raw => _memory.Length, + StorageType.String => Encoding.UTF8.GetByteCount((string)_objectOrSentinel!), + StorageType.Int64 => Format.MeasureInt64(OverlappedValueInt64), + StorageType.UInt64 => Format.MeasureUInt64(OverlappedValueUInt64), + StorageType.Double => Format.MeasureDouble(OverlappedValueDouble), + _ => throw new InvalidOperationException("Unable to compute length of type: " + Type), + }; /// - /// Compare against a RedisValue for relative order + /// Compare against a RedisValue for relative order. /// /// The other to compare. public int CompareTo(RedisValue other) => CompareTo(this, other); @@ -379,7 +379,7 @@ private static int CompareTo(RedisValue x, RedisValue y) case StorageType.UInt64: return x.OverlappedValueUInt64.CompareTo(y.OverlappedValueUInt64); case StorageType.String: - return string.CompareOrdinal((string)x._objectOrSentinel, (string)y._objectOrSentinel); + return string.CompareOrdinal((string)x._objectOrSentinel!, (string)y._objectOrSentinel!); case StorageType.Raw: return x._memory.Span.SequenceCompareTo(y._memory.Span); } @@ -402,7 +402,7 @@ private static int CompareTo(RedisValue x, RedisValue y) } // otherwise, compare as strings - return string.CompareOrdinal((string)x, (string)y); + return string.CompareOrdinal((string?)x, (string?)y); } catch (Exception ex) { @@ -412,7 +412,7 @@ private static int CompareTo(RedisValue x, RedisValue y) return 0; } - int IComparable.CompareTo(object obj) + int IComparable.CompareTo(object? 
obj) { if (obj == null) return CompareTo(Null); @@ -422,7 +422,7 @@ int IComparable.CompareTo(object obj) return CompareTo(val); } - internal static RedisValue TryParse(object obj, out bool valid) + internal static RedisValue TryParse(object? obj, out bool valid) { valid = true; switch (obj) @@ -477,7 +477,7 @@ internal static RedisValue TryParse(object obj, out bool valid) [CLSCompliant(false)] public static implicit operator RedisValue(ulong value) { - const ulong MSB = (1UL) << 63; + const ulong MSB = 1UL << 63; return (value & MSB) == 0 ? new RedisValue((long)value, default, Sentinel_SignedInteger) // prefer signed whenever we can : new RedisValue(unchecked((long)value), default, Sentinel_UnsignedInteger); // with unsigned as the fallback @@ -535,6 +535,7 @@ public static implicit operator RedisValue(ReadOnlyMemory value) if (value.Length == 0) return EmptyString; return new RedisValue(0, value, Sentinel_Raw); } + /// /// Creates a new from a . /// @@ -545,7 +546,7 @@ public static implicit operator RedisValue(ReadOnlyMemory value) /// Creates a new from an . /// /// The to convert to a . - public static implicit operator RedisValue(string value) + public static implicit operator RedisValue(string? value) { if (value == null) return Null; if (value.Length == 0) return EmptyString; @@ -556,7 +557,7 @@ public static implicit operator RedisValue(string value) /// Creates a new from an . /// /// The to convert to a . - public static implicit operator RedisValue(byte[] value) + public static implicit operator RedisValue(byte[]? value) { if (value == null) return Null; if (value.Length == 0) return EmptyString; @@ -579,15 +580,12 @@ public static implicit operator RedisValue(byte[] value) /// Converts a to a . /// /// The to convert. 
- public static explicit operator bool(RedisValue value) + public static explicit operator bool(RedisValue value) => (long)value switch { - switch ((long)value) - { - case 0: return false; - case 1: return true; - default: throw new InvalidCastException(); - } - } + 0 => false, + 1 => true, + _ => throw new InvalidCastException(), + }; /// /// Converts a to a . @@ -603,16 +601,13 @@ public static explicit operator int(RedisValue value) public static explicit operator long(RedisValue value) { value = value.Simplify(); - switch (value.Type) + return value.Type switch { - case StorageType.Null: - return 0; // in redis, an arithmetic zero is kinda the same thing as not-exists (think "incr") - case StorageType.Int64: - return value.OverlappedValueInt64; - case StorageType.UInt64: - return checked((long)value.OverlappedValueUInt64); // this will throw since unsigned is always 64-bit - } - throw new InvalidCastException($"Unable to cast from {value.Type} to long: '{value}'"); + StorageType.Null => 0, // in redis, an arithmetic zero is kinda the same thing as not-exists (think "incr") + StorageType.Int64 => value.OverlappedValueInt64, + StorageType.UInt64 => checked((long)value.OverlappedValueUInt64), // this will throw since unsigned is always 64-bit + _ => throw new InvalidCastException($"Unable to cast from {value.Type} to long: '{value}'"), + }; } /// @@ -623,16 +618,13 @@ public static explicit operator long(RedisValue value) public static explicit operator uint(RedisValue value) { value = value.Simplify(); - switch (value.Type) + return value.Type switch { - case StorageType.Null: - return 0; // in redis, an arithmetic zero is kinda the same thing as not-exists (think "incr") - case StorageType.Int64: - return checked((uint)value.OverlappedValueInt64); - case StorageType.UInt64: - return checked((uint)value.OverlappedValueUInt64); - } - throw new InvalidCastException($"Unable to cast from {value.Type} to uint: '{value}'"); + StorageType.Null => 0, // in redis, an 
arithmetic zero is kinda the same thing as not-exists (think "incr") + StorageType.Int64 => checked((uint)value.OverlappedValueInt64), + StorageType.UInt64 => checked((uint)value.OverlappedValueUInt64), + _ => throw new InvalidCastException($"Unable to cast from {value.Type} to uint: '{value}'"), + }; } /// @@ -643,16 +635,13 @@ public static explicit operator uint(RedisValue value) public static explicit operator ulong(RedisValue value) { value = value.Simplify(); - switch (value.Type) + return value.Type switch { - case StorageType.Null: - return 0; // in redis, an arithmetic zero is kinda the same thing as not-exists (think "incr") - case StorageType.Int64: - return checked((ulong)value.OverlappedValueInt64); // throw if negative - case StorageType.UInt64: - return value.OverlappedValueUInt64; - } - throw new InvalidCastException($"Unable to cast from {value.Type} to ulong: '{value}'"); + StorageType.Null => 0, // in redis, an arithmetic zero is kinda the same thing as not-exists (think "incr") + StorageType.Int64 => checked((ulong)value.OverlappedValueInt64), // throw if negative + StorageType.UInt64 => value.OverlappedValueUInt64, + _ => throw new InvalidCastException($"Unable to cast from {value.Type} to ulong: '{value}'"), + }; } /// @@ -662,18 +651,18 @@ public static explicit operator ulong(RedisValue value) public static explicit operator double(RedisValue value) { value = value.Simplify(); - switch (value.Type) + return value.Type switch { - case StorageType.Null: - return 0; // in redis, an arithmetic zero is kinda the same thing as not-exists (think "incr") - case StorageType.Int64: - return value.OverlappedValueInt64; - case StorageType.UInt64: - return value.OverlappedValueUInt64; - case StorageType.Double: - return value.OverlappedValueDouble; - } - throw new InvalidCastException($"Unable to cast from {value.Type} to double: '{value}'"); + StorageType.Null => 0, // in redis, an arithmetic zero is kinda the same thing as not-exists (think "incr") + 
StorageType.Int64 => value.OverlappedValueInt64, + StorageType.UInt64 => value.OverlappedValueUInt64, + StorageType.Double => value.OverlappedValueDouble, + // special values like NaN/Inf are deliberately not handled by Simplify, but need to be considered for casting + StorageType.String when Format.TryParseDouble((string)value._objectOrSentinel!, out var d) => d, + StorageType.Raw when TryParseDouble(value._memory.Span, out var d) => d, + // anything else: fail + _ => throw new InvalidCastException($"Unable to cast from {value.Type} to double: '{value}'"), + }; } /// @@ -683,18 +672,14 @@ public static explicit operator double(RedisValue value) public static explicit operator decimal(RedisValue value) { value = value.Simplify(); - switch (value.Type) + return value.Type switch { - case StorageType.Null: - return 0; // in redis, an arithmetic zero is kinda the same thing as not-exists (think "incr") - case StorageType.Int64: - return value.OverlappedValueInt64; - case StorageType.UInt64: - return value.OverlappedValueUInt64; - case StorageType.Double: - return (decimal)value.OverlappedValueDouble; - } - throw new InvalidCastException($"Unable to cast from {value.Type} to decimal: '{value}'"); + StorageType.Null => 0, // in redis, an arithmetic zero is kinda the same thing as not-exists (think "incr") + StorageType.Int64 => value.OverlappedValueInt64, + StorageType.UInt64 => value.OverlappedValueUInt64, + StorageType.Double => (decimal)value.OverlappedValueDouble, + _ => throw new InvalidCastException($"Unable to cast from {value.Type} to decimal: '{value}'"), + }; } /// @@ -704,18 +689,14 @@ public static explicit operator decimal(RedisValue value) public static explicit operator float(RedisValue value) { value = value.Simplify(); - switch (value.Type) + return value.Type switch { - case StorageType.Null: - return 0; // in redis, an arithmetic zero is kinda the same thing as not-exists (think "incr") - case StorageType.Int64: - return value.OverlappedValueInt64; - 
case StorageType.UInt64: - return value.OverlappedValueUInt64; - case StorageType.Double: - return (float)value.OverlappedValueDouble; - } - throw new InvalidCastException($"Unable to cast from {value.Type} to double: '{value}'"); + StorageType.Null => 0, // in redis, an arithmetic zero is kinda the same thing as not-exists (think "incr") + StorageType.Int64 => value.OverlappedValueInt64, + StorageType.UInt64 => value.OverlappedValueUInt64, + StorageType.Double => (float)value.OverlappedValueDouble, + _ => throw new InvalidCastException($"Unable to cast from {value.Type} to double: '{value}'"), + }; } private static bool TryParseDouble(ReadOnlySpan blob, out double value) @@ -734,28 +715,28 @@ private static bool TryParseDouble(ReadOnlySpan blob, out double value) /// Converts the to a . /// /// The to convert. - public static explicit operator double? (RedisValue value) + public static explicit operator double?(RedisValue value) => value.IsNull ? (double?)null : (double)value; /// /// Converts the to a . /// /// The to convert. - public static explicit operator float? (RedisValue value) + public static explicit operator float?(RedisValue value) => value.IsNull ? (float?)null : (float)value; /// /// Converts the to a . /// /// The to convert. - public static explicit operator decimal? (RedisValue value) + public static explicit operator decimal?(RedisValue value) => value.IsNull ? (decimal?)null : (decimal)value; /// /// Converts the to a . /// /// The to convert. - public static explicit operator long? (RedisValue value) + public static explicit operator long?(RedisValue value) => value.IsNull ? (long?)null : (long)value; /// @@ -763,14 +744,14 @@ private static bool TryParseDouble(ReadOnlySpan blob, out double value) /// /// The to convert. [CLSCompliant(false)] - public static explicit operator ulong? (RedisValue value) + public static explicit operator ulong?(RedisValue value) => value.IsNull ? (ulong?)null : (ulong)value; /// /// Converts the to a . 
/// /// The to convert. - public static explicit operator int? (RedisValue value) + public static explicit operator int?(RedisValue value) => value.IsNull ? (int?)null : (int)value; /// @@ -778,21 +759,21 @@ private static bool TryParseDouble(ReadOnlySpan blob, out double value) /// /// The to convert. [CLSCompliant(false)] - public static explicit operator uint? (RedisValue value) + public static explicit operator uint?(RedisValue value) => value.IsNull ? (uint?)null : (uint)value; /// /// Converts the to a . /// /// The to convert. - public static explicit operator bool? (RedisValue value) + public static explicit operator bool?(RedisValue value) => value.IsNull ? (bool?)null : (bool)value; /// /// Converts a to a . /// /// The to convert. - public static implicit operator string(RedisValue value) + public static implicit operator string?(RedisValue value) { switch (value.Type) { @@ -800,7 +781,7 @@ public static implicit operator string(RedisValue value) case StorageType.Double: return Format.ToString(value.OverlappedValueDouble); case StorageType.Int64: return Format.ToString(value.OverlappedValueInt64); case StorageType.UInt64: return Format.ToString(value.OverlappedValueUInt64); - case StorageType.String: return (string)value._objectOrSentinel; + case StorageType.String: return (string)value._objectOrSentinel!; case StorageType.Raw: var span = value._memory.Span; if (span.IsEmpty) return ""; @@ -809,7 +790,10 @@ public static implicit operator string(RedisValue value) { return Format.GetString(span); } - catch + catch (Exception e) when // Only catch exception throwed by Encoding.UTF8.GetString + (e is DecoderFallbackException + || e is ArgumentException + || e is ArgumentNullException) { return ToHex(span); } @@ -846,7 +830,7 @@ private static string ToHex(ReadOnlySpan src) /// Converts a to a . /// /// The to convert. 
- public static implicit operator byte[] (RedisValue value) + public static implicit operator byte[]?(RedisValue value) { switch (value.Type) { @@ -863,78 +847,197 @@ public static implicit operator byte[] (RedisValue value) return value._memory.ToArray(); case StorageType.Int64: - Span span = stackalloc byte[PhysicalConnection.MaxInt64TextLen + 2]; - int len = PhysicalConnection.WriteRaw(span, value.OverlappedValueInt64, false, 0); - arr = new byte[len - 2]; // don't need the CRLF - span.Slice(0, arr.Length).CopyTo(arr); - return arr; + Debug.Assert(Format.MaxInt64TextLen <= 24); + Span span = stackalloc byte[24]; + int len = Format.FormatInt64(value.OverlappedValueInt64, span); + return span.Slice(0, len).ToArray(); case StorageType.UInt64: - // we know it is a huge value - just jump straight to Utf8Formatter - span = stackalloc byte[PhysicalConnection.MaxInt64TextLen]; - if (!Utf8Formatter.TryFormat(value.OverlappedValueUInt64, span, out len)) - throw new InvalidOperationException("TryFormat failed"); - arr = new byte[len]; - span.Slice(0, len).CopyTo(arr); - return arr; + Debug.Assert(Format.MaxInt64TextLen <= 24); + span = stackalloc byte[24]; + len = Format.FormatUInt64(value.OverlappedValueUInt64, span); + return span.Slice(0, len).ToArray(); + case StorageType.Double: + span = stackalloc byte[Format.MaxDoubleTextLen]; + len = Format.FormatDouble(value.OverlappedValueDouble, span); + return span.Slice(0, len).ToArray(); + case StorageType.String: + return Encoding.UTF8.GetBytes((string)value._objectOrSentinel!); } // fallback: stringify and encode - return Encoding.UTF8.GetBytes((string)value); + return Encoding.UTF8.GetBytes((string)value!); + } + + /// + /// Gets the length of the value in bytes. 
+ /// + public int GetByteCount() => Type switch + { + StorageType.Null => 0, + StorageType.Raw => _memory.Length, + StorageType.String => Encoding.UTF8.GetByteCount((string)_objectOrSentinel!), + StorageType.Int64 => Format.MeasureInt64(OverlappedValueInt64), + StorageType.UInt64 => Format.MeasureUInt64(OverlappedValueUInt64), + StorageType.Double => Format.MeasureDouble(OverlappedValueDouble), + _ => ThrowUnableToMeasure(), + }; + + /// + /// Gets the maximum length of the value in bytes. + /// + internal int GetMaxByteCount() => Type switch + { + StorageType.Null => 0, + StorageType.Raw => _memory.Length, + StorageType.String => Encoding.UTF8.GetMaxByteCount(((string)_objectOrSentinel!).Length), + StorageType.Int64 => Format.MaxInt64TextLen, + StorageType.UInt64 => Format.MaxInt64TextLen, + StorageType.Double => Format.MaxDoubleTextLen, + _ => ThrowUnableToMeasure(), + }; + + /// + /// Gets the length of the value in characters, assuming UTF8 interpretation of BLOB payloads. + /// + internal int GetCharCount() => Type switch + { + StorageType.Null => 0, + StorageType.Raw => Encoding.UTF8.GetCharCount(_memory.Span), + StorageType.String => ((string)_objectOrSentinel!).Length, + StorageType.Int64 => Format.MeasureInt64(OverlappedValueInt64), + StorageType.UInt64 => Format.MeasureUInt64(OverlappedValueUInt64), + StorageType.Double => Format.MeasureDouble(OverlappedValueDouble), + _ => ThrowUnableToMeasure(), + }; + + /// + /// Gets the length of the value in characters, assuming UTF8 interpretation of BLOB payloads. 
+ /// + internal int GetMaxCharCount() => Type switch + { + StorageType.Null => 0, + StorageType.Raw => Encoding.UTF8.GetMaxCharCount(_memory.Length), + StorageType.String => ((string)_objectOrSentinel!).Length, + StorageType.Int64 => Format.MaxInt64TextLen, + StorageType.UInt64 => Format.MaxInt64TextLen, + StorageType.Double => Format.MaxDoubleTextLen, + _ => ThrowUnableToMeasure(), + }; + + private int ThrowUnableToMeasure() => throw new InvalidOperationException("Unable to compute length of type: " + Type); + + /// + /// Gets the length of the value in bytes. + /// + /* right now, we only support int lengths, but adding this now so that + there are no surprises if/when we add support for discontiguous buffers */ + public long GetLongByteCount() => GetByteCount(); + + /// + /// Copy the value as bytes to the provided . + /// + public int CopyTo(Span destination) + { + switch (Type) + { + case StorageType.Null: + return 0; + case StorageType.Raw: + var srcBytes = _memory.Span; + srcBytes.CopyTo(destination); + return srcBytes.Length; + case StorageType.String: + return Encoding.UTF8.GetBytes(((string)_objectOrSentinel!).AsSpan(), destination); + case StorageType.Int64: + return Format.FormatInt64(OverlappedValueInt64, destination); + case StorageType.UInt64: + return Format.FormatUInt64(OverlappedValueUInt64, destination); + case StorageType.Double: + return Format.FormatDouble(OverlappedValueDouble, destination); + default: + return ThrowUnableToMeasure(); + } } /// - /// Converts a to a ReadOnlyMemory + /// Copy the value as character data to the provided . 
+ /// + internal int CopyTo(Span destination) + { + switch (Type) + { + case StorageType.Null: + return 0; + case StorageType.Raw: + var srcBytes = _memory.Span; + return Encoding.UTF8.GetChars(srcBytes, destination); + case StorageType.String: + var span = ((string)_objectOrSentinel!).AsSpan(); + span.CopyTo(destination); + return span.Length; + case StorageType.Int64: + return Format.FormatInt64(OverlappedValueInt64, destination); + case StorageType.UInt64: + return Format.FormatUInt64(OverlappedValueUInt64, destination); + case StorageType.Double: + return Format.FormatDouble(OverlappedValueDouble, destination); + default: + return ThrowUnableToMeasure(); + } + } + + /// + /// Converts a to a . /// /// The to convert. public static implicit operator ReadOnlyMemory(RedisValue value) - => value.Type == StorageType.Raw ? value._memory : (byte[])value; + => value.Type == StorageType.Raw ? value._memory : (byte[]?)value; TypeCode IConvertible.GetTypeCode() => TypeCode.Object; - bool IConvertible.ToBoolean(IFormatProvider provider) => (bool)this; - byte IConvertible.ToByte(IFormatProvider provider) => (byte)(uint)this; - char IConvertible.ToChar(IFormatProvider provider) => (char)(uint)this; - DateTime IConvertible.ToDateTime(IFormatProvider provider) => DateTime.Parse((string)this, provider); - decimal IConvertible.ToDecimal(IFormatProvider provider) => (decimal)this; - double IConvertible.ToDouble(IFormatProvider provider) => (double)this; - short IConvertible.ToInt16(IFormatProvider provider) => (short)this; - int IConvertible.ToInt32(IFormatProvider provider) => (int)this; - long IConvertible.ToInt64(IFormatProvider provider) => (long)this; - sbyte IConvertible.ToSByte(IFormatProvider provider) => (sbyte)this; - float IConvertible.ToSingle(IFormatProvider provider) => (float)this; - string IConvertible.ToString(IFormatProvider provider) => (string)this; - - object IConvertible.ToType(Type conversionType, IFormatProvider provider) + bool 
IConvertible.ToBoolean(IFormatProvider? provider) => (bool)this; + byte IConvertible.ToByte(IFormatProvider? provider) => (byte)(uint)this; + char IConvertible.ToChar(IFormatProvider? provider) => (char)(uint)this; + DateTime IConvertible.ToDateTime(IFormatProvider? provider) => DateTime.Parse(((string?)this)!, provider); + decimal IConvertible.ToDecimal(IFormatProvider? provider) => (decimal)this; + double IConvertible.ToDouble(IFormatProvider? provider) => (double)this; + short IConvertible.ToInt16(IFormatProvider? provider) => (short)this; + int IConvertible.ToInt32(IFormatProvider? provider) => (int)this; + long IConvertible.ToInt64(IFormatProvider? provider) => (long)this; + sbyte IConvertible.ToSByte(IFormatProvider? provider) => (sbyte)this; + float IConvertible.ToSingle(IFormatProvider? provider) => (float)this; + string IConvertible.ToString(IFormatProvider? provider) => ((string?)this)!; + + object IConvertible.ToType(Type conversionType, IFormatProvider? provider) { if (conversionType == null) throw new ArgumentNullException(nameof(conversionType)); - if (conversionType == typeof(byte[])) return (byte[])this; + if (conversionType == typeof(byte[])) return ((byte[]?)this)!; if (conversionType == typeof(ReadOnlyMemory)) return (ReadOnlyMemory)this; if (conversionType == typeof(RedisValue)) return this; - switch (System.Type.GetTypeCode(conversionType)) + return System.Type.GetTypeCode(conversionType) switch { - case TypeCode.Boolean: return (bool)this; - case TypeCode.Byte: return checked((byte)(uint)this); - case TypeCode.Char: return checked((char)(uint)this); - case TypeCode.DateTime: return DateTime.Parse((string)this, provider); - case TypeCode.Decimal: return (decimal)this; - case TypeCode.Double: return (double)this; - case TypeCode.Int16: return (short)this; - case TypeCode.Int32: return (int)this; - case TypeCode.Int64: return (long)this; - case TypeCode.SByte: return (sbyte)this; - case TypeCode.Single: return (float)this; - case TypeCode.String: 
return (string)this; - case TypeCode.UInt16: return checked((ushort)(uint)this); - case TypeCode.UInt32: return (uint)this; - case TypeCode.UInt64: return (ulong)this; - case TypeCode.Object: return this; - default: - throw new NotSupportedException(); - } + TypeCode.Boolean => (bool)this, + TypeCode.Byte => checked((byte)(uint)this), + TypeCode.Char => checked((char)(uint)this), + TypeCode.DateTime => DateTime.Parse(((string?)this)!, provider), + TypeCode.Decimal => (decimal)this, + TypeCode.Double => (double)this, + TypeCode.Int16 => (short)this, + TypeCode.Int32 => (int)this, + TypeCode.Int64 => (long)this, + TypeCode.SByte => (sbyte)this, + TypeCode.Single => (float)this, + TypeCode.String => ((string?)this)!, + TypeCode.UInt16 => checked((ushort)(uint)this), + TypeCode.UInt32 => (uint)this, + TypeCode.UInt64 => (ulong)this, + TypeCode.Object => this, + _ => throw new NotSupportedException(), + }; } - ushort IConvertible.ToUInt16(IFormatProvider provider) => checked((ushort)(uint)this); - uint IConvertible.ToUInt32(IFormatProvider provider) => (uint)this; - ulong IConvertible.ToUInt64(IFormatProvider provider) => (ulong)this; + ushort IConvertible.ToUInt16(IFormatProvider? provider) => checked((ushort)(uint)this); + uint IConvertible.ToUInt32(IFormatProvider? provider) => (uint)this; + ulong IConvertible.ToUInt64(IFormatProvider? provider) => (ulong)this; /// /// Attempt to reduce to canonical terms ahead of time; parses integers, floats, etc @@ -942,7 +1045,7 @@ object IConvertible.ToType(Type conversionType, IFormatProvider provider) /// but more importantly b: because it can change values - for example, if they start /// with "123.000", it should **stay** as "123.000", not become 123L; this could be /// a hash key or similar - we don't want to break it; RedisConnection uses - /// the storage type, not the "does it look like a long?" - for this reason + /// the storage type, not the "does it look like a long?" - for this reason. 
/// internal RedisValue Simplify() { @@ -951,13 +1054,14 @@ internal RedisValue Simplify() switch (Type) { case StorageType.String: - string s = (string)_objectOrSentinel; + string s = (string)_objectOrSentinel!; if (Format.CouldBeInteger(s)) { if (Format.TryParseInt64(s, out i64)) return i64; if (Format.TryParseUInt64(s, out u64)) return u64; } - if (Format.TryParseDouble(s, out var f64)) return f64; + // note: don't simplify inf/nan, as that causes equality semantic problems + if (Format.TryParseDouble(s, out var f64) && !IsSpecialDouble(f64)) return f64; break; case StorageType.Raw: var b = _memory.Span; @@ -966,7 +1070,8 @@ internal RedisValue Simplify() if (Format.TryParseInt64(b, out i64)) return i64; if (Format.TryParseUInt64(b, out u64)) return u64; } - if (TryParseDouble(b, out f64)) return f64; + // note: don't simplify inf/nan, as that causes equality semantic problems + if (TryParseDouble(b, out f64) && !IsSpecialDouble(f64)) return f64; break; case StorageType.Double: // is the double actually an integer? @@ -977,11 +1082,13 @@ internal RedisValue Simplify() return this; } + private static bool IsSpecialDouble(double d) => double.IsNaN(d) || double.IsInfinity(d); + /// - /// Convert to a signed long if possible, returning true. - /// Returns false otherwise. + /// Convert to a signed if possible. /// /// The value, if conversion was possible. + /// if successfully parsed, otherwise. 
public bool TryParse(out long val) { switch (Type) @@ -994,13 +1101,20 @@ public bool TryParse(out long val) val = default; return false; case StorageType.String: - return Format.TryParseInt64((string)_objectOrSentinel, out val); + return Format.TryParseInt64((string)_objectOrSentinel!, out val); case StorageType.Raw: return Format.TryParseInt64(_memory.Span, out val); case StorageType.Double: var d = OverlappedValueDouble; - try { val = (long)d; } - catch { val = default; return false; } + try + { + val = (long)d; + } + catch + { + val = default; + return false; + } return val == d; case StorageType.Null: // in redis-land 0 approx. equal null; so roll with it @@ -1012,10 +1126,10 @@ public bool TryParse(out long val) } /// - /// Convert to a int if possible, returning true. - /// Returns false otherwise. + /// Convert to an if possible. /// /// The value, if conversion was possible. + /// if successfully parsed, otherwise. public bool TryParse(out int val) { if (!TryParse(out long l) || l > int.MaxValue || l < int.MinValue) @@ -1029,10 +1143,10 @@ public bool TryParse(out int val) } /// - /// Convert to a double if possible, returning true. - /// Returns false otherwise. + /// Convert to a if possible. /// /// The value, if conversion was possible. + /// if successfully parsed, otherwise. public bool TryParse(out double val) { switch (Type) @@ -1047,7 +1161,7 @@ public bool TryParse(out double val) val = OverlappedValueDouble; return true; case StorageType.String: - return Format.TryParseDouble((string)_objectOrSentinel, out val); + return Format.TryParseDouble((string)_objectOrSentinel!, out val); case StorageType.Raw: return TryParseDouble(_memory.Span, out val); case StorageType.Null: @@ -1060,8 +1174,8 @@ public bool TryParse(out double val) } /// - /// Create a RedisValue from a MemoryStream; it will *attempt* to use the internal buffer - /// directly, but if this isn't possibly it will fallback to ToArray + /// Create a from a . 
+ /// It will *attempt* to use the internal buffer directly, but if this isn't possible it will fallback to . /// /// The to create a value from. public static RedisValue CreateFrom(MemoryStream stream) @@ -1079,7 +1193,7 @@ public static RedisValue CreateFrom(MemoryStream stream) } } - private static readonly FieldInfo + private static readonly FieldInfo? s_origin = typeof(MemoryStream).GetField("_origin", BindingFlags.NonPublic | BindingFlags.Instance), s_buffer = typeof(MemoryStream).GetField("_buffer", BindingFlags.NonPublic | BindingFlags.Instance); @@ -1089,8 +1203,8 @@ private static bool ReflectionTryGetBuffer(MemoryStream ms, out ArraySegment(arr, offset, checked((int)ms.Length)); return true; } @@ -1117,8 +1231,8 @@ public bool StartsWith(RedisValue value) switch (thisType) { case StorageType.String: - var sThis = ((string)_objectOrSentinel); - var sOther = ((string)value._objectOrSentinel); + var sThis = (string)_objectOrSentinel!; + var sOther = (string)value._objectOrSentinel!; return sThis.StartsWith(sOther, StringComparison.Ordinal); case StorageType.Raw: rawThis = _memory; @@ -1126,7 +1240,7 @@ public bool StartsWith(RedisValue value) return rawThis.Span.StartsWith(rawOther.Span); } } - byte[] arr0 = null, arr1 = null; + byte[]? arr0 = null, arr1 = null; try { rawThis = AsMemory(out arr0); @@ -1141,7 +1255,7 @@ public bool StartsWith(RedisValue value) } } - private ReadOnlyMemory AsMemory(out byte[] leased) + private ReadOnlyMemory AsMemory(out byte[]? 
leased) { switch (Type) { @@ -1149,8 +1263,8 @@ private ReadOnlyMemory AsMemory(out byte[] leased) leased = null; return _memory; case StorageType.String: - string s = (string)_objectOrSentinel; - HaveString: + string s = (string)_objectOrSentinel!; +HaveString: if (s.Length == 0) { leased = null; @@ -1163,11 +1277,11 @@ private ReadOnlyMemory AsMemory(out byte[] leased) s = Format.ToString(OverlappedValueDouble); goto HaveString; case StorageType.Int64: - leased = ArrayPool.Shared.Rent(PhysicalConnection.MaxInt64TextLen + 2); // reused code has CRLF terminator + leased = ArrayPool.Shared.Rent(Format.MaxInt64TextLen + 2); // reused code has CRLF terminator len = PhysicalConnection.WriteRaw(leased, OverlappedValueInt64) - 2; // drop the CRLF return new ReadOnlyMemory(leased, 0, len); case StorageType.UInt64: - leased = ArrayPool.Shared.Rent(PhysicalConnection.MaxInt64TextLen); // reused code has CRLF terminator + leased = ArrayPool.Shared.Rent(Format.MaxInt64TextLen); // reused code has CRLF terminator // value is huge, jump direct to Utf8Formatter if (!Utf8Formatter.TryFormat(OverlappedValueUInt64, leased, out len)) throw new InvalidOperationException("TryFormat failed"); @@ -1176,5 +1290,109 @@ private ReadOnlyMemory AsMemory(out byte[] leased) leased = null; return default; } + + /// + /// Get the digest (hash used for check-and-set/check-and-delete operations) of this value. + /// + internal ValueCondition Digest() + { + switch (Type) + { + case StorageType.Raw: + return ValueCondition.CalculateDigest(_memory.Span); + case StorageType.Null: + return ValueCondition.NotExists; // interpret === null as "not exists" + default: + var len = GetByteCount(); + byte[]? oversized = null; + Span buffer = len <= 128 ? 
stackalloc byte[128] : (oversized = ArrayPool.Shared.Rent(len)); + CopyTo(buffer); + var digest = ValueCondition.CalculateDigest(buffer.Slice(0, len)); + if (oversized is not null) ArrayPool.Shared.Return(oversized); + return digest; + } + } + + internal bool TryGetSpan(out ReadOnlySpan span) + { + if (_objectOrSentinel == Sentinel_Raw) + { + span = _memory.Span; + return true; + } + span = default; + return false; + } + + /// + /// Indicates whether the current value has the supplied value as a prefix. + /// + /// The to check. + [OverloadResolutionPriority(1)] // prefer this when it is an option (vs casting a byte[] to RedisValue) + public bool StartsWith(ReadOnlySpan value) + { + if (IsNull) return false; + if (value.IsEmpty) return true; + if (IsNullOrEmpty) return false; + + int len; + switch (Type) + { + case StorageType.Raw: + return _memory.Span.StartsWith(value); + case StorageType.Int64: + Span buffer = stackalloc byte[Format.MaxInt64TextLen]; + len = Format.FormatInt64(OverlappedValueInt64, buffer); + return buffer.Slice(0, len).StartsWith(value); + case StorageType.UInt64: + buffer = stackalloc byte[Format.MaxInt64TextLen]; + len = Format.FormatUInt64(OverlappedValueUInt64, buffer); + return buffer.Slice(0, len).StartsWith(value); + case StorageType.Double: + buffer = stackalloc byte[Format.MaxDoubleTextLen]; + len = Format.FormatDouble(OverlappedValueDouble, buffer); + return buffer.Slice(0, len).StartsWith(value); + case StorageType.String: + var s = ((string)_objectOrSentinel!).AsSpan(); + if (s.Length < value.Length) return false; // not enough characters to match + if (s.Length > value.Length) s = s.Slice(0, value.Length); // only need to match the prefix + var maxBytes = Encoding.UTF8.GetMaxByteCount(s.Length); + byte[]? lease = null; + const int MAX_STACK = 128; + buffer = maxBytes <= MAX_STACK ? 
stackalloc byte[MAX_STACK] : (lease = ArrayPool.Shared.Rent(maxBytes)); + var bytes = Encoding.UTF8.GetBytes(s, buffer); + bool isMatch = buffer.Slice(0, bytes).StartsWith(value); + if (lease is not null) ArrayPool.Shared.Return(lease); + return isMatch; + default: + return false; + } + } + + // used by the toy server to smuggle weird vectors; on their own heads... not used by SE.Redis itself + // (these additions just formalize the usage in the older server code) + internal bool TryGetForeign([NotNullWhen(true)] out T? value, out int index, out int length) + where T : class + { + if (typeof(T) != typeof(string) && typeof(T) != typeof(byte[]) && DirectObject is T found) + { + index = 0; + length = checked((int)DirectOverlappedBits64); + value = found; + return true; + } + value = null; + index = 0; + length = 0; + return false; + } + + internal static RedisValue CreateForeign(T obj, int offset, int count) where T : class + { + // non-zero offset isn't supported until v3, left here for API parity + if (typeof(T) == typeof(string) || typeof(T) == typeof(byte[]) || offset != 0) Throw(); + return new RedisValue(obj, count); + static void Throw() => throw new InvalidOperationException(); + } } } diff --git a/src/StackExchange.Redis/RedisValueWithExpiry.cs b/src/StackExchange.Redis/RedisValueWithExpiry.cs deleted file mode 100644 index 340e3365b..000000000 --- a/src/StackExchange.Redis/RedisValueWithExpiry.cs +++ /dev/null @@ -1,29 +0,0 @@ -using System; - -namespace StackExchange.Redis -{ - /// - /// Describes a value/expiry pair - /// - public readonly struct RedisValueWithExpiry - { - /// - /// Creates a from a and a - /// - public RedisValueWithExpiry(RedisValue value, TimeSpan? expiry) - { - Value = value; - Expiry = expiry; - } - - /// - /// The expiry of this record - /// - public TimeSpan? 
Expiry { get; } - - /// - /// The value of this record - /// - public RedisValue Value { get; } - } -} diff --git a/src/StackExchange.Redis/ResultBox.cs b/src/StackExchange.Redis/ResultBox.cs index 4fe34f114..20b76ba15 100644 --- a/src/StackExchange.Redis/ResultBox.cs +++ b/src/StackExchange.Redis/ResultBox.cs @@ -14,13 +14,13 @@ internal interface IResultBox } internal interface IResultBox : IResultBox { - T GetResult(out Exception ex, bool canRecycle = false); + T? GetResult(out Exception? ex, bool canRecycle = false); void SetResult(T value); } internal abstract class SimpleResultBox : IResultBox { - private volatile Exception _exception; + private volatile Exception? _exception; bool IResultBox.IsAsync => false; bool IResultBox.IsFaulted => _exception != null; @@ -42,7 +42,7 @@ void IResultBox.ActivateContinuations() // about any confusion in stack-trace internal static readonly Exception CancelledException = new TaskCanceledException(); - protected Exception Exception + protected Exception? Exception { get => _exception; set => _exception = value; @@ -52,10 +52,10 @@ protected Exception Exception internal sealed class SimpleResultBox : SimpleResultBox, IResultBox { private SimpleResultBox() { } - private T _value; + private T? _value; [ThreadStatic] - private static SimpleResultBox _perThreadInstance; + private static SimpleResultBox? _perThreadInstance; public static IResultBox Create() => new SimpleResultBox(); public static IResultBox Get() // includes recycled boxes; used from sync, so makes re-use easy @@ -64,16 +64,17 @@ private SimpleResultBox() { } _perThreadInstance = null; // in case of oddness; only set back when recycled return obj; } + void IResultBox.SetResult(T value) => _value = value; - T IResultBox.GetResult(out Exception ex, bool canRecycle) + T? IResultBox.GetResult(out Exception? 
ex, bool canRecycle) { var value = _value; ex = Exception; if (canRecycle) { Exception = null; - _value = default; + _value = default!; _perThreadInstance = this; } return value; @@ -85,10 +86,10 @@ internal sealed class TaskResultBox : TaskCompletionSource, IResultBox // you might be asking "wait, doesn't the Task own these?", to which // I say: no; we can't set *immediately* due to thread-theft etc, hence // the fun TryComplete indirection - so we need somewhere to buffer them - private volatile Exception _exception; - private T _value; + private volatile Exception? _exception; + private T _value = default!; - private TaskResultBox(object asyncState, TaskCreationOptions creationOptions) : base(asyncState, creationOptions) + private TaskResultBox(object? asyncState, TaskCreationOptions creationOptions) : base(asyncState, creationOptions) { } bool IResultBox.IsAsync => true; @@ -101,14 +102,14 @@ private TaskResultBox(object asyncState, TaskCreationOptions creationOptions) : void IResultBox.SetResult(T value) => _value = value; - T IResultBox.GetResult(out Exception ex, bool _) + T? IResultBox.GetResult(out Exception? 
ex, bool unused) { ex = _exception; return _value; // nothing to do re recycle: TaskCompletionSource cannot be recycled } - static readonly WaitCallback s_ActivateContinuations = state => ((TaskResultBox)state).ActivateContinuationsImpl(); + private static readonly WaitCallback s_ActivateContinuations = state => ((TaskResultBox)state!).ActivateContinuationsImpl(); void IResultBox.ActivateContinuations() { if ((Task.CreationOptions & TaskCreationOptions.RunContinuationsAsynchronously) == 0) @@ -116,6 +117,8 @@ void IResultBox.ActivateContinuations() else ActivateContinuationsImpl(); } + + [System.Diagnostics.CodeAnalysis.SuppressMessage("Usage", "CA1816:Dispose methods should call SuppressFinalize", Justification = "Intentional observation")] private void ActivateContinuationsImpl() { var val = _value; @@ -135,16 +138,17 @@ private void ActivateContinuationsImpl() } } - public static IResultBox Create(out TaskCompletionSource source, object asyncState) + public static IResultBox Create(out TaskCompletionSource source, object? asyncState) { // it might look a little odd to return the same object as two different things, // but that's because it is serving two purposes, and I want to make it clear // how it is being used in those 2 different ways; also, the *fact* that they // are the same underlying object is an implementation detail that the rest of // the code doesn't need to know about - var obj = new TaskResultBox(asyncState, ConnectionMultiplexer.PreventThreadTheft - ? TaskCreationOptions.None // if we don't trust the TPL/sync-context, avoid a double QUWI dispatch - : TaskCreationOptions.RunContinuationsAsynchronously); + var obj = new TaskResultBox( + asyncState, + // if we don't trust the TPL/sync-context, avoid a double QUWI dispatch + ConnectionMultiplexer.PreventThreadTheft ? 
TaskCreationOptions.None : TaskCreationOptions.RunContinuationsAsynchronously); source = obj; return obj; } diff --git a/src/StackExchange.Redis/ResultProcessor.Digest.cs b/src/StackExchange.Redis/ResultProcessor.Digest.cs new file mode 100644 index 000000000..757009ea5 --- /dev/null +++ b/src/StackExchange.Redis/ResultProcessor.Digest.cs @@ -0,0 +1,42 @@ +using System; +using System.Buffers; + +namespace StackExchange.Redis; + +internal abstract partial class ResultProcessor +{ + // VectorSet result processors + public static readonly ResultProcessor Digest = + new DigestProcessor(); + + private sealed class DigestProcessor : ResultProcessor + { + protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) + { + if (result.IsNull) // for example, key doesn't exist + { + SetResult(message, null); + return true; + } + + if (result.Resp2TypeBulkString == ResultType.BulkString + && result.Payload is { Length: 2 * ValueCondition.DigestBytes } payload) + { + ValueCondition digest; + if (payload.IsSingleSegment) // single chunk - fast path + { + digest = ValueCondition.ParseDigest(payload.First.Span); + } + else // linearize + { + Span buffer = stackalloc byte[2 * ValueCondition.DigestBytes]; + payload.CopyTo(buffer); + digest = ValueCondition.ParseDigest(buffer); + } + SetResult(message, digest); + return true; + } + return false; + } + } +} diff --git a/src/StackExchange.Redis/ResultProcessor.Lease.cs b/src/StackExchange.Redis/ResultProcessor.Lease.cs new file mode 100644 index 000000000..c0f9e6d8e --- /dev/null +++ b/src/StackExchange.Redis/ResultProcessor.Lease.cs @@ -0,0 +1,218 @@ +using System.Diagnostics; +using Pipelines.Sockets.Unofficial.Arenas; + +// ReSharper disable once CheckNamespace +namespace StackExchange.Redis; + +internal abstract partial class ResultProcessor +{ + // Lease result processors + public static readonly ResultProcessor?> LeaseFloat32 = new LeaseFloat32Processor(); + + public static readonly 
ResultProcessor> + Lease = new LeaseProcessor(); + + public static readonly ResultProcessor> + LeaseFromArray = new LeaseFromArrayProcessor(); + + private abstract class LeaseProcessor : ResultProcessor?> + { + protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) + { + if (result.Resp2TypeArray != ResultType.Array) + { + return false; // not an array + } + + // deal with null + if (result.IsNull) + { + SetResult(message, Lease.Empty); + return true; + } + + // lease and fill + var items = result.GetItems(); + var length = checked((int)items.Length); + var lease = Lease.Create(length, clear: false); // note this handles zero nicely + var target = lease.Span; + int index = 0; + foreach (ref RawResult item in items) + { + if (!TryParse(item, out target[index++])) + { + // something went wrong; recycle and quit + lease.Dispose(); + return false; + } + } + Debug.Assert(index == length, "length mismatch"); + SetResult(message, lease); + return true; + } + + protected abstract bool TryParse(in RawResult raw, out T parsed); + } + + private abstract class InterleavedLeaseProcessor : ResultProcessor?> + { + protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) + { + if (result.Resp2TypeArray != ResultType.Array) + { + return false; // not an array + } + + // deal with null + if (result.IsNull) + { + SetResult(message, Lease.Empty); + return true; + } + + // lease and fill + var items = result.GetItems(); + var length = checked((int)items.Length) / 2; + var lease = Lease.Create(length, clear: false); // note this handles zero nicely + var target = lease.Span; + + var iter = items.GetEnumerator(); + for (int i = 0; i < target.Length; i++) + { + bool ok = iter.MoveNext(); + if (ok) + { + ref readonly RawResult first = ref iter.Current; + ok = iter.MoveNext() && TryParse(in first, in iter.Current, out target[i]); + } + if (!ok) + { + lease.Dispose(); + return false; + } + } 
+ SetResult(message, lease); + return true; + } + + protected abstract bool TryParse(in RawResult first, in RawResult second, out T parsed); + } + + // takes a nested vector of the form [[A],[B,C],[D]] and exposes it as [A,B,C,D]; this is + // especially useful for VLINKS + private abstract class FlattenedLeaseProcessor : ResultProcessor?> + { + protected virtual long GetArrayLength(in RawResult array) => array.GetItems().Length; + + protected virtual bool TryReadOne(ref Sequence.Enumerator reader, out T value) + { + if (reader.MoveNext()) + { + return TryReadOne(in reader.Current, out value); + } + value = default!; + return false; + } + + protected virtual bool TryReadOne(in RawResult result, out T value) + { + value = default!; + return false; + } + + protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) + { + if (result.Resp2TypeArray != ResultType.Array) + { + return false; // not an array + } + if (result.IsNull) + { + SetResult(message, Lease.Empty); + return true; + } + var items = result.GetItems(); + long length = 0; + foreach (ref RawResult item in items) + { + if (item.Resp2TypeArray == ResultType.Array && !item.IsNull) + { + length += GetArrayLength(in item); + } + } + + if (length == 0) + { + SetResult(message, Lease.Empty); + return true; + } + var lease = Lease.Create(checked((int)length), clear: false); + int index = 0; + var target = lease.Span; + foreach (ref RawResult item in items) + { + if (item.Resp2TypeArray == ResultType.Array && !item.IsNull) + { + var iter = item.GetItems().GetEnumerator(); + while (index < target.Length && TryReadOne(ref iter, out target[index])) + { + index++; + } + } + } + + if (index == length) + { + SetResult(message, lease); + return true; + } + lease.Dispose(); // failed to fill? 
+ return false; + } + } + + private sealed class LeaseFloat32Processor : LeaseProcessor + { + protected override bool TryParse(in RawResult raw, out float parsed) + { + var result = raw.TryGetDouble(out double val); + parsed = (float)val; + return result; + } + } + + private sealed class LeaseProcessor : ResultProcessor> + { + protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) + { + switch (result.Resp2TypeBulkString) + { + case ResultType.Integer: + case ResultType.SimpleString: + case ResultType.BulkString: + SetResult(message, result.AsLease()!); + return true; + } + return false; + } + } + + private sealed class LeaseFromArrayProcessor : ResultProcessor> + { + protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) + { + switch (result.Resp2TypeBulkString) + { + case ResultType.Array: + var items = result.GetItems(); + if (items.Length == 1) + { // treat an array of 1 like a single reply + SetResult(message, items[0].AsLease()!); + return true; + } + break; + } + return false; + } + } +} diff --git a/src/StackExchange.Redis/ResultProcessor.VectorSets.cs b/src/StackExchange.Redis/ResultProcessor.VectorSets.cs new file mode 100644 index 000000000..f8f3bed72 --- /dev/null +++ b/src/StackExchange.Redis/ResultProcessor.VectorSets.cs @@ -0,0 +1,120 @@ +using Pipelines.Sockets.Unofficial.Arenas; + +// ReSharper disable once CheckNamespace +namespace StackExchange.Redis; + +internal abstract partial class ResultProcessor +{ + // VectorSet result processors + public static readonly ResultProcessor?> VectorSetLinksWithScores = + new VectorSetLinksWithScoresProcessor(); + + public static readonly ResultProcessor?> VectorSetLinks = new VectorSetLinksProcessor(); + + public static readonly ResultProcessor?> LeaseRedisValue = new LeaseRedisValueProcessor(); + + public static ResultProcessor VectorSetInfo = new VectorSetInfoProcessor(); + + private sealed class 
VectorSetLinksWithScoresProcessor : FlattenedLeaseProcessor + { + protected override long GetArrayLength(in RawResult array) => array.GetItems().Length / 2; + + protected override bool TryReadOne(ref Sequence.Enumerator reader, out VectorSetLink value) + { + if (reader.MoveNext()) + { + ref readonly RawResult first = ref reader.Current; + if (reader.MoveNext() && reader.Current.TryGetDouble(out var score)) + { + value = new VectorSetLink(first.AsRedisValue(), score); + return true; + } + } + + value = default; + return false; + } + } + + private sealed class VectorSetLinksProcessor : FlattenedLeaseProcessor + { + protected override bool TryReadOne(in RawResult result, out RedisValue value) + { + value = result.AsRedisValue(); + return true; + } + } + + private sealed class LeaseRedisValueProcessor : LeaseProcessor + { + protected override bool TryParse(in RawResult raw, out RedisValue parsed) + { + parsed = raw.AsRedisValue(); + return true; + } + } + + private sealed partial class VectorSetInfoProcessor : ResultProcessor + { + protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) + { + if (result.Resp2TypeArray == ResultType.Array) + { + if (result.IsNull) + { + SetResult(message, null); + return true; + } + + var quantType = VectorSetQuantization.Unknown; + string? 
quantTypeRaw = null; + int vectorDim = 0, maxLevel = 0; + long resultSize = 0, vsetUid = 0, hnswMaxNodeUid = 0; + var iter = result.GetItems().GetEnumerator(); + while (iter.MoveNext()) + { + if (!iter.Current.TryParse(VectorSetInfoFieldMetadata.TryParse, out VectorSetInfoField field)) + field = VectorSetInfoField.Unknown; + + if (!iter.MoveNext()) break; + ref readonly RawResult value = ref iter.Current; + + switch (field) + { + case VectorSetInfoField.Size when value.TryGetInt64(out var i64): + resultSize = i64; + break; + case VectorSetInfoField.VsetUid when value.TryGetInt64(out var i64): + vsetUid = i64; + break; + case VectorSetInfoField.MaxLevel when value.TryGetInt64(out var i64): + maxLevel = checked((int)i64); + break; + case VectorSetInfoField.VectorDim when value.TryGetInt64(out var i64): + vectorDim = checked((int)i64); + break; + case VectorSetInfoField.QuantType + when value.TryParse(VectorSetQuantizationMetadata.TryParse, out VectorSetQuantization quantTypeValue) + && quantTypeValue is not VectorSetQuantization.Unknown: + quantType = quantTypeValue; + break; + case VectorSetInfoField.QuantType: + quantTypeRaw = value.GetString(); + quantType = VectorSetQuantization.Unknown; + break; + case VectorSetInfoField.HnswMaxNodeUid when value.TryGetInt64(out var i64): + hnswMaxNodeUid = i64; + break; + } + } + + SetResult( + message, + new VectorSetInfo(quantType, quantTypeRaw, vectorDim, resultSize, maxLevel, vsetUid, hnswMaxNodeUid)); + return true; + } + + return false; + } + } +} diff --git a/src/StackExchange.Redis/ResultProcessor.cs b/src/StackExchange.Redis/ResultProcessor.cs index 9f8dd5c51..fc5c3d5b4 100644 --- a/src/StackExchange.Redis/ResultProcessor.cs +++ b/src/StackExchange.Redis/ResultProcessor.cs @@ -2,15 +2,19 @@ using System.Buffers; using System.Collections.Generic; using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; using System.IO; using System.Linq; using System.Net; +using System.Runtime.CompilerServices; +using 
System.Text; using System.Text.RegularExpressions; +using Microsoft.Extensions.Logging; using Pipelines.Sockets.Unofficial.Arenas; namespace StackExchange.Redis { - internal abstract class ResultProcessor + internal abstract partial class ResultProcessor { public static readonly ResultProcessor Boolean = new BooleanProcessor(), @@ -18,13 +22,16 @@ public static readonly ResultProcessor DemandPONG = new ExpectBasicStringProcessor(CommonReplies.PONG), DemandZeroOrOne = new DemandZeroOrOneProcessor(), AutoConfigure = new AutoConfigureProcessor(), - TrackSubscriptions = new TrackSubscriptionsProcessor(), + TrackSubscriptions = new TrackSubscriptionsProcessor(null), Tracer = new TracerProcessor(false), EstablishConnection = new TracerProcessor(true), - BackgroundSaveStarted = new ExpectBasicStringProcessor(CommonReplies.backgroundSavingStarted_trimmed, startsWith: true); + BackgroundSaveStarted = new ExpectBasicStringProcessor(CommonReplies.backgroundSavingStarted_trimmed, startsWith: true), + BackgroundSaveAOFStarted = new ExpectBasicStringProcessor(CommonReplies.backgroundSavingAOFStarted_trimmed, startsWith: true); + + public static readonly ResultProcessor + ByteArray = new ByteArrayProcessor(); public static readonly ResultProcessor - ByteArray = new ByteArrayProcessor(), ScriptLoad = new ScriptLoadProcessor(); public static readonly ResultProcessor @@ -36,6 +43,10 @@ public static readonly ResultProcessor public static readonly ResultProcessor DateTime = new DateTimeProcessor(); + public static readonly ResultProcessor + NullableDateTimeFromMilliseconds = new NullableDateTimeProcessor(fromMilliseconds: true), + NullableDateTimeFromSeconds = new NullableDateTimeProcessor(fromMilliseconds: false); + public static readonly ResultProcessor Double = new DoubleProcessor(); public static readonly ResultProcessor>[]> @@ -46,15 +57,26 @@ public static readonly MultiStreamProcessor public static readonly ResultProcessor Int64 = new Int64Processor(), - PubSubNumSub = new 
PubSubNumSubProcessor(); + PubSubNumSub = new PubSubNumSubProcessor(), + Int64DefaultNegativeOne = new Int64DefaultValueProcessor(-1); + + public static readonly ResultProcessor Int32 = new Int32Processor(); public static readonly ResultProcessor NullableDouble = new NullableDoubleProcessor(); + + public static readonly ResultProcessor + NullableDoubleArray = new NullableDoubleArrayProcessor(); + public static readonly ResultProcessor NullableInt64 = new NullableInt64Processor(); + public static readonly ResultProcessor ExpireResultArray = new ExpireResultArrayProcessor(); + + public static readonly ResultProcessor PersistResultArray = new PersistResultArrayProcessor(); + public static readonly ResultProcessor - RedisChannelArrayLiteral = new RedisChannelArrayProcessor(RedisChannel.PatternMode.Literal); + RedisChannelArrayLiteral = new RedisChannelArrayProcessor(RedisChannel.RedisChannelOptions.None); public static readonly ResultProcessor RedisKey = new RedisKeyProcessor(); @@ -68,15 +90,24 @@ public static readonly ResultProcessor public static readonly ResultProcessor RedisValue = new RedisValueProcessor(); - public static readonly ResultProcessor> - Lease = new LeaseProcessor(); + public static readonly ResultProcessor + RedisValueFromArray = new RedisValueFromArrayProcessor(); public static readonly ResultProcessor RedisValueArray = new RedisValueArrayProcessor(); + public static readonly ResultProcessor + Int64Array = new Int64ArrayProcessor(); + + public static readonly ResultProcessor + NullableStringArray = new NullableStringArrayProcessor(); + public static readonly ResultProcessor StringArray = new StringArrayProcessor(); + public static readonly ResultProcessor + BooleanArray = new BooleanArrayProcessor(); + public static readonly ResultProcessor RedisGeoPositionArray = new RedisValueGeoPositionArrayProcessor(); public static readonly ResultProcessor @@ -96,12 +127,24 @@ public static readonly SortedSetEntryProcessor public static readonly 
SortedSetEntryArrayProcessor SortedSetWithScores = new SortedSetEntryArrayProcessor(); + public static readonly SortedSetPopResultProcessor + SortedSetPopResult = new SortedSetPopResultProcessor(); + + public static readonly ListPopResultProcessor + ListPopResult = new ListPopResultProcessor(); + public static readonly SingleStreamProcessor SingleStream = new SingleStreamProcessor(); public static readonly SingleStreamProcessor SingleStreamWithNameSkip = new SingleStreamProcessor(skipStreamName: true); + public static readonly StreamAutoClaimProcessor + StreamAutoClaim = new StreamAutoClaimProcessor(); + + public static readonly StreamAutoClaimIdsOnlyProcessor + StreamAutoClaimIdsOnly = new StreamAutoClaimIdsOnlyProcessor(); + public static readonly StreamConsumerInfoProcessor StreamConsumerInfo = new StreamConsumerInfoProcessor(); @@ -119,14 +162,16 @@ public static readonly StreamPendingMessagesProcessor public static ResultProcessor GeoRadiusArray(GeoRadiusOptions options) => GeoRadiusResultArrayProcessor.Get(options); - public static readonly ResultProcessor - String = new StringProcessor(), - ClusterNodesRaw = new ClusterNodesRawProcessor(); + public static readonly ResultProcessor + LCSMatchResult = new LongestCommonSubsequenceProcessor(); - #region Sentinel + public static readonly ResultProcessor + String = new StringProcessor(), + TieBreaker = new TieBreakerProcessor(), + ClusterNodesRaw = new ClusterNodesRawProcessor(); - public static readonly ResultProcessor - SentinelMasterEndpoint = new SentinelGetMasterAddressByNameProcessor(); + public static readonly ResultProcessor + SentinelPrimaryEndpoint = new SentinelGetPrimaryAddressByNameProcessor(); public static readonly ResultProcessor SentinelAddressesEndPoints = new SentinelGetSentinelAddressesProcessor(); @@ -137,8 +182,6 @@ public static readonly ResultProcessor public static readonly ResultProcessor[][]> SentinelArrayOfArrays = new SentinelArrayOfArraysProcessor(); - #endregion - public static 
readonly ResultProcessor[]> StringPairInterleaved = new StringPairInterleavedProcessor(); public static readonly TimeSpanProcessor @@ -147,30 +190,33 @@ public static readonly TimeSpanProcessor public static readonly HashEntryArrayProcessor HashEntryArray = new HashEntryArrayProcessor(); - public void ConnectionFail(Message message, ConnectionFailureType fail, Exception innerException, string annotation) + [System.Diagnostics.CodeAnalysis.SuppressMessage("Performance", "CA1822:Mark members as static", Justification = "Conditionally run on instance")] + public void ConnectionFail(Message message, ConnectionFailureType fail, Exception? innerException, string? annotation, ConnectionMultiplexer? muxer) { PhysicalConnection.IdentifyFailureType(innerException, ref fail); - string exMessage = fail.ToString() + (message == null ? "" : (" on " + ( - fail == ConnectionFailureType.ProtocolFailure ? message.ToString() : message.CommandAndKey))); - if (!string.IsNullOrWhiteSpace(annotation)) exMessage += ", " + annotation; - - var ex = innerException == null ? new RedisConnectionException(fail, exMessage) - : new RedisConnectionException(fail, exMessage, innerException); + var sb = new StringBuilder(fail.ToString()); + if (message is not null) + { + sb.Append(" on "); + sb.Append(muxer?.RawConfig.IncludeDetailInExceptions == true ? 
message.ToString() : message.ToStringCommandOnly()); + } + if (!string.IsNullOrWhiteSpace(annotation)) + { + sb.Append(", "); + sb.Append(annotation); + } + var ex = new RedisConnectionException(fail, sb.ToString(), innerException); SetException(message, ex); } - public void ConnectionFail(Message message, ConnectionFailureType fail, string errorMessage) - { + public static void ConnectionFail(Message message, ConnectionFailureType fail, string errorMessage) => SetException(message, new RedisConnectionException(fail, errorMessage)); - } - public void ServerFail(Message message, string errorMessage) - { + public static void ServerFail(Message message, string errorMessage) => SetException(message, new RedisServerException(errorMessage)); - } - public void SetException(Message message, Exception ex) + public static void SetException(Message? message, Exception ex) { var box = message?.ResultBox; box?.SetException(ex); @@ -183,51 +229,80 @@ public virtual bool SetResult(PhysicalConnection connection, Message message, in { try { - logging.Log?.WriteLine($"Response from {bridge?.Name} / {message.CommandAndKey}: {result}"); + logging.Log?.LogInformationResponse(bridge?.Name, message.CommandAndKey, result); } catch { } } if (result.IsError) { - if (result.StartsWith(CommonReplies.NOAUTH)) bridge?.Multiplexer?.SetAuthSuspect(); + if (result.StartsWith(CommonReplies.NOAUTH)) + { + bridge?.Multiplexer.SetAuthSuspect(new RedisServerException("NOAUTH Returned - connection has not yet authenticated")); + } + else if (result.StartsWith(CommonReplies.WRONGPASS)) + { + bridge?.Multiplexer.SetAuthSuspect(new RedisServerException(result.ToString())); + } - var server = bridge.ServerEndPoint; + var server = bridge?.ServerEndPoint; bool log = !message.IsInternalCall; bool isMoved = result.StartsWith(CommonReplies.MOVED); - bool wasNoRedirect = ( message.Flags & CommandFlags.NoRedirect ) != 0; - string err = string.Empty; + bool wasNoRedirect = (message.Flags & CommandFlags.NoRedirect) 
!= 0; + string? err = string.Empty; bool unableToConnectError = false; if (isMoved || result.StartsWith(CommonReplies.ASK)) { message.SetResponseReceived(); log = false; - string[] parts = result.GetString().Split(StringSplits.Space, 3); - EndPoint endpoint; + string[] parts = result.GetString()!.Split(StringSplits.Space, 3); if (Format.TryParseInt32(parts[1], out int hashSlot) - && (endpoint = Format.TryParseEndPoint(parts[2])) != null) + && Format.TryParseEndPoint(parts[2], out var endpoint)) { - // no point sending back to same server, and no point sending to a dead server - if (!Equals(server.EndPoint, endpoint)) + // Check if MOVED points to same endpoint + bool isSameEndpoint = Equals(server?.EndPoint, endpoint); + if (isSameEndpoint && isMoved) + { + // MOVED to same endpoint detected. + // This occurs when Redis/Valkey servers are behind DNS records, load balancers, or proxies. + // The MOVED error signals that the client should reconnect to allow the DNS/proxy/load balancer + // to route the connection to a different underlying server host, then retry the command. + // Mark the bridge to reconnect - reader loop will handle disconnection and reconnection. + bridge?.MarkNeedsReconnect(); + } + if (bridge is null) + { + // already toast + } + else if (bridge.Multiplexer.TryResend(hashSlot, message, endpoint, isMoved, isSameEndpoint)) { - if (bridge == null) - { } // already toast - else if (bridge.Multiplexer.TryResend(hashSlot, message, endpoint, isMoved)) + bridge.Multiplexer.Trace(message.Command + " re-issued to " + endpoint, isMoved ? "MOVED" : "ASK"); + return false; + } + else + { + if (isMoved && wasNoRedirect) { - bridge.Multiplexer.Trace(message.Command + " re-issued to " + endpoint, isMoved ? 
"MOVED" : "ASK"); - return false; + if (bridge.Multiplexer.RawConfig.IncludeDetailInExceptions) + { + err = $"Key has MOVED to Endpoint {endpoint} and hashslot {hashSlot} but CommandFlags.NoRedirect was specified - redirect not followed for {message.CommandAndKey}. "; + } + else + { + err = "Key has MOVED but CommandFlags.NoRedirect was specified - redirect not followed. "; + } } else { - if (isMoved && wasNoRedirect) + unableToConnectError = true; + if (bridge.Multiplexer.RawConfig.IncludeDetailInExceptions) { - err = $"Key has MOVED to Endpoint {endpoint} and hashslot {hashSlot} but CommandFlags.NoRedirect was specified - redirect not followed for {message.CommandAndKey}. "; + err = $"Endpoint {endpoint} serving hashslot {hashSlot} is not reachable at this point of time. Please check connectTimeout value. If it is low, try increasing it to give the ConnectionMultiplexer a chance to recover from the network disconnect. " + + PerfCounterHelper.GetThreadPoolAndCPUSummary(); } else { - unableToConnectError = true; - err = $"Endpoint {endpoint} serving hashslot {hashSlot} is not reachable at this point of time. Please check connectTimeout value. If it is low, try increasing it to give the ConnectionMultiplexer a chance to recover from the network disconnect. " - + PerfCounterHelper.GetThreadPoolAndCPUSummary(bridge.Multiplexer.IncludePerformanceCountersInExceptions); + err = "Endpoint is not reachable at this point of time. Please check connectTimeout value. If it is low, try increasing it to give the ConnectionMultiplexer a chance to recover from the network disconnect. 
"; } } } @@ -236,14 +311,14 @@ public virtual bool SetResult(PhysicalConnection connection, Message message, in if (string.IsNullOrWhiteSpace(err)) { - err = result.GetString(); + err = result.GetString()!; } - if (log) + if (log && server != null) { - bridge.Multiplexer.OnErrorMessage(server.EndPoint, err); + bridge?.Multiplexer.OnErrorMessage(server.EndPoint, err); } - bridge?.Multiplexer?.Trace("Completed with error: " + err + " (" + GetType().Name + ")", ToString()); + bridge?.Multiplexer.Trace("Completed with error: " + err + " (" + GetType().Name + ")", ToString()); if (unableToConnectError) { ConnectionFail(message, ConnectionFailureType.UnableToConnect, err); @@ -258,7 +333,7 @@ public virtual bool SetResult(PhysicalConnection connection, Message message, in bool coreResult = SetResultCore(connection, message, result); if (coreResult) { - bridge?.Multiplexer?.Trace("Completed with success: " + result.ToString() + " (" + GetType().Name + ")", ToString()); + bridge?.Multiplexer.Trace("Completed with success: " + result.ToString() + " (" + GetType().Name + ")", ToString()); } else { @@ -273,7 +348,7 @@ public virtual bool SetResult(PhysicalConnection connection, Message message, in private void UnexpectedResponse(Message message, in RawResult result) { ConnectionMultiplexer.TraceWithoutContext("From " + GetType().Name, "Unexpected Response"); - ConnectionFail(message, ConnectionFailureType.ProtocolFailure, "Unexpected response to " + (message?.Command.ToString() ?? "n/a") + ": " + result.ToString()); + ConnectionFail(message, ConnectionFailureType.ProtocolFailure, "Unexpected response to " + (message?.CommandString ?? "n/a") + ": " + result.ToString()); } public sealed class TimeSpanProcessor : ResultProcessor @@ -286,11 +361,10 @@ public TimeSpanProcessor(bool isMilliseconds) public bool TryParse(in RawResult result, out TimeSpan? 
expiry) { - switch (result.Type) + switch (result.Resp2TypeBulkString) { case ResultType.Integer: - long time; - if (result.TryGetInt64(out time)) + if (result.TryGetInt64(out long time)) { if (time < 0) { @@ -331,19 +405,18 @@ public sealed class TimingProcessor : ResultProcessor { private static readonly double TimestampToTicks = TimeSpan.TicksPerSecond / (double)Stopwatch.Frequency; - public static TimerMessage CreateMessage(int db, CommandFlags flags, RedisCommand command, RedisValue value = default(RedisValue)) - { - return new TimerMessage(db, flags, command, value); - } + public static TimerMessage CreateMessage(int db, CommandFlags flags, RedisCommand command, RedisValue value = default) => + new TimerMessage(db, flags, command, value); protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) { - if (result.Type == ResultType.Error) + if (result.IsError) { return false; } else - { // don't check the actual reply; there are multiple ways of constructing + { + // don't check the actual reply; there are multiple ways of constructing // a timing message, and we don't actually care about what approach was used TimeSpan duration; if (message is TimerMessage timingMessage) @@ -390,17 +463,38 @@ protected override void WriteImpl(PhysicalConnection physical) public sealed class TrackSubscriptionsProcessor : ResultProcessor { + private ConnectionMultiplexer.Subscription? Subscription { get; } + public TrackSubscriptionsProcessor(ConnectionMultiplexer.Subscription? 
sub) => Subscription = sub; + protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) { - if (result.Type == ResultType.MultiBulk) + if (result.Resp2TypeArray == ResultType.Array) { var items = result.GetItems(); if (items.Length >= 3 && items[2].TryGetInt64(out long count)) { connection.SubscriptionCount = count; + SetResult(message, true); + + var ep = connection.BridgeCouldBeNull?.ServerEndPoint; + if (ep is not null) + { + switch (message.Command) + { + case RedisCommand.SUBSCRIBE: + case RedisCommand.SSUBSCRIBE: + case RedisCommand.PSUBSCRIBE: + Subscription?.AddEndpoint(ep); + break; + default: + Subscription?.TryRemoveEndpoint(ep); + break; + } + } return true; } } + SetResult(message, false); return false; } } @@ -409,13 +503,21 @@ internal sealed class DemandZeroOrOneProcessor : ResultProcessor { public static bool TryGet(in RawResult result, out bool value) { - switch (result.Type) + switch (result.Resp2TypeBulkString) { case ResultType.Integer: case ResultType.SimpleString: case ResultType.BulkString: - if (result.IsEqual(CommonReplies.one)) { value = true; return true; } - else if (result.IsEqual(CommonReplies.zero)) { value = false; return true; } + if (result.IsEqual(CommonReplies.one)) + { + value = true; + return true; + } + else if (result.IsEqual(CommonReplies.zero)) + { + value = false; + return true; + } break; } value = false; @@ -435,70 +537,58 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes internal sealed class ScriptLoadProcessor : ResultProcessor { + /// + /// Anything hashed with SHA1 has exactly 40 characters. We can use that as a shortcut in the code bellow. 
+ /// + private const int SHA1Length = 40; + private static readonly Regex sha1 = new Regex("^[0-9a-f]{40}$", RegexOptions.Compiled | RegexOptions.IgnoreCase); - internal static bool IsSHA1(string script) - { - return script != null && sha1.IsMatch(script); - } + internal static bool IsSHA1(string? script) => script is not null && script.Length == SHA1Length && sha1.IsMatch(script); internal const int Sha1HashLength = 20; internal static byte[] ParseSHA1(byte[] value) { - if (value?.Length == Sha1HashLength * 2) + static int FromHex(char c) { - var tmp = new byte[Sha1HashLength]; - int charIndex = 0; - for (int i = 0; i < tmp.Length; i++) - { - int x = FromHex((char)value[charIndex++]), y = FromHex((char)value[charIndex++]); - if (x < 0 || y < 0) return null; - tmp[i] = (byte)((x << 4) | y); - } - return tmp; + if (c >= '0' && c <= '9') return c - '0'; + if (c >= 'a' && c <= 'f') return c - 'a' + 10; + if (c >= 'A' && c <= 'F') return c - 'A' + 10; + return -1; } - return null; - } - internal static byte[] ParseSHA1(string value) - { - if (value?.Length == (Sha1HashLength * 2) && sha1.IsMatch(value)) + if (value?.Length == Sha1HashLength * 2) { var tmp = new byte[Sha1HashLength]; int charIndex = 0; for (int i = 0; i < tmp.Length; i++) { - int x = FromHex(value[charIndex++]), y = FromHex(value[charIndex++]); - if (x < 0 || y < 0) return null; + int x = FromHex((char)value[charIndex++]), y = FromHex((char)value[charIndex++]); + if (x < 0 || y < 0) + { + throw new ArgumentException("Unable to parse response as SHA1", nameof(value)); + } tmp[i] = (byte)((x << 4) | y); } return tmp; } - return null; + throw new ArgumentException("Unable to parse response as SHA1", nameof(value)); } - private static int FromHex(char c) - { - if (c >= '0' && c <= '9') return c - '0'; - if (c >= 'a' && c <= 'f') return c - 'a' + 10; - if (c >= 'A' && c <= 'F') return c - 'A' + 10; - return -1; - } // note that top-level error messages still get handled by SetResult, but nested errors // 
(is that a thing?) will be wrapped in the RedisResult protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) { - switch (result.Type) + switch (result.Resp2TypeBulkString) { case ResultType.BulkString: var asciiHash = result.GetBlob(); if (asciiHash == null || asciiHash.Length != (Sha1HashLength * 2)) return false; - byte[] hash = null; - if (!message.IsInternalCall) - { - hash = ParseSHA1(asciiHash); // external caller wants the hex bytes, not the ascii bytes - } + // External caller wants the hex bytes, not the ASCII bytes + // For nullability/consistency reasons, we always do the parse here. + byte[] hash = ParseSHA1(asciiHash); + if (message is RedisDatabase.ScriptLoadMessage sl) { connection.BridgeCouldBeNull?.ServerEndPoint?.AddScript(sl.Script, asciiHash); @@ -512,18 +602,18 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes internal sealed class SortedSetEntryProcessor : ResultProcessor { - public bool TryParse(in RawResult result, out SortedSetEntry? entry) + public static bool TryParse(in RawResult result, out SortedSetEntry? entry) { - switch (result.Type) + switch (result.Resp2TypeArray) { - case ResultType.MultiBulk: - var arr = result.GetItems(); - if (result.IsNull || arr.Length < 2) + case ResultType.Array: + if (result.IsNull || result.ItemsCount < 2) { entry = null; } else { + var arr = result.GetItems(); entry = new SortedSetEntry(arr[0].AsRedisValue(), arr[1].TryGetDouble(out double val) ? val : double.NaN); } return true; @@ -546,78 +636,179 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes internal sealed class SortedSetEntryArrayProcessor : ValuePairInterleavedProcessorBase { - protected override SortedSetEntry Parse(in RawResult first, in RawResult second) + protected override SortedSetEntry Parse(in RawResult first, in RawResult second, object? 
state) => + new SortedSetEntry(first.AsRedisValue(), second.TryGetDouble(out double val) ? val : double.NaN); + } + + internal sealed class SortedSetPopResultProcessor : ResultProcessor + { + protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) { - return new SortedSetEntry(first.AsRedisValue(), second.TryGetDouble(out double val) ? val : double.NaN); + if (result.Resp2TypeArray == ResultType.Array) + { + if (result.IsNull) + { + SetResult(message, Redis.SortedSetPopResult.Null); + return true; + } + + var arr = result.GetItems(); + SetResult(message, new SortedSetPopResult(arr[0].AsRedisKey(), arr[1].GetItemsAsSortedSetEntryArray()!)); + return true; + } + + return false; } } - internal sealed class HashEntryArrayProcessor : ValuePairInterleavedProcessorBase + internal sealed class ListPopResultProcessor : ResultProcessor { - protected override HashEntry Parse(in RawResult first, in RawResult second) + protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) { - return new HashEntry(first.AsRedisValue(), second.AsRedisValue()); + if (result.Resp2TypeArray == ResultType.Array) + { + if (result.IsNull) + { + SetResult(message, Redis.ListPopResult.Null); + return true; + } + + var arr = result.GetItems(); + SetResult(message, new ListPopResult(arr[0].AsRedisKey(), arr[1].GetItemsAsValues()!)); + return true; + } + + return false; } } + internal sealed class HashEntryArrayProcessor : ValuePairInterleavedProcessorBase + { + protected override HashEntry Parse(in RawResult first, in RawResult second, object? 
state) => + new HashEntry(first.AsRedisValue(), second.AsRedisValue()); + } + internal abstract class ValuePairInterleavedProcessorBase : ResultProcessor { - public bool TryParse(in RawResult result, out T[] pairs) + // when RESP3 was added, some interleaved value/pair responses: became jagged instead; + // this isn't strictly a RESP3 thing (RESP2 supports jagged), but: it is a thing that + // happened, and we need to handle that; thus, by default, we'll detect jagged data + // and handle it automatically; this virtual is included so we can turn it off + // on a per-processor basis if needed + protected virtual bool AllowJaggedPairs => true; + + public bool TryParse(in RawResult result, out T[]? pairs) => TryParse(result, out pairs, false, out _); - public bool TryParse(in RawResult result, out T[] pairs, bool allowOversized, out int count) + public T[]? ParseArray(in RawResult result, bool allowOversized, out int count, object? state) { - count = 0; - switch (result.Type) + if (result.IsNull) { - case ResultType.MultiBulk: - var arr = result.GetItems(); - if (result.IsNull) + count = 0; + return null; + } + + var arr = result.GetItems(); + count = (int)arr.Length; + if (count == 0) + { + return []; + } + + bool interleaved = !(result.IsResp3 && AllowJaggedPairs && IsAllJaggedPairs(arr)); + if (interleaved) count >>= 1; // so: half of that + var pairs = allowOversized ? ArrayPool.Shared.Rent(count) : new T[count]; + + if (interleaved) + { + // linear elements i.e. {key,value,key,value,key,value} + if (arr.IsSingleSegment) + { + var span = arr.FirstSpan; + int offset = 0; + for (int i = 0; i < count; i++) { - pairs = null; + pairs[i] = Parse(span[offset++], span[offset++], state); } - else + } + else + { + var iter = arr.GetEnumerator(); // simplest way of getting successive values + for (int i = 0; i < count; i++) { - count = (int)arr.Length / 2; - if (count == 0) - { - pairs = Array.Empty(); - } - else - { - pairs = allowOversized ? 
ArrayPool.Shared.Rent(count) : new T[count]; - if (arr.IsSingleSegment) - { - var span = arr.FirstSpan; - int offset = 0; - for (int i = 0; i < count; i++) - { - pairs[i] = Parse(span[offset++], span[offset++]); - } - } - else - { - var iter = arr.GetEnumerator(); // simplest way of getting successive values - for (int i = 0; i < count; i++) - { - pairs[i] = Parse(iter.GetNext(), iter.GetNext()); - } - } - } + pairs[i] = Parse(iter.GetNext(), iter.GetNext(), state); + } + } + } + else + { + // jagged elements i.e. {{key,value},{key,value},{key,value}} + // to get here, we've already asserted that all elements are arrays with length 2 + if (arr.IsSingleSegment) + { + int i = 0; + foreach (var el in arr.FirstSpan) + { + var inner = el.GetItems(); + pairs[i++] = Parse(inner[0], inner[1], state); + } + } + else + { + var iter = arr.GetEnumerator(); // simplest way of getting successive values + for (int i = 0; i < count; i++) + { + var inner = iter.GetNext().GetItems(); + pairs[i] = Parse(inner[0], inner[1], state); + } + } + } + return pairs; + + static bool IsAllJaggedPairs(in Sequence arr) + { + return arr.IsSingleSegment ? CheckSpan(arr.FirstSpan) : CheckSpans(arr); + + static bool CheckSpans(in Sequence arr) + { + foreach (var chunk in arr.Spans) + { + if (!CheckSpan(chunk)) return false; + } + return true; + } + static bool CheckSpan(ReadOnlySpan chunk) + { + // check whether each value is actually an array of length 2 + foreach (ref readonly RawResult el in chunk) + { + if (el is not { Resp2TypeArray: ResultType.Array, ItemsCount: 2 }) return false; } return true; + } + } + } + + public bool TryParse(in RawResult result, out T[]? 
pairs, bool allowOversized, out int count) + { + switch (result.Resp2TypeArray) + { + case ResultType.Array: + pairs = ParseArray(in result, allowOversized, out count, null); + return true; default: + count = 0; pairs = null; return false; } } - protected abstract T Parse(in RawResult first, in RawResult second); + protected abstract T Parse(in RawResult first, in RawResult second, object? state); protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) { - if (TryParse(result, out T[] arr)) + if (TryParse(result, out T[]? arr)) { - SetResult(message, arr); + SetResult(message, arr!); return true; } return false; @@ -626,8 +817,8 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes internal sealed class AutoConfigureProcessor : ResultProcessor { - private ConnectionMultiplexer.LogProxy Log { get; } - public AutoConfigureProcessor(ConnectionMultiplexer.LogProxy log = null) => Log = log; + private ILogger? Log { get; } + public AutoConfigureProcessor(ILogger? 
log = null) => Log = log; public override bool SetResult(PhysicalConnection connection, Message message, in RawResult result) { @@ -637,10 +828,11 @@ public override bool SetResult(PhysicalConnection connection, Message message, i if (bridge != null) { var server = bridge.ServerEndPoint; - Log?.WriteLine($"{Format.ToString(server)}: Auto-configured role: replica"); + Log?.LogInformationAutoConfiguredRoleReplica(new(server)); server.IsReplica = true; } } + return base.SetResult(connection, message, result); } @@ -648,74 +840,74 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes { var server = connection.BridgeCouldBeNull?.ServerEndPoint; if (server == null) return false; - switch (result.Type) + + switch (result.Resp2TypeBulkString) { + case ResultType.Integer: + if (message?.Command == RedisCommand.CLIENT) + { + if (result.TryGetInt64(out long clientId)) + { + connection.ConnectionId = clientId; + Log?.LogInformationAutoConfiguredClientConnectionId(new(server), clientId); + + SetResult(message, true); + return true; + } + } + break; case ResultType.BulkString: if (message?.Command == RedisCommand.INFO) { - string info = result.GetString(), line; + string? info = result.GetString(); if (string.IsNullOrWhiteSpace(info)) { SetResult(message, true); return true; } - string masterHost = null, masterPort = null; + string? primaryHost = null, primaryPort = null; bool roleSeen = false; using (var reader = new StringReader(info)) { - while ((line = reader.ReadLine()) != null) + while (reader.ReadLine() is string line) { - if (string.IsNullOrWhiteSpace(line) || line.StartsWith("# ")) continue; + if (string.IsNullOrWhiteSpace(line) || line.StartsWith("# ")) + { + continue; + } - string val; + string? 
val; if ((val = Extract(line, "role:")) != null) { roleSeen = true; - switch (val) + if (TryParseRole(val, out bool isReplica)) { - case "master": - server.IsReplica = false; - Log?.WriteLine($"{Format.ToString(server)}: Auto-configured (INFO) role: master"); - break; - case "replica": - case "slave": - server.IsReplica = true; - Log?.WriteLine($"{Format.ToString(server)}: Auto-configured (INFO) role: replica"); - break; + server.IsReplica = isReplica; + Log?.LogInformationAutoConfiguredInfoRole(new(server), isReplica ? "replica" : "primary"); } } else if ((val = Extract(line, "master_host:")) != null) { - masterHost = val; + primaryHost = val; } else if ((val = Extract(line, "master_port:")) != null) { - masterPort = val; + primaryPort = val; } else if ((val = Extract(line, "redis_version:")) != null) { - if (Version.TryParse(val, out Version version)) + if (Format.TryParseVersion(val, out Version? version)) { server.Version = version; - Log?.WriteLine($"{Format.ToString(server)}: Auto-configured (INFO) version: " + version); + Log?.LogInformationAutoConfiguredInfoVersion(new(server), version); } } else if ((val = Extract(line, "redis_mode:")) != null) { - switch (val) + if (TryParseServerType(val, out var serverType)) { - case "standalone": - server.ServerType = ServerType.Standalone; - Log?.WriteLine($"{Format.ToString(server)}: Auto-configured (INFO) server-type: standalone"); - break; - case "cluster": - server.ServerType = ServerType.Cluster; - Log?.WriteLine($"{Format.ToString(server)}: Auto-configured (INFO) server-type: cluster"); - break; - case "sentinel": - server.ServerType = ServerType.Sentinel; - Log?.WriteLine($"{Format.ToString(server)}: Auto-configured (INFO) server-type: sentinel"); - break; + server.ServerType = serverType; + Log?.LogInformationAutoConfiguredInfoServerType(new(server), serverType); } } else if ((val = Extract(line, "run_id:")) != null) @@ -723,24 +915,25 @@ protected override bool SetResultCore(PhysicalConnection connection, 
Message mes server.RunId = val; } } - if (roleSeen) - { // these are in the same section, if presnt - server.MasterEndPoint = Format.TryParseEndPoint(masterHost, masterPort); + if (roleSeen && Format.TryParseEndPoint(primaryHost!, primaryPort, out var sep)) + { + // These are in the same section, if present + server.PrimaryEndPoint = sep; } } } else if (message?.Command == RedisCommand.SENTINEL) { server.ServerType = ServerType.Sentinel; - Log?.WriteLine($"{Format.ToString(server)}: Auto-configured (SENTINEL) server-type: sentinel"); + Log?.LogInformationAutoConfiguredSentinelServerType(new(server)); } SetResult(message, true); return true; - case ResultType.MultiBulk: + case ResultType.Array: if (message?.Command == RedisCommand.CONFIG) { var iter = result.GetItems().GetEnumerator(); - while(iter.MoveNext()) + while (iter.MoveNext()) { ref RawResult key = ref iter.Current; if (!iter.MoveNext()) break; @@ -760,35 +953,75 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes { targetSeconds = (timeoutSeconds * 3) / 4; } - Log?.WriteLine($"{Format.ToString(server)}: Auto-configured (CONFIG) timeout: " + targetSeconds + "s"); + Log?.LogInformationAutoConfiguredConfigTimeout(new(server), targetSeconds); server.WriteEverySeconds = targetSeconds; } } else if (key.IsEqual(CommonReplies.databases) && val.TryGetInt64(out i64)) { int dbCount = checked((int)i64); - Log?.WriteLine($"{Format.ToString(server)}: Auto-configured (CONFIG) databases: " + dbCount); + Log?.LogInformationAutoConfiguredConfigDatabases(new(server), dbCount); server.Databases = dbCount; + if (dbCount > 1) + { + connection.MultiDatabasesOverride = true; + } } else if (key.IsEqual(CommonReplies.slave_read_only) || key.IsEqual(CommonReplies.replica_read_only)) { if (val.IsEqual(CommonReplies.yes)) { server.ReplicaReadOnly = true; - Log?.WriteLine($"{Format.ToString(server)}: Auto-configured (CONFIG) read-only replica: true"); + 
Log?.LogInformationAutoConfiguredConfigReadOnlyReplica(new(server), true); } else if (val.IsEqual(CommonReplies.no)) { server.ReplicaReadOnly = false; - Log?.WriteLine($"{Format.ToString(server)}: Auto-configured (CONFIG) read-only replica: false"); + Log?.LogInformationAutoConfiguredConfigReadOnlyReplica(new(server), false); } } } } + else if (message?.Command == RedisCommand.HELLO) + { + var iter = result.GetItems().GetEnumerator(); + while (iter.MoveNext()) + { + ref RawResult key = ref iter.Current; + if (!iter.MoveNext()) break; + ref RawResult val = ref iter.Current; + + if (key.IsEqual(CommonReplies.version) && Format.TryParseVersion(val.GetString(), out var version)) + { + server.Version = version; + Log?.LogInformationAutoConfiguredHelloServerVersion(new(server), version); + } + else if (key.IsEqual(CommonReplies.proto) && val.TryGetInt64(out var i64)) + { + connection.SetProtocol(i64 >= 3 ? RedisProtocol.Resp3 : RedisProtocol.Resp2); + Log?.LogInformationAutoConfiguredHelloProtocol(new(server), connection.Protocol ?? RedisProtocol.Resp2); + } + else if (key.IsEqual(CommonReplies.id) && val.TryGetInt64(out i64)) + { + connection.ConnectionId = i64; + Log?.LogInformationAutoConfiguredHelloConnectionId(new(server), i64); + } + else if (key.IsEqual(CommonReplies.mode) && TryParseServerType(val.GetString(), out var serverType)) + { + server.ServerType = serverType; + Log?.LogInformationAutoConfiguredHelloServerType(new(server), serverType); + } + else if (key.IsEqual(CommonReplies.role) && TryParseRole(val.GetString(), out bool isReplica)) + { + server.IsReplica = isReplica; + Log?.LogInformationAutoConfiguredHelloRole(new(server), isReplica ? 
"replica" : "primary"); + } + } + } else if (message?.Command == RedisCommand.SENTINEL) { server.ServerType = ServerType.Sentinel; - Log?.WriteLine($"{Format.ToString(server)}: Auto-configured (SENTINEL) server-type: sentinel"); + Log?.LogInformationAutoConfiguredSentinelServerType(new(server)); } SetResult(message, true); return true; @@ -796,11 +1029,50 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes return false; } - private static string Extract(string line, string prefix) + private static string? Extract(string line, string prefix) { if (line.StartsWith(prefix)) return line.Substring(prefix.Length).Trim(); return null; } + + private static bool TryParseServerType(string? val, out ServerType serverType) + { + switch (val) + { + case "standalone": + serverType = ServerType.Standalone; + return true; + case "cluster": + serverType = ServerType.Cluster; + return true; + case "sentinel": + serverType = ServerType.Sentinel; + return true; + default: + serverType = default; + return false; + } + } + + private static bool TryParseRole(string? val, out bool isReplica) + { + switch (val) + { + case "primary": + case "master": + isReplica = false; + return true; + case "replica": + case "slave": + isReplica = true; + return true; + default: + isReplica = default; + return false; + } + } + + internal static ResultProcessor Create(ILogger? log) => log is null ? 
AutoConfigure : new AutoConfigureProcessor(log); } private sealed class BooleanProcessor : ResultProcessor @@ -812,7 +1084,7 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes SetResult(message, false); // lots of ops return (nil) when they mean "no" return true; } - switch (result.Type) + switch (result.Resp2TypeBulkString) { case ResultType.SimpleString: if (result.IsEqual(CommonReplies.OK)) @@ -828,7 +1100,7 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes case ResultType.BulkString: SetResult(message, result.GetBoolean()); return true; - case ResultType.MultiBulk: + case ResultType.Array: var items = result.GetItems(); if (items.Length == 1) { // treat an array of 1 like a single reply (for example, SCRIPT EXISTS) @@ -841,11 +1113,11 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes } } - private sealed class ByteArrayProcessor : ResultProcessor + private sealed class ByteArrayProcessor : ResultProcessor { protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) { - switch (result.Type) + switch (result.Resp2TypeBulkString) { case ResultType.BulkString: SetResult(message, result.GetBlob()); @@ -859,8 +1131,7 @@ private sealed class ClusterNodesProcessor : ResultProcessor + private sealed class ClusterNodesRawProcessor : ResultProcessor { protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) { - switch (result.Type) + switch (result.Resp2TypeBulkString) { case ResultType.Integer: case ResultType.SimpleString: case ResultType.BulkString: - string nodes = result.GetString(); + string nodes = result.GetString()!; try - { ClusterNodesProcessor.Parse(connection, nodes); } + { + ClusterNodesProcessor.Parse(connection, nodes); + } catch - { /* tralalalala */} + { + /* tralalalala */ + } SetResult(message, nodes); return true; } @@ -908,8 +1188,12 @@ private sealed 
class ConnectionIdentityProcessor : ResultProcessor { protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) { - SetResult(message, connection.BridgeCouldBeNull?.ServerEndPoint?.EndPoint); - return true; + if (connection.BridgeCouldBeNull is PhysicalBridge bridge) + { + SetResult(message, bridge.ServerEndPoint.EndPoint); + return true; + } + return false; } } @@ -918,7 +1202,7 @@ private sealed class DateTimeProcessor : ResultProcessor protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) { long unixTime; - switch (result.Type) + switch (result.Resp2TypeArray) { case ResultType.Integer: if (result.TryGetInt64(out unixTime)) @@ -928,7 +1212,7 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes return true; } break; - case ResultType.MultiBulk: + case ResultType.Array: var arr = result.GetItems(); switch (arr.Length) { @@ -943,7 +1227,7 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes case 2: if (arr[0].TryGetInt64(out unixTime) && arr[1].TryGetInt64(out long micros)) { - var time = RedisBase.UnixEpoch.AddSeconds(unixTime).AddTicks(micros * 10); // datetime ticks are 100ns + var time = RedisBase.UnixEpoch.AddSeconds(unixTime).AddTicks(micros * 10); // DateTime ticks are 100ns SetResult(message, time); return true; } @@ -955,15 +1239,42 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes } } + public sealed class NullableDateTimeProcessor : ResultProcessor + { + private readonly bool isMilliseconds; + public NullableDateTimeProcessor(bool fromMilliseconds) => isMilliseconds = fromMilliseconds; + + protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) + { + switch (result.Resp2TypeBulkString) + { + case ResultType.Integer when result.TryGetInt64(out var duration): + DateTime? 
expiry = duration switch + { + // -1 means no expiry and -2 means key does not exist + < 0 => null, + _ when isMilliseconds => RedisBase.UnixEpoch.AddMilliseconds(duration), + _ => RedisBase.UnixEpoch.AddSeconds(duration), + }; + SetResult(message, expiry); + return true; + + case ResultType.BulkString when result.IsNull: + SetResult(message, null); + return true; + } + return false; + } + } + private sealed class DoubleProcessor : ResultProcessor { protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) { - switch (result.Type) + switch (result.Resp2TypeBulkString) { case ResultType.Integer: - long i64; - if (result.TryGetInt64(out i64)) + if (result.TryGetInt64(out long i64)) { SetResult(message, i64); return true; @@ -971,8 +1282,7 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes break; case ResultType.SimpleString: case ResultType.BulkString: - double val; - if (result.TryGetDouble(out val)) + if (result.TryGetDouble(out double val)) { SetResult(message, val); return true; @@ -1000,7 +1310,7 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes SetResult(message, true); return true; } - if(message.Command == RedisCommand.AUTH) connection?.BridgeCouldBeNull?.Multiplexer?.SetAuthSuspect(); + if (message.Command == RedisCommand.AUTH) connection?.BridgeCouldBeNull?.Multiplexer?.SetAuthSuspect(new RedisException("Unknown AUTH exception")); return false; } } @@ -1009,13 +1319,15 @@ private sealed class InfoProcessor : ResultProcessor>>(); - using (var reader = new StringReader(result.GetString())) + var raw = result.GetString(); + if (raw is not null) { - while ((line = reader.ReadLine()) != null) + using var reader = new StringReader(raw); + while (reader.ReadLine() is string line) { if (string.IsNullOrWhiteSpace(line)) continue; if (line.StartsWith("# ")) @@ -1038,9 +1350,29 @@ protected override bool SetResultCore(PhysicalConnection connection, 
Message mes return false; } - private static string Normalize(string category) + private static string Normalize(string? category) => + category.IsNullOrWhiteSpace() ? "miscellaneous" : category.Trim(); + } + + private sealed class Int64DefaultValueProcessor : ResultProcessor + { + private readonly long _defaultValue; + + public Int64DefaultValueProcessor(long defaultValue) => _defaultValue = defaultValue; + + protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) { - return string.IsNullOrWhiteSpace(category) ? "miscellaneous" : category.Trim(); + if (result.IsNull) + { + SetResult(message, _defaultValue); + return true; + } + if (result.Resp2TypeBulkString == ResultType.Integer && result.TryGetInt64(out var i64)) + { + SetResult(message, i64); + return true; + } + return false; } } @@ -1048,13 +1380,12 @@ private class Int64Processor : ResultProcessor { protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) { - switch (result.Type) + switch (result.Resp2TypeBulkString) { case ResultType.Integer: case ResultType.SimpleString: case ResultType.BulkString: - long i64; - if (result.TryGetInt64(out i64)) + if (result.TryGetInt64(out long i64)) { SetResult(message, i64); return true; @@ -1065,11 +1396,102 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes } } - private class PubSubNumSubProcessor : Int64Processor + private class Int32Processor : ResultProcessor + { + protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) + { + switch (result.Resp2TypeBulkString) + { + case ResultType.Integer: + case ResultType.SimpleString: + case ResultType.BulkString: + if (result.TryGetInt64(out long i64)) + { + SetResult(message, checked((int)i64)); + return true; + } + break; + } + return false; + } + } + + internal static ResultProcessor StreamTrimResult => + Int32EnumProcessor.Instance; + + 
internal static ResultProcessor StreamTrimResultArray => + Int32EnumArrayProcessor.Instance; + + private sealed class Int32EnumProcessor : ResultProcessor where T : unmanaged, Enum { + private Int32EnumProcessor() { } + public static readonly Int32EnumProcessor Instance = new(); + protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) { - if (result.Type == ResultType.MultiBulk) + switch (result.Resp2TypeBulkString) + { + case ResultType.Integer: + case ResultType.SimpleString: + case ResultType.BulkString: + if (result.TryGetInt64(out long i64)) + { + Debug.Assert(Unsafe.SizeOf() == sizeof(int)); + int i32 = (int)i64; + SetResult(message, Unsafe.As(ref i32)); + return true; + } + break; + case ResultType.Array when result.ItemsCount == 1: // pick a single element from a unit vector + if (result.GetItems()[0].TryGetInt64(out i64)) + { + Debug.Assert(Unsafe.SizeOf() == sizeof(int)); + int i32 = (int)i64; + SetResult(message, Unsafe.As(ref i32)); + return true; + } + break; + } + return false; + } + } + + private sealed class Int32EnumArrayProcessor : ResultProcessor where T : unmanaged, Enum + { + private Int32EnumArrayProcessor() { } + public static readonly Int32EnumArrayProcessor Instance = new(); + + protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) + { + switch (result.Resp2TypeArray) + { + case ResultType.Array: + T[] arr; + if (result.IsNull) + { + arr = null!; + } + else + { + Debug.Assert(Unsafe.SizeOf() == sizeof(int)); + arr = result.ToArray(static (in RawResult x) => + { + int i32 = (int)x.AsRedisValue(); + return Unsafe.As(ref i32); + })!; + } + SetResult(message, arr); + return true; + } + return false; + } + } + + private sealed class PubSubNumSubProcessor : Int64Processor + { + protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) + { + if (result.Resp2TypeArray == ResultType.Array) { var 
arr = result.GetItems(); if (arr.Length == 2 && arr[1].TryGetInt64(out long val)) @@ -1082,11 +1504,25 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes } } + private sealed class NullableDoubleArrayProcessor : ResultProcessor + { + protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) + { + if (result.Resp2TypeArray == ResultType.Array && !result.IsNull) + { + var arr = result.GetItemsAsDoubles()!; + SetResult(message, arr); + return true; + } + return false; + } + } + private sealed class NullableDoubleProcessor : ResultProcessor { protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) { - switch (result.Type) + switch (result.Resp2TypeBulkString) { case ResultType.Integer: case ResultType.SimpleString: @@ -1096,8 +1532,7 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes SetResult(message, null); return true; } - double val; - if (result.TryGetDouble(out val)) + if (result.TryGetDouble(out double val)) { SetResult(message, val); return true; @@ -1108,27 +1543,67 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes } } - private sealed class NullableInt64Processor : ResultProcessor + private sealed class NullableInt64Processor : ResultProcessor + { + protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) + { + switch (result.Resp2TypeBulkString) + { + case ResultType.Integer: + case ResultType.SimpleString: + case ResultType.BulkString: + if (result.IsNull) + { + SetResult(message, null); + return true; + } + if (result.TryGetInt64(out long i64)) + { + SetResult(message, i64); + return true; + } + break; + case ResultType.Array: + var items = result.GetItems(); + if (items.Length == 1) + { // treat an array of 1 like a single reply + if (items[0].TryGetInt64(out long value)) + { + SetResult(message, value); + 
return true; + } + } + break; + } + return false; + } + } + + private sealed class ExpireResultArrayProcessor : ResultProcessor + { + protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) + { + if (result.Resp2TypeArray == ResultType.Array || result.IsNull) + { + var arr = result.ToArray((in RawResult x) => (ExpireResult)(long)x.AsRedisValue())!; + + SetResult(message, arr); + return true; + } + return false; + } + } + + private sealed class PersistResultArrayProcessor : ResultProcessor { protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) { - switch (result.Type) + if (result.Resp2TypeArray == ResultType.Array || result.IsNull) { - case ResultType.Integer: - case ResultType.SimpleString: - case ResultType.BulkString: - if (result.IsNull) - { - SetResult(message, null); - return true; - } - long i64; - if (result.TryGetInt64(out i64)) - { - SetResult(message, i64); - return true; - } - break; + var arr = result.ToArray((in RawResult x) => (PersistResult)(long)x.AsRedisValue())!; + + SetResult(message, arr); + return true; } return false; } @@ -1136,30 +1611,30 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes private sealed class RedisChannelArrayProcessor : ResultProcessor { - private readonly RedisChannel.PatternMode mode; - public RedisChannelArrayProcessor(RedisChannel.PatternMode mode) + private readonly RedisChannel.RedisChannelOptions options; + public RedisChannelArrayProcessor(RedisChannel.RedisChannelOptions options) { - this.mode = mode; + this.options = options; } private readonly struct ChannelState // I would use a value-tuple here, but that is binding hell { - public readonly byte[] Prefix; - public readonly RedisChannel.PatternMode Mode; - public ChannelState(byte[] prefix, RedisChannel.PatternMode mode) + public readonly byte[]? 
Prefix; + public readonly RedisChannel.RedisChannelOptions Options; + public ChannelState(byte[]? prefix, RedisChannel.RedisChannelOptions options) { Prefix = prefix; - Mode = mode; + Options = options; } } protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) { - switch (result.Type) + switch (result.Resp2TypeArray) { - case ResultType.MultiBulk: + case ResultType.Array: var final = result.ToArray( - (in RawResult item, in ChannelState state) => item.AsRedisChannel(state.Prefix, state.Mode), - new ChannelState(connection.ChannelPrefix, mode)); + (in RawResult item, in ChannelState state) => item.AsRedisChannel(state.Prefix, state.Options), + new ChannelState(connection.ChannelPrefix, options))!; SetResult(message, final); return true; @@ -1172,10 +1647,10 @@ private sealed class RedisKeyArrayProcessor : ResultProcessor { protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) { - switch (result.Type) + switch (result.Resp2TypeArray) { - case ResultType.MultiBulk: - var arr = result.GetItemsAsKeys(); + case ResultType.Array: + var arr = result.GetItemsAsKeys()!; SetResult(message, arr); return true; } @@ -1187,7 +1662,7 @@ private sealed class RedisKeyProcessor : ResultProcessor { protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) { - switch (result.Type) + switch (result.Resp2TypeBulkString) { case ResultType.Integer: case ResultType.SimpleString: @@ -1203,11 +1678,11 @@ private sealed class RedisTypeProcessor : ResultProcessor { protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) { - switch (result.Type) + switch (result.Resp2TypeBulkString) { case ResultType.SimpleString: case ResultType.BulkString: - string s = result.GetString(); + string s = result.GetString()!; RedisType value; if (string.Equals(s, "zset", StringComparison.OrdinalIgnoreCase)) 
value = Redis.RedisType.SortedSet; else if (!Enum.TryParse(s, true, out value)) value = global::StackExchange.Redis.RedisType.Unknown; @@ -1222,15 +1697,18 @@ private sealed class RedisValueArrayProcessor : ResultProcessor { protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) { - switch (result.Type) + switch (result.Resp2TypeBulkString) { // allow a single item to pass explicitly pretending to be an array; example: SPOP {key} 1 case ResultType.BulkString: - var arr = new[] { result.AsRedisValue() }; + // If the result is nil, the result should be an empty array + var arr = result.IsNull + ? Array.Empty() + : new[] { result.AsRedisValue() }; SetResult(message, arr); return true; - case ResultType.MultiBulk: - arr = result.GetItemsAsValues(); + case ResultType.Array: + arr = result.GetItemsAsValues()!; SetResult(message, arr); return true; } @@ -1238,15 +1716,45 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes } } - private sealed class StringArrayProcessor : ResultProcessor + private sealed class Int64ArrayProcessor : ResultProcessor + { + protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) + { + if (result.Resp2TypeArray == ResultType.Array && !result.IsNull) + { + var arr = result.ToArray((in RawResult x) => (long)x.AsRedisValue())!; + SetResult(message, arr); + return true; + } + + return false; + } + } + + private sealed class NullableStringArrayProcessor : ResultProcessor { protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) { - switch (result.Type) + switch (result.Resp2TypeArray) { - case ResultType.MultiBulk: - var arr = result.GetItemsAsStrings(); + case ResultType.Array: + var arr = result.GetItemsAsStrings()!; + + SetResult(message, arr); + return true; + } + return false; + } + } + private sealed class StringArrayProcessor : ResultProcessor + { + protected 
override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) + { + switch (result.Resp2TypeArray) + { + case ResultType.Array: + var arr = result.GetItemsAsStringsNotNullable()!; SetResult(message, arr); return true; } @@ -1254,13 +1762,27 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes } } + private sealed class BooleanArrayProcessor : ResultProcessor + { + protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) + { + if (result.Resp2TypeArray == ResultType.Array && !result.IsNull) + { + var arr = result.GetItemsAsBooleans()!; + SetResult(message, arr); + return true; + } + return false; + } + } + private sealed class RedisValueGeoPositionProcessor : ResultProcessor { protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) { - switch (result.Type) + switch (result.Resp2TypeArray) { - case ResultType.MultiBulk: + case ResultType.Array: var pos = result.GetItemsAsGeoPosition(); SetResult(message, pos); @@ -1274,10 +1796,10 @@ private sealed class RedisValueGeoPositionArrayProcessor : ResultProcessor Parse(item, options), this.options); + (in RawResult item, in GeoRadiusOptions radiusOptions) => Parse(item, radiusOptions), options)!; SetResult(message, typed); return true; } @@ -1355,11 +1877,81 @@ The geohash integer. } } + /// + /// Parser for the https://redis.io/commands/lcs/ format with the and arguments. + /// + /// + /// Example response: + /// 1) "matches" + /// 2) 1) 1) 1) (integer) 4 + /// 2) (integer) 7 + /// 2) 1) (integer) 5 + /// 2) (integer) 8 + /// 3) (integer) 4 + /// 3) "len" + /// 4) (integer) 6 + /// ... 
+ /// + private sealed class LongestCommonSubsequenceProcessor : ResultProcessor + { + protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) + { + switch (result.Resp2TypeArray) + { + case ResultType.Array when TryParse(result, out var value): + SetResult(message, value); + return true; + } + return false; + } + + private static bool TryParse(in RawResult result, out LCSMatchResult value) + { + var topItems = result.GetItems(); + var matches = new LCSMatchResult.LCSMatch[topItems[1].GetItems().Length]; + int i = 0; + var matchesRawArray = topItems[1]; // skip the first element (title "matches") + foreach (var match in matchesRawArray.GetItems()) + { + var matchItems = match.GetItems(); + + if (TryReadPosition(matchItems[0], out var first) + && TryReadPosition(matchItems[1], out var second) + && matchItems[2].TryGetInt64(out var length)) + { + matches[i++] = new LCSMatchResult.LCSMatch(first, second, length); + } + else + { + value = default; + return false; + } + } + var len = (long)topItems[3].AsRedisValue(); + + value = new LCSMatchResult(matches, len); + return true; + } + + private static bool TryReadPosition(in RawResult raw, out LCSMatchResult.LCSPosition position) + { + // Expecting a 2-element array: [start, end] + if (raw.Resp2TypeArray is ResultType.Array && raw.ItemsCount >= 2 + && raw[0].TryGetInt64(out var start) && raw[1].TryGetInt64(out var end)) + { + position = new LCSMatchResult.LCSPosition(start, end); + return true; + } + position = default; + return false; + } + } + private sealed class RedisValueProcessor : ResultProcessor { protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) { - switch (result.Type) + switch (result.Resp2TypeBulkString) { case ResultType.Integer: case ResultType.SimpleString: @@ -1371,6 +1963,25 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes } } + private sealed class 
RedisValueFromArrayProcessor : ResultProcessor + { + protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) + { + switch (result.Resp2TypeBulkString) + { + case ResultType.Array: + var items = result.GetItems(); + if (items.Length == 1) + { // treat an array of 1 like a single reply + SetResult(message, items[0].AsRedisValue()); + return true; + } + break; + } + return false; + } + } + private sealed class RoleProcessor : ResultProcessor { protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) @@ -1382,19 +1993,19 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes } ref var val = ref items[0]; - Role role; - if (val.IsEqual(RedisLiterals.master)) role = ParseMaster(items); - else if (val.IsEqual(RedisLiterals.slave)) role = ParseReplica(items, RedisLiterals.slave); - else if (val.IsEqual(RedisLiterals.replica)) role = ParseReplica(items, RedisLiterals.replica); // for when "slave" is deprecated + Role? role; + if (val.IsEqual(RedisLiterals.master)) role = ParsePrimary(items); + else if (val.IsEqual(RedisLiterals.slave)) role = ParseReplica(items, RedisLiterals.slave!); + else if (val.IsEqual(RedisLiterals.replica)) role = ParseReplica(items, RedisLiterals.replica!); // for when "slave" is deprecated else if (val.IsEqual(RedisLiterals.sentinel)) role = ParseSentinel(items); - else role = new Role.Unknown(val.GetString()); + else role = new Role.Unknown(val.GetString()!); if (role is null) return false; SetResult(message, role); return true; } - private static Role ParseMaster(in Sequence items) + private static Role? 
ParsePrimary(in Sequence items) { if (items.Length < 3) { @@ -1417,7 +2028,7 @@ private static Role ParseMaster(in Sequence items) replicas = new List((int)replicaItems.Length); for (int i = 0; i < replicaItems.Length; i++) { - if (TryParseMasterReplica(replicaItems[i].GetItems(), out var replica)) + if (TryParsePrimaryReplica(replicaItems[i].GetItems(), out var replica)) { replicas.Add(replica); } @@ -1426,12 +2037,12 @@ private static Role ParseMaster(in Sequence items) return null; } } - } + } return new Role.Master(offset, replicas); } - private static bool TryParseMasterReplica(in Sequence items, out Role.Master.Replica replica) + private static bool TryParsePrimaryReplica(in Sequence items, out Role.Master.Replica replica) { if (items.Length < 3) { @@ -1439,9 +2050,9 @@ private static bool TryParseMasterReplica(in Sequence items, out Role return false; } - var masterIp = items[0].GetString(); + var primaryIp = items[0].GetString()!; - if (!items[1].TryGetInt64(out var masterPort) || masterPort > int.MaxValue) + if (!items[1].TryGetInt64(out var primaryPort) || primaryPort > int.MaxValue) { replica = default; return false; @@ -1453,74 +2064,58 @@ private static bool TryParseMasterReplica(in Sequence items, out Role return false; } - replica = new Role.Master.Replica(masterIp, (int)masterPort, replicationOffset); + replica = new Role.Master.Replica(primaryIp, (int)primaryPort, replicationOffset); return true; } - private static Role ParseReplica(in Sequence items, string role) + private static Role? 
ParseReplica(in Sequence items, string role) { if (items.Length < 5) { return null; } - var masterIp = items[1].GetString(); + var primaryIp = items[1].GetString()!; - if (!items[2].TryGetInt64(out var masterPort) || masterPort > int.MaxValue) + if (!items[2].TryGetInt64(out var primaryPort) || primaryPort > int.MaxValue) { return null; } ref var val = ref items[3]; string replicationState; - if (val.IsEqual(RedisLiterals.connect)) replicationState = RedisLiterals.connect; - else if (val.IsEqual(RedisLiterals.connecting)) replicationState = RedisLiterals.connecting; - else if (val.IsEqual(RedisLiterals.sync)) replicationState = RedisLiterals.sync; - else if (val.IsEqual(RedisLiterals.connected)) replicationState = RedisLiterals.connected; - else if (val.IsEqual(RedisLiterals.none)) replicationState = RedisLiterals.none; - else if (val.IsEqual(RedisLiterals.handshake)) replicationState = RedisLiterals.handshake; - else replicationState = val.GetString(); + if (val.IsEqual(RedisLiterals.connect)) replicationState = RedisLiterals.connect!; + else if (val.IsEqual(RedisLiterals.connecting)) replicationState = RedisLiterals.connecting!; + else if (val.IsEqual(RedisLiterals.sync)) replicationState = RedisLiterals.sync!; + else if (val.IsEqual(RedisLiterals.connected)) replicationState = RedisLiterals.connected!; + else if (val.IsEqual(RedisLiterals.none)) replicationState = RedisLiterals.none!; + else if (val.IsEqual(RedisLiterals.handshake)) replicationState = RedisLiterals.handshake!; + else replicationState = val.GetString()!; if (!items[4].TryGetInt64(out var replicationOffset)) { return null; } - return new Role.Replica(role, masterIp, (int)masterPort, replicationState, replicationOffset); + return new Role.Replica(role, primaryIp, (int)primaryPort, replicationState, replicationOffset); } - private static Role ParseSentinel(in Sequence items) + private static Role? 
ParseSentinel(in Sequence items) { if (items.Length < 2) { return null; } - var masters = items[1].GetItemsAsStrings(); - return new Role.Sentinel(masters); - } - } - - private sealed class LeaseProcessor : ResultProcessor> - { - protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) - { - switch (result.Type) - { - case ResultType.Integer: - case ResultType.SimpleString: - case ResultType.BulkString: - SetResult(message, result.AsLease()); - return true; - } - return false; + var primaries = items[1].GetItemsAsStrings()!; + return new Role.Sentinel(primaries); } } - private class ScriptResultProcessor : ResultProcessor + private sealed class ScriptResultProcessor : ResultProcessor { public override bool SetResult(PhysicalConnection connection, Message message, in RawResult result) { - if (result.Type == ResultType.Error && result.StartsWith(CommonReplies.NOSCRIPT)) + if (result.IsError && result.StartsWith(CommonReplies.NOSCRIPT)) { // scripts are not flushed individually, so assume the entire script cache is toast ("SCRIPT FLUSH") connection.BridgeCouldBeNull?.ServerEndPoint?.FlushScriptCache(); message.SetScriptUnavailable(); @@ -1533,8 +2128,7 @@ public override bool SetResult(PhysicalConnection connection, Message message, i // (is that a thing?) will be wrapped in the RedisResult protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) { - var value = Redis.RedisResult.TryCreate(connection, result); - if (value != null) + if (RedisResult.TryCreate(connection, result, out var value)) { SetResult(message, value); return true; @@ -1552,16 +2146,19 @@ public SingleStreamProcessor(bool skipStreamName = false) this.skipStreamName = skipStreamName; } + /// + /// Handles . 
+ /// protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) { if (result.IsNull) { // Server returns 'nil' if no entries are returned for the given stream. - SetResult(message, Array.Empty()); + SetResult(message, []); return true; } - if (result.Type != ResultType.MultiBulk) + if (result.Resp2TypeArray != ResultType.Array) { return false; } @@ -1570,26 +2167,47 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes if (skipStreamName) { - // Skip the first element in the array (i.e., the stream name). - // See https://redis.io/commands/xread. - - // > XREAD COUNT 2 STREAMS mystream 0 - // 1) 1) "mystream" <== Skip the stream name - // 2) 1) 1) 1519073278252 - 0 <== Index 1 contains the array of stream entries - // 2) 1) "foo" - // 2) "value_1" - // 2) 1) 1519073279157 - 0 - // 2) 1) "foo" - // 2) "value_2" - - // Retrieve the initial array. For XREAD of a single stream it will - // be an array of only 1 element in the response. - var readResult = result.GetItems(); - - // Within that single element, GetItems will return an array of - // 2 elements: the stream name and the stream entries. - // Skip the stream name (index 0) and only process the stream entries (index 1). 
- entries = ParseRedisStreamEntries(readResult[0].GetItems()[1]); + /* + RESP 2: array element per stream; each element is an array of a name plus payload; payload is array of name/value pairs + + 127.0.0.1:6379> XREAD COUNT 2 STREAMS temperatures:us-ny:10007 0-0 + 1) 1) "temperatures:us-ny:10007" + 2) 1) 1) "1691504774593-0" + 2) 1) "temp_f" + 2) "87.2" + 3) "pressure" + 4) "29.69" + 5) "humidity" + 6) "46" + 2) 1) "1691504856705-0" + 2) 1) "temp_f" + 2) "87.2" + 3) "pressure" + 4) "29.69" + 5) "humidity" + 6) "46" + + RESP 3: map of element names with array of name plus payload; payload is array of name/value pairs + + 127.0.0.1:6379> XREAD COUNT 2 STREAMS temperatures:us-ny:10007 0-0 + 1# "temperatures:us-ny:10007" => 1) 1) "1691504774593-0" + 2) 1) "temp_f" + 2) "87.2" + 3) "pressure" + 4) "29.69" + 5) "humidity" + 6) "46" + 2) 1) "1691504856705-0" + 2) 1) "temp_f" + 2) "87.2" + 3) "pressure" + 4) "29.69" + 5) "humidity" + 6) "46" + */ + + ref readonly RawResult readResult = ref (result.Resp3Type == ResultType.Map ? ref result[1] : ref result[0][1]); + entries = ParseRedisStreamEntries(readResult); } else { @@ -1601,6 +2219,9 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes } } + /// + /// Handles . + /// internal sealed class MultiStreamProcessor : StreamProcessorBase { /* @@ -1608,8 +2229,6 @@ The result is similar to the XRANGE result (see SingleStreamProcessor) with the addition of the stream name as the first element of top level Multibulk array. - See https://redis.io/commands/xread. - > XREAD COUNT 2 STREAMS mystream writers 0-0 0-0 1) 1) "mystream" 2) 1) 1) 1526984818136-0 @@ -1633,6 +2252,8 @@ Multibulk array. 
2) "Jane" 3) "surname" 4) "Austen" + + (note that XREADGROUP may include additional interior elements; see ParseRedisStreamEntries) */ protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) @@ -1640,30 +2261,111 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes if (result.IsNull) { // Nothing returned for any of the requested streams. The server returns 'nil'. - SetResult(message, Array.Empty()); + SetResult(message, []); return true; } - if (result.Type != ResultType.MultiBulk) + if (result.Resp2TypeArray != ResultType.Array) { return false; } - var streams = result.GetItems().ToArray((in RawResult item, in MultiStreamProcessor obj) => + RedisStream[] streams; + if (result.Resp3Type == ResultType.Map) // see SetResultCore for the shape delta between RESP2 and RESP3 + { + // root is a map of named inner-arrays + streams = RedisStreamInterleavedProcessor.Instance.ParseArray(result, false, out _, this)!; // null-checked + } + else { - var details = item.GetItems(); + streams = result.GetItems().ToArray( + (in RawResult item, in MultiStreamProcessor obj) => + { + var details = item.GetItems(); - // details[0] = Name of the Stream - // details[1] = Multibulk Array of Stream Entries - return new RedisStream(key: details[0].AsRedisKey(), - entries: obj.ParseRedisStreamEntries(details[1])); - }, this); + // details[0] = Name of the Stream + // details[1] = Multibulk Array of Stream Entries + return new RedisStream(key: details[0].AsRedisKey(), entries: obj.ParseRedisStreamEntries(details[1])!); + }, + this); + } SetResult(message, streams); return true; } } + private sealed class RedisStreamInterleavedProcessor : ValuePairInterleavedProcessorBase + { + protected override bool AllowJaggedPairs => false; // we only use this on a flattened map + + public static readonly RedisStreamInterleavedProcessor Instance = new(); + private RedisStreamInterleavedProcessor() + { + } + + protected 
override RedisStream Parse(in RawResult first, in RawResult second, object? state) + => new(key: first.AsRedisKey(), entries: ((MultiStreamProcessor)state!).ParseRedisStreamEntries(second)); + } + + /// + /// This processor is for *without* the option. + /// + internal sealed class StreamAutoClaimProcessor : StreamProcessorBase + { + protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) + { + // See https://redis.io/commands/xautoclaim for command documentation. + // Note that the result should never be null, so intentionally treating it as a failure to parse here + if (result.Resp2TypeArray == ResultType.Array && !result.IsNull) + { + var items = result.GetItems(); + + // [0] The next start ID. + var nextStartId = items[0].AsRedisValue(); + // [1] The array of StreamEntry's. + var entries = ParseRedisStreamEntries(items[1]); + // [2] The array of message IDs deleted from the stream that were in the PEL. + // This is not available in 6.2 so we need to be defensive when reading this part of the response. + var deletedIds = (items.Length == 3 ? items[2].GetItemsAsValues() : null) ?? []; + + SetResult(message, new StreamAutoClaimResult(nextStartId, entries, deletedIds)); + return true; + } + + return false; + } + } + + /// + /// This processor is for *with* the option. + /// + internal sealed class StreamAutoClaimIdsOnlyProcessor : ResultProcessor + { + protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) + { + // See https://redis.io/commands/xautoclaim for command documentation. + // Note that the result should never be null, so intentionally treating it as a failure to parse here + if (result.Resp2TypeArray == ResultType.Array && !result.IsNull) + { + var items = result.GetItems(); + + // [0] The next start ID. + var nextStartId = items[0].AsRedisValue(); + // [1] The array of claimed message IDs. + var claimedIds = items[1].GetItemsAsValues() ?? 
[]; + // [2] The array of message IDs deleted from the stream that were in the PEL. + // This is not available in 6.2 so we need to be defensive when reading this part of the response. + var deletedIds = (items.Length == 3 ? items[2].GetItemsAsValues() : null) ?? []; + + SetResult(message, new StreamAutoClaimIdsOnlyResult(nextStartId, claimedIds, deletedIds)); + return true; + } + + return false; + } + } + internal sealed class StreamConsumerInfoProcessor : InterleavedStreamInfoProcessorBase { protected override StreamConsumerInfo ParseItem(in RawResult result) @@ -1684,9 +2386,8 @@ protected override StreamConsumerInfo ParseItem(in RawResult result) // 4) (integer)1 // 5) idle // 6) (integer)83841983 - var arr = result.GetItems(); - string name = default; + string? name = default; int pendingMessageCount = default; long idleTimeInMilliseconds = default; @@ -1694,15 +2395,22 @@ protected override StreamConsumerInfo ParseItem(in RawResult result) KeyValuePairParser.TryRead(arr, KeyValuePairParser.Pending, ref pendingMessageCount); KeyValuePairParser.TryRead(arr, KeyValuePairParser.Idle, ref idleTimeInMilliseconds); - return new StreamConsumerInfo(name, pendingMessageCount, idleTimeInMilliseconds); + return new StreamConsumerInfo(name!, pendingMessageCount, idleTimeInMilliseconds); } } private static class KeyValuePairParser { internal static readonly CommandBytes - Name = "name", Consumers = "consumers", Pending = "pending", Idle = "idle", LastDeliveredId = "last-delivered-id", - IP = "ip", Port = "port"; + Name = "name", + Consumers = "consumers", + Pending = "pending", + Idle = "idle", + LastDeliveredId = "last-delivered-id", + EntriesRead = "entries-read", + Lag = "lag", + IP = "ip", + Port = "port"; internal static bool TryRead(Sequence pairs, in CommandBytes key, ref long value) { @@ -1717,25 +2425,39 @@ internal static bool TryRead(Sequence pairs, in CommandBytes key, ref } return false; } + internal static bool TryRead(Sequence pairs, in CommandBytes key, ref 
long? value) + { + var len = pairs.Length / 2; + for (int i = 0; i < len; i++) + { + if (pairs[i * 2].IsEqual(key) && pairs[(i * 2) + 1].TryGetInt64(out var tmp)) + { + value = tmp; + return true; + } + } + return false; + } internal static bool TryRead(Sequence pairs, in CommandBytes key, ref int value) { long tmp = default; - if(TryRead(pairs, key, ref tmp)) { + if (TryRead(pairs, key, ref tmp)) + { value = checked((int)tmp); return true; } return false; } - internal static bool TryRead(Sequence pairs, in CommandBytes key, ref string value) + internal static bool TryRead(Sequence pairs, in CommandBytes key, [NotNullWhen(true)] ref string? value) { var len = pairs.Length / 2; for (int i = 0; i < len; i++) { if (pairs[i * 2].IsEqual(key)) { - value = pairs[(i * 2) + 1].GetString(); + value = pairs[(i * 2) + 1].GetString()!; return true; } } @@ -1759,6 +2481,10 @@ protected override StreamGroupInfo ParseItem(in RawResult result) // 6) (integer)2 // 7) last-delivered-id // 8) "1588152489012-0" + // 9) "entries-read" + // 10) (integer)2 + // 11) "lag" + // 12) (integer)0 // 2) 1) name // 2) "some-other-group" // 3) consumers @@ -1767,17 +2493,24 @@ protected override StreamGroupInfo ParseItem(in RawResult result) // 6) (integer)0 // 7) last-delivered-id // 8) "1588152498034-0" - + // 9) "entries-read" + // 10) (integer)1 + // 11) "lag" + // 12) (integer)1 var arr = result.GetItems(); - string name = default, lastDeliveredId = default; + string? name = default, lastDeliveredId = default; int consumerCount = default, pendingMessageCount = default; + long entriesRead = default; + long? 
lag = default; KeyValuePairParser.TryRead(arr, KeyValuePairParser.Name, ref name); KeyValuePairParser.TryRead(arr, KeyValuePairParser.Consumers, ref consumerCount); KeyValuePairParser.TryRead(arr, KeyValuePairParser.Pending, ref pendingMessageCount); KeyValuePairParser.TryRead(arr, KeyValuePairParser.LastDeliveredId, ref lastDeliveredId); + KeyValuePairParser.TryRead(arr, KeyValuePairParser.EntriesRead, ref entriesRead); + KeyValuePairParser.TryRead(arr, KeyValuePairParser.Lag, ref lag); - return new StreamGroupInfo(name, consumerCount, pendingMessageCount, lastDeliveredId); + return new StreamGroupInfo(name!, consumerCount, pendingMessageCount, lastDeliveredId, entriesRead, lag); } } @@ -1787,7 +2520,7 @@ internal abstract class InterleavedStreamInfoProcessorBase : ResultProcessor< protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) { - if (result.Type != ResultType.MultiBulk) + if (result.Resp2TypeArray != ResultType.Array) { return false; } @@ -1824,7 +2557,7 @@ internal sealed class StreamInfoProcessor : StreamProcessorBase // 2) "banana" protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) { - if (result.Type != ResultType.MultiBulk) + if (result.Resp2TypeArray != ResultType.Array) { return false; } @@ -1832,43 +2565,72 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes var arr = result.GetItems(); var max = arr.Length / 2; - long length = -1, radixTreeKeys = -1, radixTreeNodes = -1, groups = -1; - var lastGeneratedId = Redis.RedisValue.Null; + long length = -1, radixTreeKeys = -1, radixTreeNodes = -1, groups = -1, + entriesAdded = -1, idmpDuration = -1, idmpMaxsize = -1, + pidsTracked = -1, iidsTracked = -1, iidsAdded = -1, iidsDuplicates = -1; + RedisValue lastGeneratedId = Redis.RedisValue.Null, + maxDeletedEntryId = Redis.RedisValue.Null, + recordedFirstEntryId = Redis.RedisValue.Null; StreamEntry firstEntry = 
StreamEntry.Null, lastEntry = StreamEntry.Null; var iter = arr.GetEnumerator(); - for(int i = 0; i < max; i++) + for (int i = 0; i < max; i++) { - ref RawResult key = ref iter.GetNext(), value = ref iter.GetNext(); - if (key.Payload.Length > CommandBytes.MaxLength) continue; + if (!iter.GetNext().TryParse(StreamInfoFieldMetadata.TryParse, out StreamInfoField field)) + field = StreamInfoField.Unknown; + ref RawResult value = ref iter.GetNext(); - var keyBytes = new CommandBytes(key.Payload); - if(keyBytes.Equals(CommonReplies.length)) - { - if (!value.TryGetInt64(out length)) return false; - } - else if (keyBytes.Equals(CommonReplies.radixTreeKeys)) - { - if (!value.TryGetInt64(out radixTreeKeys)) return false; - } - else if (keyBytes.Equals(CommonReplies.radixTreeNodes)) - { - if (!value.TryGetInt64(out radixTreeNodes)) return false; - } - else if (keyBytes.Equals(CommonReplies.groups)) - { - if (!value.TryGetInt64(out groups)) return false; - } - else if (keyBytes.Equals(CommonReplies.lastGeneratedId)) - { - lastGeneratedId = value.AsRedisValue(); - } - else if (keyBytes.Equals(CommonReplies.firstEntry)) + switch (field) { - firstEntry = ParseRedisStreamEntry(value); - } - else if (keyBytes.Equals(CommonReplies.lastEntry)) - { - lastEntry = ParseRedisStreamEntry(value); + case StreamInfoField.Length: + if (!value.TryGetInt64(out length)) return false; + break; + case StreamInfoField.RadixTreeKeys: + if (!value.TryGetInt64(out radixTreeKeys)) return false; + break; + case StreamInfoField.RadixTreeNodes: + if (!value.TryGetInt64(out radixTreeNodes)) return false; + break; + case StreamInfoField.Groups: + if (!value.TryGetInt64(out groups)) return false; + break; + case StreamInfoField.LastGeneratedId: + lastGeneratedId = value.AsRedisValue(); + break; + case StreamInfoField.FirstEntry: + firstEntry = ParseRedisStreamEntry(value); + break; + case StreamInfoField.LastEntry: + lastEntry = ParseRedisStreamEntry(value); + break; + // 7.0 + case 
StreamInfoField.MaxDeletedEntryId: + maxDeletedEntryId = value.AsRedisValue(); + break; + case StreamInfoField.RecordedFirstEntryId: + recordedFirstEntryId = value.AsRedisValue(); + break; + case StreamInfoField.EntriesAdded: + if (!value.TryGetInt64(out entriesAdded)) return false; + break; + // 8.6 + case StreamInfoField.IdmpDuration: + if (!value.TryGetInt64(out idmpDuration)) return false; + break; + case StreamInfoField.IdmpMaxsize: + if (!value.TryGetInt64(out idmpMaxsize)) return false; + break; + case StreamInfoField.PidsTracked: + if (!value.TryGetInt64(out pidsTracked)) return false; + break; + case StreamInfoField.IidsTracked: + if (!value.TryGetInt64(out iidsTracked)) return false; + break; + case StreamInfoField.IidsAdded: + if (!value.TryGetInt64(out iidsAdded)) return false; + break; + case StreamInfoField.IidsDuplicates: + if (!value.TryGetInt64(out iidsDuplicates)) return false; + break; } } @@ -1879,7 +2641,16 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes groups: checked((int)groups), firstEntry: firstEntry, lastEntry: lastEntry, - lastGeneratedId: lastGeneratedId); + lastGeneratedId: lastGeneratedId, + maxDeletedEntryId: maxDeletedEntryId, + entriesAdded: entriesAdded, + recordedFirstEntryId: recordedFirstEntryId, + idmpDuration: idmpDuration, + idmpMaxSize: idmpMaxsize, + pidsTracked: pidsTracked, + iidsTracked: iidsTracked, + iidsAdded: iidsAdded, + iidsDuplicates: iidsDuplicates); SetResult(message, streamInfo); return true; @@ -1899,8 +2670,7 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes // 2) "2" // 5) 1) 1) "Joe" // 2) "8" - - if (result.Type != ResultType.MultiBulk) + if (result.Resp2TypeArray != ResultType.Array) { return false; } @@ -1912,7 +2682,7 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes return false; } - StreamConsumer[] consumers = null; + StreamConsumer[]? 
consumers = null; // If there are no consumers as of yet for the given group, the last // item in the response array will be null. @@ -1928,12 +2698,11 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes }); } - var pendingInfo = new StreamPendingInfo(pendingMessageCount: (int)arr[0].AsRedisValue(), + var pendingInfo = new StreamPendingInfo( + pendingMessageCount: (int)arr[0].AsRedisValue(), lowestId: arr[1].AsRedisValue(), highestId: arr[2].AsRedisValue(), - consumers: consumers ?? Array.Empty()); - // ^^^^^ - // Should we bother allocating an empty array only to prevent the need for a null check? + consumers: consumers ?? []); SetResult(message, pendingInfo); return true; @@ -1944,7 +2713,7 @@ internal sealed class StreamPendingMessagesProcessor : ResultProcessor : ResultProcessor + internal sealed class StreamNameValueEntryProcessor : ValuePairInterleavedProcessorBase { - // For command response formats see https://redis.io/topics/streams-intro. + public static readonly StreamNameValueEntryProcessor Instance = new(); + private StreamNameValueEntryProcessor() + { + } + + protected override NameValueEntry Parse(in RawResult first, in RawResult second, object? state) + => new NameValueEntry(first.AsRedisValue(), second.AsRedisValue()); + } - protected StreamEntry ParseRedisStreamEntry(in RawResult item) + /// + /// Handles stream responses. For formats, see . + /// + /// The type of the stream result. + internal abstract class StreamProcessorBase : ResultProcessor + { + protected static StreamEntry ParseRedisStreamEntry(in RawResult item) { - if (item.IsNull || item.Type != ResultType.MultiBulk) + if (item.IsNull || item.Resp2TypeArray != ResultType.Array) { return StreamEntry.Null; } // Process the Multibulk array for each entry. 
The entry contains the following elements: // [0] = SimpleString (the ID of the stream entry) // [1] = Multibulk array of the name/value pairs of the stream entry's data + // optional (XREADGROUP with CLAIM): + // [2] = idle time (in milliseconds) + // [3] = delivery count var entryDetails = item.GetItems(); - return new StreamEntry(id: entryDetails[0].AsRedisValue(), - values: ParseStreamEntryValues(entryDetails[1])); - } - protected StreamEntry[] ParseRedisStreamEntries(in RawResult result) - { - if (result.Type != ResultType.MultiBulk) + var id = entryDetails[0].AsRedisValue(); + var values = ParseStreamEntryValues(entryDetails[1]); + // check for optional fields (XREADGROUP with CLAIM) + if (entryDetails.Length >= 4 && entryDetails[2].TryGetInt64(out var idleTimeInMs) && entryDetails[3].TryGetInt64(out var deliveryCount)) { - return null; + return new StreamEntry( + id: id, + values: values, + idleTime: TimeSpan.FromMilliseconds(idleTimeInMs), + deliveryCount: checked((int)deliveryCount)); } - - return result.GetItems().ToArray( - (in RawResult item, in StreamProcessorBase obj) => obj.ParseRedisStreamEntry(item), this); + return new StreamEntry( + id: id, + values: values); } + protected internal StreamEntry[] ParseRedisStreamEntries(in RawResult result) => + result.GetItems().ToArray((in RawResult item, in StreamProcessorBase _) => ParseRedisStreamEntry(item), this); - protected NameValueEntry[] ParseStreamEntryValues(in RawResult result) + protected static NameValueEntry[] ParseStreamEntryValues(in RawResult result) { // The XRANGE, XREVRANGE, XREAD commands return stream entries // in the following format. 
The name/value pairs are interleaved @@ -2009,52 +2799,32 @@ protected NameValueEntry[] ParseStreamEntryValues(in RawResult result) // 2) "9999" // 3) "temperature" // 4) "18.2" - - if (result.Type != ResultType.MultiBulk || result.IsNull) - { - return null; - } - - var arr = result.GetItems(); - - // Calculate how many name/value pairs are in the stream entry. - int count = (int)arr.Length / 2; - - if (count == 0) return Array.Empty(); - - var pairs = new NameValueEntry[count]; - - var iter = arr.GetEnumerator(); - for (int i = 0; i < pairs.Length; i++) + if (result.Resp2TypeArray != ResultType.Array || result.IsNull) { - pairs[i] = new NameValueEntry(iter.GetNext().AsRedisValue(), - iter.GetNext().AsRedisValue()); + return []; } - - return pairs; + return StreamNameValueEntryProcessor.Instance.ParseArray(result, false, out _, null)!; // ! because we checked null above } } private sealed class StringPairInterleavedProcessor : ValuePairInterleavedProcessorBase> { - protected override KeyValuePair Parse(in RawResult first, in RawResult second) - { - return new KeyValuePair(first.GetString(), second.GetString()); - } + protected override KeyValuePair Parse(in RawResult first, in RawResult second, object? 
state) => + new KeyValuePair(first.GetString()!, second.GetString()!); } - private sealed class StringProcessor : ResultProcessor + private sealed class StringProcessor : ResultProcessor { protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) { - switch (result.Type) + switch (result.Resp2TypeBulkString) { case ResultType.Integer: case ResultType.SimpleString: case ResultType.BulkString: SetResult(message, result.GetString()); return true; - case ResultType.MultiBulk: + case ResultType.Array: var arr = result.GetItems(); if (arr.Length == 1) { @@ -2067,7 +2837,33 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes } } - private class TracerProcessor : ResultProcessor + private sealed class TieBreakerProcessor : ResultProcessor + { + protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) + { + switch (result.Resp2TypeBulkString) + { + case ResultType.SimpleString: + case ResultType.BulkString: + var tieBreaker = result.GetString()!; + SetResult(message, tieBreaker); + + try + { + if (connection.BridgeCouldBeNull?.ServerEndPoint is ServerEndPoint endpoint) + { + endpoint.TieBreakerResult = tieBreaker; + } + } + catch { } + + return true; + } + return false; + } + } + + private sealed class TracerProcessor : ResultProcessor { private readonly bool establishConnection; @@ -2078,7 +2874,7 @@ public TracerProcessor(bool establishConnection) public override bool SetResult(PhysicalConnection connection, Message message, in RawResult result) { - connection?.BridgeCouldBeNull?.Multiplexer.OnInfoMessage($"got '{result}' for '{message.CommandAndKey}' on '{connection}'"); + connection.BridgeCouldBeNull?.Multiplexer.OnInfoMessage($"got '{result}' for '{message.CommandAndKey}' on '{connection}'"); var final = base.SetResult(connection, message, result); if (result.IsError) { @@ -2095,35 +2891,36 @@ public override bool 
SetResult(PhysicalConnection connection, Message message, i connection.RecordConnectionFailed(ConnectionFailureType.ProtocolFailure, new RedisServerException(result.ToString())); } } + + if (connection.Protocol is null) + { + // if we didn't get a valid response from HELLO, then we have to assume RESP2 at some point + connection.SetProtocol(RedisProtocol.Resp2); + } + return final; } + [System.Diagnostics.CodeAnalysis.SuppressMessage("Style", "IDE0071:Simplify interpolation", Justification = "Allocations (string.Concat vs. string.Format)")] protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) { bool happy; switch (message.Command) { case RedisCommand.ECHO: - happy = result.Type == ResultType.BulkString && (!establishConnection || result.IsEqual(connection.BridgeCouldBeNull?.Multiplexer?.UniqueId)); + happy = result.Resp2TypeBulkString == ResultType.BulkString && (!establishConnection || result.IsEqual(connection.BridgeCouldBeNull?.Multiplexer?.UniqueId)); break; case RedisCommand.PING: // there are two different PINGs; "interactive" is a +PONG or +{your message}, // but subscriber returns a bulk-array of [ "pong", {your message} ] - switch (result.Type) + switch (result.Resp2TypeArray) { case ResultType.SimpleString: happy = result.IsEqual(CommonReplies.PONG); break; - case ResultType.MultiBulk: - if (result.ItemsCount == 2) - { - var items = result.GetItems(); - happy = items[0].IsEqual(CommonReplies.PONG) && items[1].Payload.IsEmpty; - } - else - { - happy = false; - } + case ResultType.Array when result.ItemsCount == 2: + var items = result.GetItems(); + happy = items[0].IsEqual(CommonReplies.PONG) && items[1].Payload.IsEmpty; break; default: happy = false; @@ -2131,10 +2928,10 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes } break; case RedisCommand.TIME: - happy = result.Type == ResultType.MultiBulk && result.GetItems().Length == 2; + happy = result.Resp2TypeArray == 
ResultType.Array && result.ItemsCount == 2; break; case RedisCommand.EXISTS: - happy = result.Type == ResultType.Integer; + happy = result.Resp2TypeBulkString == ResultType.Integer; break; default: happy = false; @@ -2144,6 +2941,7 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes { if (establishConnection) { + // This is what ultimately brings us to complete a connection, by advancing the state forward from a successful tracer after connection. connection.BridgeCouldBeNull?.OnFullyEstablished(connection, $"From command: {message.Command}"); } SetResult(message, happy); @@ -2151,22 +2949,21 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes } else { - connection.RecordConnectionFailed(ConnectionFailureType.ProtocolFailure, + connection.RecordConnectionFailed( + ConnectionFailureType.ProtocolFailure, new InvalidOperationException($"unexpected tracer reply to {message.Command}: {result.ToString()}")); return false; } } } - #region Sentinel - - private sealed class SentinelGetMasterAddressByNameProcessor : ResultProcessor + private sealed class SentinelGetPrimaryAddressByNameProcessor : ResultProcessor { protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) { - switch (result.Type) + switch (result.Resp2TypeArray) { - case ResultType.MultiBulk: + case ResultType.Array: var items = result.GetItems(); if (result.IsNull) { @@ -2174,7 +2971,7 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes } else if (items.Length == 2 && items[1].TryGetInt64(out var port)) { - SetResult(message, Format.ParseEndPoint(items[0].GetString(), checked((int)port))); + SetResult(message, Format.ParseEndPoint(items[0].GetString()!, checked((int)port))); return true; } else if (items.Length == 0) @@ -2192,15 +2989,15 @@ private sealed class SentinelGetSentinelAddressesProcessor : ResultProcessor endPoints = new List(); + List endPoints = []; - 
switch (result.Type) + switch (result.Resp2TypeArray) { - case ResultType.MultiBulk: + case ResultType.Array: foreach (RawResult item in result.GetItems()) { var pairs = item.GetItems(); - string ip = null; + string? ip = null; int port = default; if (KeyValuePairParser.TryRead(pairs, in KeyValuePairParser.IP, ref ip) && KeyValuePairParser.TryRead(pairs, in KeyValuePairParser.Port, ref port)) @@ -2212,7 +3009,7 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes return true; case ResultType.SimpleString: - //We don't want to blow up if the master is not found + // We don't want to blow up if the primary is not found if (result.IsNull) return true; break; @@ -2226,15 +3023,15 @@ private sealed class SentinelGetReplicaAddressesProcessor : ResultProcessor endPoints = new List(); + List endPoints = []; - switch (result.Type) + switch (result.Resp2TypeArray) { - case ResultType.MultiBulk: + case ResultType.Array: foreach (RawResult item in result.GetItems()) { var pairs = item.GetItems(); - string ip = null; + string? ip = null; int port = default; if (KeyValuePairParser.TryRead(pairs, in KeyValuePairParser.IP, ref ip) && KeyValuePairParser.TryRead(pairs, in KeyValuePairParser.Port, ref port)) @@ -2245,7 +3042,7 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes break; case ResultType.SimpleString: - //We don't want to blow up if the master is not found + // We don't want to blow up if the primary is not found if (result.IsNull) return true; break; @@ -2265,22 +3062,29 @@ private sealed class SentinelArrayOfArraysProcessor : ResultProcessor[], StringPairInterleavedProcessor>( (in RawResult rawInnerArray, in StringPairInterleavedProcessor proc) => { - proc.TryParse(rawInnerArray, out KeyValuePair[] kvpArray); - return kvpArray; - }, innerProcessor); + if (proc.TryParse(rawInnerArray, out KeyValuePair[]? 
kvpArray)) + { + return kvpArray!; + } + else + { + throw new ArgumentOutOfRangeException(nameof(rawInnerArray), $"Error processing {message.CommandAndKey}, could not decode array '{rawInnerArray}'"); + } + }, + innerProcessor)!; SetResult(message, returnArray); return true; @@ -2288,13 +3092,11 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes return false; } } - - #endregion } internal abstract class ResultProcessor : ResultProcessor { - protected void SetResult(Message message, T value) + protected static void SetResult(Message? message, T value) { if (message == null) return; var box = message.ResultBox as IResultBox; @@ -2308,14 +3110,14 @@ internal abstract class ArrayResultProcessor : ResultProcessor { protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) { - switch(result.Type) + switch (result.Resp2TypeArray) { - case ResultType.MultiBulk: + case ResultType.Array: var items = result.GetItems(); T[] arr; if (items.IsEmpty) { - arr = Array.Empty(); + arr = []; } else { diff --git a/src/StackExchange.Redis/ResultTypeExtensions.cs b/src/StackExchange.Redis/ResultTypeExtensions.cs new file mode 100644 index 000000000..e2f941f00 --- /dev/null +++ b/src/StackExchange.Redis/ResultTypeExtensions.cs @@ -0,0 +1,11 @@ +namespace StackExchange.Redis +{ + internal static class ResultTypeExtensions + { + public static bool IsError(this ResultType value) + => (value & (ResultType)0b111) == ResultType.Error; + + public static ResultType ToResp2(this ResultType value) + => value & (ResultType)0b111; // just keep the last 3 bits + } +} diff --git a/src/StackExchange.Redis/Role.cs b/src/StackExchange.Redis/Role.cs index 293f7281e..587194026 100644 --- a/src/StackExchange.Redis/Role.cs +++ b/src/StackExchange.Redis/Role.cs @@ -5,9 +5,11 @@ namespace StackExchange.Redis /// /// Result of the ROLE command. Values depend on the role: master, replica, or sentinel. 
/// - /// https://redis.io/commands/role + /// public abstract class Role { + internal static Unknown Null { get; } = new Unknown(""); + /// /// One of "master", "slave" (aka replica), or "sentinel". /// @@ -19,9 +21,9 @@ public abstract class Role private Role(string role) => Value = role; /// - /// Result of the ROLE command for a master node. + /// Result of the ROLE command for a primary node. /// - /// https://redis.io/commands/role#master-output + /// public sealed class Master : Role { /// @@ -60,9 +62,12 @@ internal Replica(string ip, int port, long offset) Port = port; ReplicationOffset = offset; } + + /// + public override string ToString() => $"{Ip}:{Port} - {ReplicationOffset}"; } - internal Master(long offset, ICollection replicas) : base(RedisLiterals.master) + internal Master(long offset, ICollection replicas) : base(RedisLiterals.master!) { ReplicationOffset = offset; Replicas = replicas; @@ -72,16 +77,16 @@ internal Master(long offset, ICollection replicas) : base(RedisLiterals /// /// Result of the ROLE command for a replica node. /// - /// https://redis.io/commands/role#output-of-the-command-on-replicas + /// public sealed class Replica : Role { /// - /// The IP address of the master node for this replica. + /// The IP address of the primary node for this replica. /// public string MasterIp { get; } /// - /// The port number of the master node for this replica. + /// The port number of the primary node for this replica. /// public int MasterPort { get; } @@ -107,17 +112,17 @@ internal Replica(string role, string ip, int port, string state, long offset) : /// /// Result of the ROLE command for a sentinel node. /// - /// https://redis.io/commands/role#sentinel-output + /// public sealed class Sentinel : Role { /// - /// Master names monitored by this sentinel node. + /// Primary names monitored by this sentinel node. 
/// - public ICollection MonitoredMasters { get; } + public ICollection MonitoredMasters { get; } - internal Sentinel(ICollection masters) : base(RedisLiterals.sentinel) + internal Sentinel(ICollection primaries) : base(RedisLiterals.sentinel!) { - MonitoredMasters = masters; + MonitoredMasters = primaries; } } diff --git a/src/StackExchange.Redis/Runtime.cs b/src/StackExchange.Redis/Runtime.cs new file mode 100644 index 000000000..879c9c325 --- /dev/null +++ b/src/StackExchange.Redis/Runtime.cs @@ -0,0 +1,9 @@ +using System; +using System.Runtime.InteropServices; + +namespace StackExchange.Redis; + +internal static class Runtime +{ + public static readonly bool IsMono = RuntimeInformation.FrameworkDescription.StartsWith("Mono ", StringComparison.OrdinalIgnoreCase); +} diff --git a/src/StackExchange.Redis/ScriptParameterMapper.cs b/src/StackExchange.Redis/ScriptParameterMapper.cs index e6a556943..10dccd5ff 100644 --- a/src/StackExchange.Redis/ScriptParameterMapper.cs +++ b/src/StackExchange.Redis/ScriptParameterMapper.cs @@ -15,7 +15,7 @@ public readonly struct ScriptParameters public readonly RedisKey[] Keys; public readonly RedisValue[] Arguments; - public static readonly ConstructorInfo Cons = typeof(ScriptParameters).GetConstructor(new[] { typeof(RedisKey[]), typeof(RedisValue[]) }); + public static readonly ConstructorInfo Cons = typeof(ScriptParameters).GetConstructor(new[] { typeof(RedisKey[]), typeof(RedisValue[]) })!; public ScriptParameters(RedisKey[] keys, RedisValue[] args) { Keys = keys; @@ -23,11 +23,15 @@ public ScriptParameters(RedisKey[] keys, RedisValue[] args) } } - private static readonly Regex ParameterExtractor = new Regex(@"@(? ([a-z]|_) ([a-z]|_|\d)*)", RegexOptions.Compiled | RegexOptions.IgnoreCase | RegexOptions.IgnorePatternWhitespace); + private static readonly Regex ParameterExtractor = new Regex(@"@(? 
([a-z]|_) ([a-z]|_|\d)*)", RegexOptions.Compiled | RegexOptions.IgnoreCase | RegexOptions.IgnorePatternWhitespace | RegexOptions.CultureInvariant); + private static string[] ExtractParameters(string script) { var ps = ParameterExtractor.Matches(script); - if (ps.Count == 0) return null; + if (ps.Count == 0) + { + return Array.Empty(); + } var ret = new HashSet(); @@ -39,7 +43,7 @@ private static string[] ExtractParameters(string script) { var prevChar = script[ix]; - // don't consider this a parameter if it's in the middle of word (ie. if it's preceeded by a letter) + // don't consider this a parameter if it's in the middle of word (i.e. if it's preceded by a letter) if (char.IsLetterOrDigit(prevChar) || prevChar == '_') continue; // this is an escape, ignore it @@ -75,7 +79,7 @@ private static string MakeOrdinalScriptWithoutKeys(string rawScript, string[] ar { ret.Append("ARGV["); ret.Append(argIx + 1); - ret.Append("]"); + ret.Append(']'); } else { @@ -125,44 +129,42 @@ static ScriptParameterMapper() } /// - /// Turns a script with @namedParameters into a LuaScript that can be executed - /// against a given IDatabase(Async) object + /// Turns a script with @namedParameters into a LuaScript that can be executed against a given IDatabase(Async) object. /// /// The script to prepare. 
public static LuaScript PrepareScript(string script) { var ps = ExtractParameters(script); var ordinalScript = MakeOrdinalScriptWithoutKeys(script, ps); - return new LuaScript(script, ordinalScript, ps); } - private static readonly HashSet ConvertableTypes = - new HashSet { - typeof(int), - typeof(int?), - typeof(long), - typeof(long?), - typeof(double), - typeof(double?), - typeof(string), - typeof(byte[]), - typeof(ReadOnlyMemory), - typeof(bool), - typeof(bool?), - - typeof(RedisKey), - typeof(RedisValue) - }; + private static readonly HashSet ConvertableTypes = new() + { + typeof(int), + typeof(int?), + typeof(long), + typeof(long?), + typeof(double), + typeof(double?), + typeof(string), + typeof(byte[]), + typeof(ReadOnlyMemory), + typeof(bool), + typeof(bool?), + + typeof(RedisKey), + typeof(RedisValue), + }; /// - /// Determines whether or not the given type can be used to provide parameters for the given LuaScript. + /// Determines whether or not the given type can be used to provide parameters for the given . /// /// The type of the parameter. /// The script to match against. /// The first missing member, if any. /// The first type mismatched member, if any. - public static bool IsValidParameterHash(Type t, LuaScript script, out string missingMember, out string badTypeMember) + public static bool IsValidParameterHash(Type t, LuaScript script, out string? missingMember, out string? badTypeMember) { for (var i = 0; i < script.Arguments.Length; i++) { @@ -175,7 +177,7 @@ public static bool IsValidParameterHash(Type t, LuaScript script, out string mis return false; } - var memberType = member is FieldInfo ? ((FieldInfo)member).FieldType : ((PropertyInfo)member).PropertyType; + var memberType = member is FieldInfo memberFieldInfo ? 
memberFieldInfo.FieldType : ((PropertyInfo)member).PropertyType; if (!ConvertableTypes.Contains(memberType)) { missingMember = null; @@ -199,7 +201,7 @@ public static bool IsValidParameterHash(Type t, LuaScript script, out string mis /// types. /// /// - /// The created Func takes a RedisKey, which will be prefixed to all keys (and arguments of type RedisKey) for + /// The created Func takes a RedisKey, which will be prefixed to all keys (and arguments of type RedisKey) for /// keyspace isolation. /// /// @@ -209,27 +211,20 @@ public static bool IsValidParameterHash(Type t, LuaScript script, out string mis { if (!IsValidParameterHash(t, script, out _, out _)) throw new Exception("Shouldn't be possible"); - Expression GetMember(Expression root, MemberInfo member) + static Expression GetMember(Expression root, MemberInfo member) => member.MemberType switch { - switch (member.MemberType) - { - case MemberTypes.Property: - return Expression.Property(root, (PropertyInfo)member); - case MemberTypes.Field: - return Expression.Field(root, (FieldInfo)member); - default: - throw new ArgumentException(nameof(member)); - } - } + MemberTypes.Property => Expression.Property(root, (PropertyInfo)member), + MemberTypes.Field => Expression.Field(root, (FieldInfo)member), + _ => throw new ArgumentException($"Member type '{member.MemberType}' isn't recognized", nameof(member)), + }; var keys = new List(); var args = new List(); for (var i = 0; i < script.Arguments.Length; i++) { var argName = script.Arguments[i]; - var member = t.GetMember(argName).SingleOrDefault(m => m is PropertyInfo || m is FieldInfo); - - var memberType = member is FieldInfo ? ((FieldInfo)member).FieldType : ((PropertyInfo)member).PropertyType; + var member = t.GetMember(argName).SingleOrDefault(m => m is PropertyInfo || m is FieldInfo) ?? throw new ArgumentException($"There was no member found for {argName}"); + var memberType = member is FieldInfo memberFieldInfo ? 
memberFieldInfo.FieldType : ((PropertyInfo)member).PropertyType; if (memberType == typeof(RedisKey)) { @@ -247,8 +242,8 @@ Expression GetMember(Expression root, MemberInfo member) var keyPrefix = Expression.Parameter(typeof(RedisKey?), "keyPrefix"); Expression keysResult, valuesResult; - MethodInfo asRedisValue = null; - Expression[] keysResultArr = null; + MethodInfo? asRedisValue = null; + Expression[]? keysResultArr = null; if (keys.Count == 0) { // if there are no keys, don't allocate @@ -257,20 +252,18 @@ Expression GetMember(Expression root, MemberInfo member) else { var needsKeyPrefix = Expression.Property(keyPrefix, nameof(Nullable.HasValue)); - var keyPrefixValueArr = new[] { Expression.Call(keyPrefix, - nameof(Nullable.GetValueOrDefault), null, null) }; - var prepend = typeof(RedisKey).GetMethod(nameof(RedisKey.Prepend), - BindingFlags.Public | BindingFlags.Instance); - asRedisValue = typeof(RedisKey).GetMethod(nameof(RedisKey.AsRedisValue), - BindingFlags.NonPublic | BindingFlags.Instance); + var keyPrefixValueArr = new[] + { + Expression.Call(keyPrefix, nameof(Nullable.GetValueOrDefault), null, null), + }; + var prepend = typeof(RedisKey).GetMethod(nameof(RedisKey.Prepend), BindingFlags.Public | BindingFlags.Instance)!; + asRedisValue = typeof(RedisKey).GetMethod(nameof(RedisKey.AsRedisValue), BindingFlags.NonPublic | BindingFlags.Instance)!; keysResultArr = new Expression[keys.Count]; for (int i = 0; i < keysResultArr.Length; i++) { var member = GetMember(objTyped, keys[i]); - keysResultArr[i] = Expression.Condition(needsKeyPrefix, - Expression.Call(member, prepend, keyPrefixValueArr), - member); + keysResultArr[i] = Expression.Condition(needsKeyPrefix, Expression.Call(member, prepend, keyPrefixValueArr), member); } keysResult = Expression.NewArrayInit(typeof(RedisKey), keysResultArr); } @@ -285,11 +278,11 @@ Expression GetMember(Expression root, MemberInfo member) valuesResult = Expression.NewArrayInit(typeof(RedisValue), args.Select(arg => { var 
member = GetMember(objTyped, arg); - if (member.Type == typeof(RedisValue)) return member; // pass-thru + if (member.Type == typeof(RedisValue)) return member; // pass-through if (member.Type == typeof(RedisKey)) { // need to apply prefix (note we can re-use the body from earlier) - var val = keysResultArr[keys.IndexOf(arg)]; - return Expression.Call(val, asRedisValue); + var val = keysResultArr![keys.IndexOf(arg)]; + return Expression.Call(val, asRedisValue!); } // otherwise: use the conversion operator @@ -299,8 +292,7 @@ Expression GetMember(Expression root, MemberInfo member) } var body = Expression.Lambda>( - Expression.New(ScriptParameters.Cons, keysResult, valuesResult), - objUntyped, keyPrefix); + Expression.New(ScriptParameters.Cons, keysResult, valuesResult), objUntyped, keyPrefix); return body.Compile(); } } diff --git a/src/StackExchange.Redis/ServerCounters.cs b/src/StackExchange.Redis/ServerCounters.cs index 96b4bbb60..b661f27d7 100644 --- a/src/StackExchange.Redis/ServerCounters.cs +++ b/src/StackExchange.Redis/ServerCounters.cs @@ -4,7 +4,7 @@ namespace StackExchange.Redis { /// - /// Illustrates the queues associates with this server + /// Illustrates the queues associates with this server. /// public class ServerCounters { @@ -12,7 +12,7 @@ public class ServerCounters /// Creates a instance for an . /// /// The to create counters for. - public ServerCounters(EndPoint endpoint) + public ServerCounters(EndPoint? endpoint) { EndPoint = endpoint; Interactive = new ConnectionCounters(ConnectionType.Interactive); @@ -21,31 +21,32 @@ public ServerCounters(EndPoint endpoint) } /// - /// The endpoint to which this data relates (this can be null if the data represents all servers) + /// The endpoint to which this data relates (this can be null if the data represents all servers). /// - public EndPoint EndPoint { get; } + public EndPoint? 
EndPoint { get; } /// - /// Counters associated with the interactive (non pub-sub) connection + /// Counters associated with the interactive (non pub-sub) connection. /// public ConnectionCounters Interactive { get; } /// - /// Counters associated with other ambient activity + /// Counters associated with other ambient activity. /// public ConnectionCounters Other { get; } /// - /// Counters associated with the subscription (pub-sub) connection + /// Counters associated with the subscription (pub-sub) connection. /// public ConnectionCounters Subscription { get; } + /// - /// Indicates the total number of outstanding items against this server + /// Indicates the total number of outstanding items against this server. /// public long TotalOutstanding => Interactive.TotalOutstanding + Subscription.TotalOutstanding + Other.TotalOutstanding; /// - /// See Object.ToString(); + /// See . /// public override string ToString() { diff --git a/src/StackExchange.Redis/ServerEndPoint.cs b/src/StackExchange.Redis/ServerEndPoint.cs old mode 100755 new mode 100644 index 58bcb5c3e..abe8d8afb --- a/src/StackExchange.Redis/ServerEndPoint.cs +++ b/src/StackExchange.Redis/ServerEndPoint.cs @@ -8,7 +8,7 @@ using System.Text.RegularExpressions; using System.Threading; using System.Threading.Tasks; -using static StackExchange.Redis.ConnectionMultiplexer; +using Microsoft.Extensions.Logging; using static StackExchange.Redis.PhysicalBridge; namespace StackExchange.Redis @@ -17,24 +17,24 @@ namespace StackExchange.Redis internal enum UnselectableFlags { None = 0, - RedundantMaster = 1, + RedundantPrimary = 1, DidNotRespond = 2, - ServerType = 4 + ServerType = 4, } internal sealed partial class ServerEndPoint : IDisposable { - internal volatile ServerEndPoint Master; + internal volatile ServerEndPoint? 
Primary; internal volatile ServerEndPoint[] Replicas = Array.Empty(); - private static readonly Regex nameSanitizer = new Regex("[^!-~]", RegexOptions.Compiled); + private static readonly Regex nameSanitizer = new Regex("[^!-~]+", RegexOptions.Compiled); private readonly Hashtable knownScripts = new Hashtable(StringComparer.Ordinal); private int databases, writeEverySeconds; - private PhysicalBridge interactive, subscription; - private bool isDisposed; + private PhysicalBridge? interactive, subscription; + private bool isDisposed, replicaReadOnly, isReplica, allowReplicaWrites; + private bool? supportsDatabases, supportsPrimaryWrites; private ServerType serverType; - private bool replicaReadOnly, isReplica; private volatile UnselectableFlags unselectableReasons; private Version version; @@ -56,53 +56,90 @@ public ServerEndPoint(ConnectionMultiplexer multiplexer, EndPoint endpoint) writeEverySeconds = config.KeepAlive > 0 ? config.KeepAlive : 60; serverType = ServerType.Standalone; ConfigCheckSeconds = Multiplexer.RawConfig.ConfigCheckSeconds; - // overrides for twemproxy - if (multiplexer.RawConfig.Proxy == Proxy.Twemproxy) + + // overrides for twemproxy/envoyproxy + switch (multiplexer.RawConfig.Proxy) { - databases = 1; - serverType = ServerType.Twemproxy; + case Proxy.Twemproxy: + databases = 1; + serverType = ServerType.Twemproxy; + break; + case Proxy.Envoyproxy: + databases = 1; + serverType = ServerType.Envoyproxy; + break; } } - public ClusterConfiguration ClusterConfiguration { get; private set; } - - public int Databases { get { return databases; } set { SetConfig(ref databases, value); } } + private RedisServer? _defaultServer; + public RedisServer GetRedisServer(object? asyncState) + => asyncState is null + ? (_defaultServer ??= new RedisServer(this, null)) // reuse and memoize + : new RedisServer(this, asyncState); public EndPoint EndPoint { get; } - public bool HasDatabases => serverType == ServerType.Standalone; + public ClusterConfiguration? 
ClusterConfiguration { get; private set; } - public bool IsConnected => interactive?.IsConnected == true; + /// + /// Whether this endpoint supports databases at all. + /// Note that some servers are cluster but present as standalone (e.g. Redis Enterprise), so we respect + /// being disabled here as a performance workaround. + /// + /// + /// This is memoized because it's accessed on hot paths inside the write lock. + /// + public bool SupportsDatabases => + supportsDatabases ??= serverType == ServerType.Standalone && Multiplexer.CommandMap.IsAvailable(RedisCommand.SELECT); + + public int Databases + { + get => databases; + set => SetConfig(ref databases, value); + } public bool IsConnecting => interactive?.IsConnecting == true; + public bool IsConnected => interactive?.IsConnected == true; + public bool IsSubscriberConnected => KnowOrAssumeResp3() ? IsConnected : subscription?.IsConnected == true; + + public bool KnowOrAssumeResp3() + { + var protocol = interactive?.Protocol; + return protocol is not null + ? protocol.GetValueOrDefault() >= RedisProtocol.Resp3 // <= if we've completed handshake, use what we *know for sure* + : Multiplexer.RawConfig.TryResp3(); // otherwise, use what we *expect* + } + + public bool SupportsSubscriptions => Multiplexer.CommandMap.IsAvailable(RedisCommand.SUBSCRIBE); + public bool SupportsPrimaryWrites => supportsPrimaryWrites ??= !IsReplica || !ReplicaReadOnly || AllowReplicaWrites; private readonly List> _pendingConnectionMonitors = new List>(); /// /// Awaitable state seeing if this endpoint is connected. /// - public Task OnConnectedAsync(LogProxy log = null, bool sendTracerIfConnected = false, bool autoConfigureIfConnected = false) + public Task OnConnectedAsync(ILogger? log = null, bool sendTracerIfConnected = false, bool autoConfigureIfConnected = false) { - async Task IfConnectedAsync(LogProxy log, bool sendTracerIfConnected, bool autoConfigureIfConnected) + async Task IfConnectedAsync(ILogger? 
log, bool sendTracerIfConnected, bool autoConfigureIfConnected) { - log?.WriteLine($"{Format.ToString(this)}: OnConnectedAsync already connected start"); + log?.LogInformationOnConnectedAsyncAlreadyConnectedStart(new(this)); if (autoConfigureIfConnected) { await AutoConfigureAsync(null, log).ForAwait(); } if (sendTracerIfConnected) { - await SendTracer(log).ForAwait(); + await SendTracerAsync(log).ForAwait(); } - log?.WriteLine($"{Format.ToString(this)}: OnConnectedAsync already connected end"); + log?.LogInformationOnConnectedAsyncAlreadyConnectedEnd(new(this)); return "Already connected"; } if (!IsConnected) { - log?.WriteLine($"{Format.ToString(this)}: OnConnectedAsync init (State={interactive?.ConnectionState})"); + log?.LogInformationOnConnectedAsyncInit(new(this), interactive?.ConnectionState); var tcs = new TaskCompletionSource(TaskCreationOptions.RunContinuationsAsynchronously); - _ = tcs.Task.ContinueWith(t => log?.WriteLine($"{Format.ToString(this)}: OnConnectedAsync completed ({t.Result})")); + _ = tcs.Task.ContinueWith(t => log?.LogInformationOnConnectedAsyncCompleted(new(this), t.Result)); lock (_pendingConnectionMonitors) { _pendingConnectionMonitors.Add(tcs); @@ -118,7 +155,7 @@ async Task IfConnectedAsync(LogProxy log, bool sendTracerIfConnected, bo return IfConnectedAsync(log, sendTracerIfConnected, autoConfigureIfConnected); } - internal Exception LastException + internal Exception? 
LastException { get { @@ -126,7 +163,7 @@ internal Exception LastException var subEx = subscription?.LastException; var subExData = subEx?.Data; - //check if subscription endpoint has a better lastexception + // check if subscription endpoint has a better last exception if (subExData != null && subExData.Contains("Redis-FailureType") && subExData["Redis-FailureType"]?.ToString() != nameof(ConnectionFailureType.UnableToConnect)) { return subEx; @@ -135,57 +172,60 @@ internal Exception LastException } } - internal PhysicalBridge.State ConnectionState + internal State InteractiveConnectionState => interactive?.ConnectionState ?? State.Disconnected; + internal State SubscriptionConnectionState => KnowOrAssumeResp3() ? InteractiveConnectionState : subscription?.ConnectionState ?? State.Disconnected; + + public long OperationCount => interactive?.OperationCount ?? 0 + subscription?.OperationCount ?? 0; + + public bool RequiresReadMode => serverType == ServerType.Cluster && IsReplica; + + public ServerType ServerType { - get - { - var tmp = interactive; - return tmp.ConnectionState; - } + get => serverType; + set => SetConfig(ref serverType, value); } - public bool IsReplica { get { return isReplica; } set { SetConfig(ref isReplica, value); } } + public bool IsReplica + { + get => isReplica; + set => SetConfig(ref isReplica, value); + } - public long OperationCount + public bool ReplicaReadOnly { - get + get => replicaReadOnly; + set => SetConfig(ref replicaReadOnly, value); + } + + public bool AllowReplicaWrites + { + get => allowReplicaWrites; + set { - long total = 0; - var tmp = interactive; - if (tmp != null) total += tmp.OperationCount; - tmp = subscription; - if (tmp != null) total += tmp.OperationCount; - return total; + allowReplicaWrites = value; + ClearMemoized(); } } - public bool RequiresReadMode => serverType == ServerType.Cluster && IsReplica; - - public ServerType ServerType { get { return serverType; } set { SetConfig(ref serverType, value); } } - - 
public bool ReplicaReadOnly { get { return replicaReadOnly; } set { SetConfig(ref replicaReadOnly, value); } } - - public bool AllowReplicaWrites { get; set; } - - public Version Version { get { return version; } set { SetConfig(ref version, value); } } - - public int WriteEverySeconds { get { return writeEverySeconds; } set { SetConfig(ref writeEverySeconds, value); } } + public Version Version + { + get => version; + set => SetConfig(ref version, value); + } - internal ConnectionMultiplexer Multiplexer { get; } + /// + /// If we have a connection (interactive), report the protocol being used. + /// + public RedisProtocol? Protocol => interactive?.Protocol; - public void ClearUnselectable(UnselectableFlags flags) + public int WriteEverySeconds { - var oldFlags = unselectableReasons; - if (oldFlags != 0) - { - unselectableReasons &= ~flags; - if (unselectableReasons != oldFlags) - { - Multiplexer.Trace(unselectableReasons == 0 ? "Now usable" : ("Now unusable: " + flags), ToString()); - } - } + get => writeEverySeconds; + set => SetConfig(ref writeEverySeconds, value); } + internal ConnectionMultiplexer Multiplexer { get; } + public void Dispose() { isDisposed = true; @@ -198,20 +238,46 @@ public void Dispose() tmp?.Dispose(); } - public PhysicalBridge GetBridge(ConnectionType type, bool create = true, LogProxy log = null) + public PhysicalBridge? GetBridge(ConnectionType type, bool create = true, ILogger? log = null) { if (isDisposed) return null; switch (type) { case ConnectionType.Interactive: + case ConnectionType.Subscription when KnowOrAssumeResp3(): return interactive ?? (create ? interactive = CreateBridge(ConnectionType.Interactive, log) : null); case ConnectionType.Subscription: return subscription ?? (create ? subscription = CreateBridge(ConnectionType.Subscription, log) : null); + default: + return null; + } + } + + public PhysicalBridge? 
GetBridge(Message message) + { + if (isDisposed) return null; + + // Subscription commands go to a specific bridge - so we need to set that up. + // There are other commands we need to send to the right connection (e.g. subscriber PING with an explicit SetForSubscriptionBridge call), + // but these always go subscriber. + switch (message.Command) + { + case RedisCommand.SUBSCRIBE: + case RedisCommand.UNSUBSCRIBE: + case RedisCommand.PSUBSCRIBE: + case RedisCommand.PUNSUBSCRIBE: + case RedisCommand.SSUBSCRIBE: + case RedisCommand.SUNSUBSCRIBE: + message.SetForSubscriptionBridge(); + break; } - return null; + + return (message.IsForSubscriptionBridge && !KnowOrAssumeResp3()) + ? subscription ??= CreateBridge(ConnectionType.Subscription, null) + : interactive ??= CreateBridge(ConnectionType.Interactive, null); } - public PhysicalBridge GetBridge(RedisCommand command, bool create = true) + public PhysicalBridge? GetBridge(RedisCommand command, bool create = true) { if (isDisposed) return null; switch (command) @@ -220,10 +286,15 @@ public PhysicalBridge GetBridge(RedisCommand command, bool create = true) case RedisCommand.UNSUBSCRIBE: case RedisCommand.PSUBSCRIBE: case RedisCommand.PUNSUBSCRIBE: - return subscription ?? (create ? subscription = CreateBridge(ConnectionType.Subscription, null) : null); - default: - return interactive ?? (create ? interactive = CreateBridge(ConnectionType.Interactive, null) : null); + case RedisCommand.SSUBSCRIBE: + case RedisCommand.SUNSUBSCRIBE: + if (!KnowOrAssumeResp3()) + { + return subscription ?? (create ? subscription = CreateBridge(ConnectionType.Subscription, null) : null); + } + break; } + return interactive ?? (create ? 
interactive = CreateBridge(ConnectionType.Interactive, null) : null); } public RedisFeatures GetFeatures() => new RedisFeatures(version); @@ -244,24 +315,24 @@ public void SetClusterConfiguration(ClusterConfiguration configuration) public void UpdateNodeRelations(ClusterConfiguration configuration) { - var thisNode = configuration.Nodes.FirstOrDefault(x => x.EndPoint.Equals(EndPoint)); + var thisNode = configuration.Nodes.FirstOrDefault(x => x.EndPoint?.Equals(EndPoint) == true); if (thisNode != null) { - Multiplexer.Trace($"Updating node relations for {thisNode.EndPoint.ToString()}..."); - List replicas = null; - ServerEndPoint master = null; + Multiplexer.Trace($"Updating node relations for {Format.ToString(thisNode.EndPoint)}..."); + List? replicas = null; + ServerEndPoint? primary = null; foreach (var node in configuration.Nodes) { if (node.NodeId == thisNode.ParentNodeId) { - master = Multiplexer.GetServerEndPoint(node.EndPoint); + primary = Multiplexer.GetServerEndPoint(node.EndPoint); } - else if (node.ParentNodeId == thisNode.NodeId) + else if (node.ParentNodeId == thisNode.NodeId && node.EndPoint is not null) { - (replicas ?? (replicas = new List())).Add(Multiplexer.GetServerEndPoint(node.EndPoint)); + (replicas ??= new List()).Add(Multiplexer.GetServerEndPoint(node.EndPoint)); } } - Master = master; + Primary = primary; Replicas = replicas?.ToArray() ?? Array.Empty(); } } @@ -279,17 +350,27 @@ public void SetUnselectable(UnselectableFlags flags) } } + public void ClearUnselectable(UnselectableFlags flags) + { + var oldFlags = unselectableReasons; + if (oldFlags != 0) + { + unselectableReasons &= ~flags; + if (unselectableReasons != oldFlags) + { + Multiplexer.Trace(unselectableReasons == 0 ? 
"Now usable" : ("Now unusable: " + flags), ToString()); + } + } + } + public override string ToString() => Format.ToString(EndPoint); [Obsolete("prefer async")] - public WriteResult TryWriteSync(Message message) => GetBridge(message.Command)?.TryWriteSync(message, isReplica) ?? WriteResult.NoConnectionAvailable; + public WriteResult TryWriteSync(Message message) => GetBridge(message)?.TryWriteSync(message, isReplica) ?? WriteResult.NoConnectionAvailable; - public ValueTask TryWriteAsync(Message message) => GetBridge(message.Command)?.TryWriteAsync(message, isReplica) ?? new ValueTask(WriteResult.NoConnectionAvailable); + public ValueTask TryWriteAsync(Message message) => GetBridge(message)?.TryWriteAsync(message, isReplica) ?? new ValueTask(WriteResult.NoConnectionAvailable); - internal void Activate(ConnectionType type, LogProxy log) - { - GetBridge(type, true, log); - } + internal void Activate(ConnectionType type, ILogger? log) => GetBridge(type, true, log); internal void AddScript(string script, byte[] hash) { @@ -299,25 +380,23 @@ internal void AddScript(string script, byte[] hash) } } - internal async Task AutoConfigureAsync(PhysicalConnection connection, LogProxy log = null) + internal async Task AutoConfigureAsync(PhysicalConnection? connection, ILogger? log = null) { - if (serverType == ServerType.Twemproxy) + if (!serverType.SupportsAutoConfigure()) { - // don't try to detect configuration; all the config commands are disabled, and - // the fallback master/replica detection won't help + // Don't try to detect configuration. 
+ // All the config commands are disabled and the fallback primary/replica detection won't help return; } - log?.WriteLine($"{Format.ToString(this)}: Auto-configuring..."); + log?.LogInformationAutoConfiguring(new(this)); var commandMap = Multiplexer.CommandMap; -#pragma warning disable CS0618 - const CommandFlags flags = CommandFlags.FireAndForget | CommandFlags.HighPriority | CommandFlags.NoRedirect; -#pragma warning restore CS0618 + const CommandFlags flags = CommandFlags.FireAndForget | CommandFlags.NoRedirect; var features = GetFeatures(); Message msg; - var autoConfigProcessor = new ResultProcessor.AutoConfigureProcessor(log); + var autoConfigProcessor = ResultProcessor.AutoConfigureProcessor.Create(log); if (commandMap.IsAvailable(RedisCommand.CONFIG)) { @@ -345,6 +424,10 @@ internal async Task AutoConfigureAsync(PhysicalConnection connection, LogProxy l lastInfoReplicationCheckTicks = Environment.TickCount; if (features.InfoSections) { + // note: Redis 7.0 has a multi-section usage, but we don't know + // the server version at this point; we *could* use the optional + // value on the config, but let's keep things simple: these + // commands are suitably cheap msg = Message.Create(-1, flags, RedisCommand.INFO, RedisLiterals.replication); msg.SetInternalCall(); await WriteDirectOrQueueFireAndForgetAsync(connection, msg, autoConfigProcessor).ForAwait(); @@ -362,10 +445,11 @@ internal async Task AutoConfigureAsync(PhysicalConnection connection, LogProxy l } else if (commandMap.IsAvailable(RedisCommand.SET)) { - // this is a nasty way to find if we are a replica, and it will only work on up-level servers, but... + // This is a nasty way to find if we are a replica, and it will only work on up-level servers, but... 
RedisKey key = Multiplexer.UniqueId; - // the actual value here doesn't matter (we detect the error code if it fails); the value here is to at least give some - // indication to anyone watching via "monitor", but we could send two guids (key/value) and it would work the same + // The actual value here doesn't matter (we detect the error code if it fails). + // The value here is to at least give some indication to anyone watching via "monitor", + // but we could send two GUIDs (key/value) and it would work the same. msg = Message.Create(0, flags, RedisCommand.SET, key, RedisLiterals.replica_read_only, RedisLiterals.PX, 1, RedisLiterals.NX); msg.SetInternalCall(); await WriteDirectOrQueueFireAndForgetAsync(connection, msg, autoConfigProcessor).ForAwait(); @@ -376,11 +460,25 @@ internal async Task AutoConfigureAsync(PhysicalConnection connection, LogProxy l msg.SetInternalCall(); await WriteDirectOrQueueFireAndForgetAsync(connection, msg, ResultProcessor.ClusterNodes).ForAwait(); } + // If we are going to fetch a tie breaker, do so last and we'll get it in before the tracer fires completing the connection + // But if GETs are disabled on this, do not fail the connection - we just don't get tiebreaker benefits + if (Multiplexer.RawConfig.TryGetTieBreaker(out var tieBreakerKey) && Multiplexer.CommandMap.IsAvailable(RedisCommand.GET)) + { + log?.LogInformationRequestingTieBreak(new(EndPoint), tieBreakerKey); + msg = Message.Create(0, flags, RedisCommand.GET, tieBreakerKey); + msg.SetInternalCall(); + msg = LoggingMessage.Create(log, msg); + await WriteDirectOrQueueFireAndForgetAsync(connection, msg, ResultProcessor.TieBreaker).ForAwait(); + } } private int _nextReplicaOffset; - internal uint NextReplicaOffset() // used to round-robin between multiple replicas - => (uint)System.Threading.Interlocked.Increment(ref _nextReplicaOffset); + + /// + /// Used to round-robin between multiple replicas. 
+ /// + internal uint NextReplicaOffset() + => (uint)Interlocked.Increment(ref _nextReplicaOffset); internal Task Close(ConnectionType connectionType) { @@ -410,18 +508,21 @@ internal void FlushScriptCache() } } - private string runId; - internal string RunId + private string? runId; + internal string? RunId { - get { return runId; } + get => runId; set { - if (value != runId) // we only care about changes + // We only care about changes + if (value != runId) { - // if we had an old run-id, and it has changed, then the - // server has been restarted; which means the script cache - // is toast - if (runId != null) FlushScriptCache(); + // If we had an old run-id, and it has changed, then the server has been restarted + // ...which means the script cache is toast + if (runId != null) + { + FlushScriptCache(); + } runId = value; } } @@ -435,23 +536,19 @@ internal ServerCounters GetCounters() return counters; } - internal void GetOutstandingCount(RedisCommand command, out int inst, out int qs, out long @in, out int qu, out bool aw, out long toRead, out long toWrite, - out BacklogStatus bs, out PhysicalConnection.ReadStatus rs, out PhysicalConnection.WriteStatus ws) + internal BridgeStatus GetBridgeStatus(ConnectionType connectionType) { - var bridge = GetBridge(command, false); - if (bridge == null) + try { - inst = qs = qu = 0; - @in = toRead = toWrite = 0; - aw = false; - bs = BacklogStatus.Inactive; - rs = PhysicalConnection.ReadStatus.NA; - ws = PhysicalConnection.WriteStatus.NA; + return GetBridge(connectionType, false)?.GetStatus() ?? 
BridgeStatus.Zero; } - else + catch (Exception ex) { - bridge.GetOutstandingCount(out inst, out qs, out @in, out qu, out aw, out toRead, out toWrite, out bs, out rs, out ws); + // only needs to be best efforts + System.Diagnostics.Debug.WriteLine(ex.Message); } + + return BridgeStatus.Zero; } internal string GetProfile() @@ -464,12 +561,12 @@ internal string GetProfile() return sb.ToString(); } - internal byte[] GetScriptHash(string script, RedisCommand command) + internal byte[]? GetScriptHash(string script, RedisCommand command) { - var found = (byte[])knownScripts[script]; + var found = (byte[]?)knownScripts[script]; if (found == null && command == RedisCommand.EVALSHA) { - // the script provided is a hex sha; store and re-use the ascii for that + // The script provided is a hex SHA - store and re-use the ASCii for that found = Encoding.ASCII.GetBytes(script); lock (knownScripts) { @@ -479,22 +576,18 @@ internal byte[] GetScriptHash(string script, RedisCommand command) return found; } - internal string GetStormLog(RedisCommand command) - { - var bridge = GetBridge(command); - return bridge?.GetStormLog(); - } + internal string? GetStormLog(Message message) => GetBridge(message)?.GetStormLog(); - internal Message GetTracerMessage(bool assertIdentity) + internal Message GetTracerMessage(bool checkResponse) { - // different configurations block certain commands, as can ad-hoc local configurations, so - // we'll do the best with what we have available. - // note that the muxer-ctor asserts that one of ECHO, PING, TIME of GET is available - // see also: TracerProcessor + // Different configurations block certain commands, as can ad-hoc local configurations, so + // we'll do the best with what we have available. 
+ // Note: muxer-ctor asserts that one of ECHO, PING, TIME of GET is available + // See also: TracerProcessor var map = Multiplexer.CommandMap; Message msg; const CommandFlags flags = CommandFlags.NoRedirect | CommandFlags.FireAndForget; - if (assertIdentity && map.IsAvailable(RedisCommand.ECHO)) + if (checkResponse && map.IsAvailable(RedisCommand.ECHO)) { msg = Message.Create(-1, flags, RedisCommand.ECHO, (RedisValue)Multiplexer.UniqueId); } @@ -506,9 +599,9 @@ internal Message GetTracerMessage(bool assertIdentity) { msg = Message.Create(-1, flags, RedisCommand.TIME); } - else if (!assertIdentity && map.IsAvailable(RedisCommand.ECHO)) + else if (!checkResponse && map.IsAvailable(RedisCommand.ECHO)) { - // we'll use echo as a PING substitute if it is all we have (in preference to EXISTS) + // We'll use echo as a PING substitute if it is all we have (in preference to EXISTS) msg = Message.Create(-1, flags, RedisCommand.ECHO, (RedisValue)Multiplexer.UniqueId); } else @@ -524,7 +617,11 @@ internal Message GetTracerMessage(bool assertIdentity) internal bool IsSelectable(RedisCommand command, bool allowDisconnected = false) { - var bridge = unselectableReasons == 0 ? GetBridge(command, false) : null; + // Until we've connected at least once, we're going to have a DidNotRespond unselectable reason present + var bridge = unselectableReasons == 0 || (allowDisconnected && unselectableReasons == UnselectableFlags.DidNotRespond) + ? 
GetBridge(command, false) + : null; + return bridge != null && (allowDisconnected || bridge.IsConnected); } @@ -545,18 +642,41 @@ internal void OnDisconnected(PhysicalBridge bridge) if (bridge == interactive) { CompletePendingConnectionMonitors("Disconnected"); + if (Protocol is RedisProtocol.Resp3) + { + Multiplexer.UpdateSubscriptions(); + } + } + else if (bridge == subscription) + { + Multiplexer.UpdateSubscriptions(); } } - internal Task OnEstablishingAsync(PhysicalConnection connection, LogProxy log) + internal Task OnEstablishingAsync(PhysicalConnection connection, ILogger? log) { + static async Task OnEstablishingAsyncAwaited(PhysicalConnection connection, Task handshake) + { + try + { + await handshake.ForAwait(); + } + catch (Exception ex) + { + connection.RecordConnectionFailed(ConnectionFailureType.InternalFailure, ex); + } + } + try { if (connection == null) return Task.CompletedTask; + var handshake = HandshakeAsync(connection, log); if (handshake.Status != TaskStatus.RanToCompletion) + { return OnEstablishingAsyncAwaited(connection, handshake); + } } catch (Exception ex) { @@ -565,18 +685,6 @@ internal Task OnEstablishingAsync(PhysicalConnection connection, LogProxy log) return Task.CompletedTask; } - private static async Task OnEstablishingAsyncAwaited(PhysicalConnection connection, Task handshake) - { - try - { - await handshake.ForAwait(); - } - catch (Exception ex) - { - connection.RecordConnectionFailed(ConnectionFailureType.InternalFailure, ex); - } - } - internal void OnFullyEstablished(PhysicalConnection connection, string source) { try @@ -584,48 +692,61 @@ internal void OnFullyEstablished(PhysicalConnection connection, string source) var bridge = connection?.BridgeCouldBeNull; if (bridge != null) { - if (bridge == subscription) + // Clear the unselectable flag ASAP since we are open for business + ClearUnselectable(UnselectableFlags.DidNotRespond); + + bool isResp3 = KnowOrAssumeResp3(); + if (bridge == subscription || isResp3) { - 
Multiplexer.ResendSubscriptions(this); + // Note: this MUST be fire and forget, because we might be in the middle of a Sync processing + // TracerProcessor which is executing this line inside a SetResultCore(). + // Since we're issuing commands inside a SetResult path in a message, we'd create a deadlock by waiting. + Multiplexer.EnsureSubscriptions(CommandFlags.FireAndForget); } - else if (bridge == interactive) + if (IsConnected && (IsSubscriberConnected || !SupportsSubscriptions || isResp3)) { + // Only connect on the second leg - we can accomplish this by checking both + // Or the first leg, if we're only making 1 connection because subscriptions aren't supported CompletePendingConnectionMonitors(source); } - Multiplexer.OnConnectionRestored(EndPoint, bridge.ConnectionType, connection?.ToString()); + Multiplexer.OnConnectionRestored(EndPoint, bridge.ConnectionType, connection?.ToString()); } } catch (Exception ex) { - connection.RecordConnectionFailed(ConnectionFailureType.InternalFailure, ex); + connection?.RecordConnectionFailed(ConnectionFailureType.InternalFailure, ex); } } - internal int LastInfoReplicationCheckSecondsAgo - { - get { return unchecked(Environment.TickCount - Thread.VolatileRead(ref lastInfoReplicationCheckTicks)) / 1000; } - } + internal int LastInfoReplicationCheckSecondsAgo => + unchecked(Environment.TickCount - Volatile.Read(ref lastInfoReplicationCheckTicks)) / 1000; - private EndPoint masterEndPoint; - public EndPoint MasterEndPoint + private EndPoint? primaryEndPoint; + public EndPoint? PrimaryEndPoint { - get { return masterEndPoint; } - set { SetConfig(ref masterEndPoint, value); } + get => primaryEndPoint; + set => SetConfig(ref primaryEndPoint, value); } + /// + /// Result of the latest tie breaker (from the last reconfigure). + /// + internal string? 
TieBreakerResult { get; set; } + internal bool CheckInfoReplication() { lastInfoReplicationCheckTicks = Environment.TickCount; ResetExponentiallyReplicationCheck(); - PhysicalBridge bridge; - if (version >= RedisFeatures.v2_8_0 && Multiplexer.CommandMap.IsAvailable(RedisCommand.INFO) - && (bridge = GetBridge(ConnectionType.Interactive, false)) != null) + + if (version.IsAtLeast(RedisFeatures.v2_8_0) && Multiplexer.CommandMap.IsAvailable(RedisCommand.INFO) + && GetBridge(ConnectionType.Interactive, false) is PhysicalBridge bridge) { -#pragma warning disable CS0618 - var msg = Message.Create(-1, CommandFlags.FireAndForget | CommandFlags.HighPriority | CommandFlags.NoRedirect, RedisCommand.INFO, RedisLiterals.replication); + var msg = Message.Create(-1, CommandFlags.FireAndForget | CommandFlags.NoRedirect, RedisCommand.INFO, RedisLiterals.replication); msg.SetInternalCall(); - WriteDirectFireAndForgetSync(msg, ResultProcessor.AutoConfigure, bridge); + msg.SetSource(ResultProcessor.AutoConfigure, null); +#pragma warning disable CS0618 // Type or member is obsolete + bridge.TryWriteSync(msg, isReplica); #pragma warning restore CS0618 return true; } @@ -635,20 +756,21 @@ internal bool CheckInfoReplication() private int lastInfoReplicationCheckTicks; internal volatile int ConfigCheckSeconds; [ThreadStatic] - private static Random r; + private static Random? r; - - // Forces frequent replication check starting from 1 second up to max ConfigCheckSeconds with an exponential increment + /// + /// Forces frequent replication check starting from 1 second up to max ConfigCheckSeconds with an exponential increment. + /// internal void ForceExponentialBackoffReplicationCheck() { - ConfigCheckSeconds = 1; // start checking info replication more frequently + ConfigCheckSeconds = 1; } private void ResetExponentiallyReplicationCheck() { if (ConfigCheckSeconds < Multiplexer.RawConfig.ConfigCheckSeconds) { - r = r ?? 
new Random(); + r ??= new Random(); var newExponentialConfigCheck = ConfigCheckSeconds * 2; var jitter = r.Next(ConfigCheckSeconds + 1, newExponentialConfigCheck); ConfigCheckSeconds = Math.Min(jitter, Multiplexer.RawConfig.ConfigCheckSeconds); @@ -658,7 +780,7 @@ private void ResetExponentiallyReplicationCheck() private int _heartBeatActive; internal void OnHeartbeat() { - // don't overlap operations on an endpoint + // Don't overlap heartbeat operations on an endpoint if (Interlocked.CompareExchange(ref _heartBeatActive, 1, 0) == 0) { try @@ -677,9 +799,9 @@ internal void OnHeartbeat() } } - internal Task WriteDirectAsync(Message message, ResultProcessor processor, object asyncState = null, PhysicalBridge bridge = null) + internal Task WriteDirectAsync(Message message, ResultProcessor processor, PhysicalBridge? bridge = null) { - static async Task Awaited(ServerEndPoint @this, Message message, ValueTask write, TaskCompletionSource tcs) + static async Task Awaited(ServerEndPoint @this, Message message, ValueTask write, TaskCompletionSource tcs) { var result = await write.ForAwait(); if (result != WriteResult.Success) @@ -690,9 +812,9 @@ static async Task Awaited(ServerEndPoint @this, Message message, ValueTask.Create(out var tcs, asyncState); + var source = TaskResultBox.Create(out var tcs, null); message.SetSource(processor, source); - if (bridge == null) bridge = GetBridge(message.Command); + bridge ??= GetBridge(message); WriteResult result; if (bridge == null) @@ -717,26 +839,13 @@ static async Task Awaited(ServerEndPoint @this, Message message, ValueTask(Message message, ResultProcessor processor, PhysicalBridge bridge = null) - { - if (message != null) - { - message.SetSource(processor, null); - Multiplexer.Trace("Enqueue: " + message); -#pragma warning disable CS0618 - (bridge ?? 
GetBridge(message.Command)).TryWriteSync(message, isReplica); -#pragma warning restore CS0618 - } - } - internal void ReportNextFailure() { interactive?.ReportNextFailure(); subscription?.ReportNextFailure(); } - internal Task SendTracer(LogProxy log = null) + internal Task SendTracerAsync(ILogger? log = null) { var msg = GetTracerMessage(false); msg = LoggingMessage.Create(log, msg); @@ -746,7 +855,7 @@ internal Task SendTracer(LogProxy log = null) internal string Summary() { var sb = new StringBuilder(Format.ToString(EndPoint)) - .Append(": ").Append(serverType).Append(" v").Append(version).Append(", ").Append(isReplica ? "replica" : "master"); + .Append(": ").Append(serverType).Append(" v").Append(version).Append(", ").Append(isReplica ? "replica" : "primary"); if (databases > 0) sb.Append("; ").Append(databases).Append(" databases"); if (writeEverySeconds > 0) @@ -776,7 +885,11 @@ internal string Summary() return sb.ToString(); } - internal ValueTask WriteDirectOrQueueFireAndForgetAsync(PhysicalConnection connection, Message message, ResultProcessor processor) + /// + /// Write the message directly to the pipe or fail...will not queue. + /// + /// The type of the result processor. + internal ValueTask WriteDirectOrQueueFireAndForgetAsync(PhysicalConnection? 
connection, Message message, ResultProcessor processor) { static async ValueTask Awaited(ValueTask l_result) => await l_result.ForAwait(); @@ -787,7 +900,8 @@ internal ValueTask WriteDirectOrQueueFireAndForgetAsync(PhysicalConnection co if (connection == null) { Multiplexer.Trace($"{Format.ToString(this)}: Enqueue (async): " + message); - result = GetBridge(message.Command).TryWriteAsync(message, isReplica); + // A bridge will be created if missing, so not nullable here + result = GetBridge(message)!.TryWriteAsync(message, isReplica); } else { @@ -799,7 +913,7 @@ internal ValueTask WriteDirectOrQueueFireAndForgetAsync(PhysicalConnection co } else { - result = bridge.WriteMessageTakingWriteLockAsync(connection, message); + result = bridge.WriteMessageTakingWriteLockAsync(connection, message, bypassBacklog: true); } } @@ -811,7 +925,7 @@ internal ValueTask WriteDirectOrQueueFireAndForgetAsync(PhysicalConnection co return default; } - private PhysicalBridge CreateBridge(ConnectionType type, LogProxy log) + private PhysicalBridge? CreateBridge(ConnectionType type, ILogger? log) { if (Multiplexer.IsDisposed) return null; Multiplexer.Trace(type.ToString()); @@ -820,27 +934,84 @@ private PhysicalBridge CreateBridge(ConnectionType type, LogProxy log) return bridge; } - private async Task HandshakeAsync(PhysicalConnection connection, LogProxy log) + private async Task HandshakeAsync(PhysicalConnection connection, ILogger? log) { - log?.WriteLine($"{Format.ToString(this)}: Server handshake"); + log?.LogInformationServerHandshake(new(this)); if (connection == null) { Multiplexer.Trace("No connection!?"); return; } Message msg; - // note that we need "" (not null) for password in the case of 'nopass' logins - string user = Multiplexer.RawConfig.User, password = Multiplexer.RawConfig.Password ?? ""; - if (!string.IsNullOrWhiteSpace(user)) + // Note that we need "" (not null) for password in the case of 'nopass' logins + var config = Multiplexer.RawConfig; + string? 
user = config.User; + string password = config.Password ?? ""; + + string clientName = Multiplexer.ClientName; + if (!string.IsNullOrWhiteSpace(clientName)) + { + clientName = nameSanitizer.Replace(clientName, ""); + } + + // NOTE: + // we might send the auth and client-name *twice* in RESP3 mode; this is intentional: + // - we don't know for sure which commands are available; HELLO is not always available, + // even on v6 servers, and we don't usually even know the server version yet; likewise, + // CLIENT could be disabled/renamed + // - on an authenticated server, you MUST issue HELLO with AUTH, so we can't avoid it there + // - but if the HELLO with AUTH isn't recognized, we might still need to auth; the following is + // legal in all scenarios, and results in a consistent state: + // + // (auth enabled) + // + // HELLO 3 AUTH {user} {password} SETNAME {client} + // AUTH {user} {password} + // CLIENT SETNAME {client} + // + // (auth disabled) + // + // HELLO 3 SETNAME {client} + // CLIENT SETNAME {client} + // + // this might look a little redundant, but: we only do it once per connection, and it isn't + // many bytes different; this allows us to pipeline the entire handshake without having to + // add latency + + // note on the use of FireAndForget here; in F+F, the result processor is still invoked, which + // is what we need for things to work; what *doesn't* happen is the result-box activation etc; + // that's fine and doesn't cause a problem; if we wanted we could probably just discard (`_ =`) + // the various tasks and just `return connection.FlushAsync();` - however, since handshake is low + // volume, we can afford to optimize for a good stack-trace rather than avoiding state machines. + ResultProcessor? 
autoConfig = null; + if (Multiplexer.RawConfig.TryResp3()) // note this includes an availability check on HELLO + { + log?.LogInformationAuthenticatingViaHello(new(this)); + var hello = Message.CreateHello(3, user, password, clientName, CommandFlags.FireAndForget); + hello.SetInternalCall(); + await WriteDirectOrQueueFireAndForgetAsync(connection, hello, autoConfig ??= ResultProcessor.AutoConfigureProcessor.Create(log)).ForAwait(); + + // note that the server can reject RESP3 via either an -ERR response (HELLO not understood), or by simply saying "nope", + // so we don't set the actual .Protocol until we process the result of the HELLO request + } + else + { + // if we're not even issuing HELLO, we're RESP2 + connection.SetProtocol(RedisProtocol.Resp2); + } + + // note: we auth EVEN IF we have used HELLO to AUTH; because otherwise the fallback/detection path is pure hell, + // and: we're pipelined here, so... meh + if (!string.IsNullOrWhiteSpace(user) && Multiplexer.CommandMap.IsAvailable(RedisCommand.AUTH)) { - log?.WriteLine($"{Format.ToString(this)}: Authenticating (user/password)"); + log?.LogInformationAuthenticatingUserPassword(new(this)); msg = Message.Create(-1, CommandFlags.FireAndForget, RedisCommand.AUTH, (RedisValue)user, (RedisValue)password); msg.SetInternalCall(); await WriteDirectOrQueueFireAndForgetAsync(connection, msg, ResultProcessor.DemandOK).ForAwait(); } - else if (!string.IsNullOrWhiteSpace(password)) + else if (!string.IsNullOrWhiteSpace(password) && Multiplexer.CommandMap.IsAvailable(RedisCommand.AUTH)) { - log?.WriteLine($"{Format.ToString(this)}: Authenticating (password)"); + log?.LogInformationAuthenticatingPassword(new(this)); msg = Message.Create(-1, CommandFlags.FireAndForget, RedisCommand.AUTH, (RedisValue)password); msg.SetInternalCall(); await WriteDirectOrQueueFireAndForgetAsync(connection, msg, ResultProcessor.DemandOK).ForAwait(); @@ -848,69 +1019,109 @@ private async Task HandshakeAsync(PhysicalConnection connection, LogProxy 
log) if (Multiplexer.CommandMap.IsAvailable(RedisCommand.CLIENT)) { - string name = Multiplexer.ClientName; - if (!string.IsNullOrWhiteSpace(name)) + if (!string.IsNullOrWhiteSpace(clientName)) + { + log?.LogInformationSettingClientName(new(this), clientName); + msg = Message.Create(-1, CommandFlags.FireAndForget, RedisCommand.CLIENT, RedisLiterals.SETNAME, (RedisValue)clientName); + msg.SetInternalCall(); + await WriteDirectOrQueueFireAndForgetAsync(connection, msg, ResultProcessor.DemandOK).ForAwait(); + } + + if (config.SetClientLibrary) { - name = nameSanitizer.Replace(name, ""); - if (!string.IsNullOrWhiteSpace(name)) + // note that this is a relatively new feature, but usually we won't know the + // server version, so we will use this speculatively and hope for the best + log?.LogInformationSettingClientLibVer(new(this)); + + var libName = Multiplexer.GetFullLibraryName(); + if (!string.IsNullOrWhiteSpace(libName)) + { + msg = Message.Create(-1, CommandFlags.FireAndForget, RedisCommand.CLIENT, RedisLiterals.SETINFO, RedisLiterals.lib_name, libName); + msg.SetInternalCall(); + await WriteDirectOrQueueFireAndForgetAsync(connection, msg, ResultProcessor.DemandOK).ForAwait(); + } + + var version = ClientInfoSanitize(Utils.GetLibVersion()); + if (!string.IsNullOrWhiteSpace(version)) { - log?.WriteLine($"{Format.ToString(this)}: Setting client name: {name}"); - msg = Message.Create(-1, CommandFlags.FireAndForget, RedisCommand.CLIENT, RedisLiterals.SETNAME, (RedisValue)name); + msg = Message.Create(-1, CommandFlags.FireAndForget, RedisCommand.CLIENT, RedisLiterals.SETINFO, RedisLiterals.lib_ver, version); msg.SetInternalCall(); await WriteDirectOrQueueFireAndForgetAsync(connection, msg, ResultProcessor.DemandOK).ForAwait(); } } + + msg = Message.Create(-1, CommandFlags.FireAndForget, RedisCommand.CLIENT, RedisLiterals.ID); + msg.SetInternalCall(); + await WriteDirectOrQueueFireAndForgetAsync(connection, msg, autoConfig ??= 
ResultProcessor.AutoConfigureProcessor.Create(log)).ForAwait(); } var bridge = connection.BridgeCouldBeNull; - if (bridge == null) + if (bridge is null) { return; } - var connType = bridge.ConnectionType; + var connType = bridge.ConnectionType; if (connType == ConnectionType.Interactive) { - await AutoConfigureAsync(connection, log); + await AutoConfigureAsync(connection, log).ForAwait(); } var tracer = GetTracerMessage(true); tracer = LoggingMessage.Create(log, tracer); - log?.WriteLine($"{Format.ToString(this)}: Sending critical tracer (handshake): {tracer.CommandAndKey}"); + log?.LogInformationSendingCriticalTracer(new(this), tracer.CommandAndKey); await WriteDirectOrQueueFireAndForgetAsync(connection, tracer, ResultProcessor.EstablishConnection).ForAwait(); - // note: this **must** be the last thing on the subscription handshake, because after this + // Note: this **must** be the last thing on the subscription handshake, because after this // we will be in subscriber mode: regular commands cannot be sent if (connType == ConnectionType.Subscription) { var configChannel = Multiplexer.ConfigurationChangedChannel; if (configChannel != null) { - msg = Message.Create(-1, CommandFlags.FireAndForget, RedisCommand.SUBSCRIBE, (RedisChannel)configChannel); + msg = Message.Create(-1, CommandFlags.FireAndForget, RedisCommand.SUBSCRIBE, RedisChannel.Literal(configChannel)); + // Note: this is NOT internal, we want it to queue in a backlog for sending when ready if necessary await WriteDirectOrQueueFireAndForgetAsync(connection, msg, ResultProcessor.TrackSubscriptions).ForAwait(); } } - log?.WriteLine($"{Format.ToString(this)}: Flushing outbound buffer"); + log?.LogInformationFlushingOutboundBuffer(new(this)); await connection.FlushAsync().ForAwait(); } - private void SetConfig(ref T field, T value, [CallerMemberName] string caller = null) + private void SetConfig(ref T field, T value, [CallerMemberName] string? 
caller = null) { if (!EqualityComparer.Default.Equals(field, value)) { - Multiplexer.Trace(caller + " changed from " + field + " to " + value, "Configuration"); + // multiplexer might be null here in some test scenarios; just roll with it... + Multiplexer?.Trace(caller + " changed from " + field + " to " + value, "Configuration"); field = value; - Multiplexer.ReconfigureIfNeeded(EndPoint, false, caller); + ClearMemoized(); + Multiplexer?.ReconfigureIfNeeded(EndPoint, false, caller!); } } + internal static string ClientInfoSanitize(string? value) + => string.IsNullOrWhiteSpace(value) ? "" : nameSanitizer.Replace(value!.Trim(), "-"); + + private void ClearMemoized() + { + supportsDatabases = null; + supportsPrimaryWrites = null; + } /// - /// For testing only + /// For testing only. /// - internal void SimulateConnectionFailure() + internal void SimulateConnectionFailure(SimulatedFailureType failureType) + { + interactive?.SimulateConnectionFailure(failureType); + subscription?.SimulateConnectionFailure(failureType); + } + + internal bool HasPendingCallerFacingItems() { - interactive?.SimulateConnectionFailure(); - subscription?.SimulateConnectionFailure(); + // check whichever bridges exist + if (interactive?.HasPendingCallerFacingItems() == true) return true; + return subscription?.HasPendingCallerFacingItems() ?? 
false; } } } diff --git a/src/StackExchange.Redis/ServerSelectionStrategy.cs b/src/StackExchange.Redis/ServerSelectionStrategy.cs index 45dbce923..48ba32a77 100644 --- a/src/StackExchange.Redis/ServerSelectionStrategy.cs +++ b/src/StackExchange.Redis/ServerSelectionStrategy.cs @@ -1,4 +1,6 @@ using System; +using System.Buffers; +using System.Diagnostics; using System.Net; using System.Threading; @@ -8,76 +10,140 @@ internal sealed class ServerSelectionStrategy { public const int NoSlot = -1, MultipleSlots = -2; private const int RedisClusterSlotCount = 16384; -#pragma warning disable IDE1006 // Naming Styles - private static readonly ushort[] s_crc16tab = new ushort[] + private static readonly ushort[] Crc16tab = new ushort[] { - 0x0000,0x1021,0x2042,0x3063,0x4084,0x50a5,0x60c6,0x70e7, - 0x8108,0x9129,0xa14a,0xb16b,0xc18c,0xd1ad,0xe1ce,0xf1ef, - 0x1231,0x0210,0x3273,0x2252,0x52b5,0x4294,0x72f7,0x62d6, - 0x9339,0x8318,0xb37b,0xa35a,0xd3bd,0xc39c,0xf3ff,0xe3de, - 0x2462,0x3443,0x0420,0x1401,0x64e6,0x74c7,0x44a4,0x5485, - 0xa56a,0xb54b,0x8528,0x9509,0xe5ee,0xf5cf,0xc5ac,0xd58d, - 0x3653,0x2672,0x1611,0x0630,0x76d7,0x66f6,0x5695,0x46b4, - 0xb75b,0xa77a,0x9719,0x8738,0xf7df,0xe7fe,0xd79d,0xc7bc, - 0x48c4,0x58e5,0x6886,0x78a7,0x0840,0x1861,0x2802,0x3823, - 0xc9cc,0xd9ed,0xe98e,0xf9af,0x8948,0x9969,0xa90a,0xb92b, - 0x5af5,0x4ad4,0x7ab7,0x6a96,0x1a71,0x0a50,0x3a33,0x2a12, - 0xdbfd,0xcbdc,0xfbbf,0xeb9e,0x9b79,0x8b58,0xbb3b,0xab1a, - 0x6ca6,0x7c87,0x4ce4,0x5cc5,0x2c22,0x3c03,0x0c60,0x1c41, - 0xedae,0xfd8f,0xcdec,0xddcd,0xad2a,0xbd0b,0x8d68,0x9d49, - 0x7e97,0x6eb6,0x5ed5,0x4ef4,0x3e13,0x2e32,0x1e51,0x0e70, - 0xff9f,0xefbe,0xdfdd,0xcffc,0xbf1b,0xaf3a,0x9f59,0x8f78, - 0x9188,0x81a9,0xb1ca,0xa1eb,0xd10c,0xc12d,0xf14e,0xe16f, - 0x1080,0x00a1,0x30c2,0x20e3,0x5004,0x4025,0x7046,0x6067, - 0x83b9,0x9398,0xa3fb,0xb3da,0xc33d,0xd31c,0xe37f,0xf35e, - 0x02b1,0x1290,0x22f3,0x32d2,0x4235,0x5214,0x6277,0x7256, - 0xb5ea,0xa5cb,0x95a8,0x8589,0xf56e,0xe54f,0xd52c,0xc50d, - 
0x34e2,0x24c3,0x14a0,0x0481,0x7466,0x6447,0x5424,0x4405, - 0xa7db,0xb7fa,0x8799,0x97b8,0xe75f,0xf77e,0xc71d,0xd73c, - 0x26d3,0x36f2,0x0691,0x16b0,0x6657,0x7676,0x4615,0x5634, - 0xd94c,0xc96d,0xf90e,0xe92f,0x99c8,0x89e9,0xb98a,0xa9ab, - 0x5844,0x4865,0x7806,0x6827,0x18c0,0x08e1,0x3882,0x28a3, - 0xcb7d,0xdb5c,0xeb3f,0xfb1e,0x8bf9,0x9bd8,0xabbb,0xbb9a, - 0x4a75,0x5a54,0x6a37,0x7a16,0x0af1,0x1ad0,0x2ab3,0x3a92, - 0xfd2e,0xed0f,0xdd6c,0xcd4d,0xbdaa,0xad8b,0x9de8,0x8dc9, - 0x7c26,0x6c07,0x5c64,0x4c45,0x3ca2,0x2c83,0x1ce0,0x0cc1, - 0xef1f,0xff3e,0xcf5d,0xdf7c,0xaf9b,0xbfba,0x8fd9,0x9ff8, - 0x6e17,0x7e36,0x4e55,0x5e74,0x2e93,0x3eb2,0x0ed1,0x1ef0 + 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, + 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef, + 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6, + 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de, + 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485, + 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d, + 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4, + 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc, + 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823, + 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b, + 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12, + 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a, + 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41, + 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49, + 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70, + 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78, + 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f, + 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067, + 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e, + 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256, + 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 
0xf56e, 0xe54f, 0xd52c, 0xc50d, + 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, + 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c, + 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634, + 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab, + 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3, + 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a, + 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92, + 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9, + 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1, + 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8, + 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0, }; -#pragma warning restore IDE1006 // Naming Styles - private readonly ConnectionMultiplexer multiplexer; - private int anyStartOffset; + private readonly ConnectionMultiplexer? multiplexer; + private int anyStartOffset = SharedRandom.Next(); // initialize to a random value so routing isn't uniform - private ServerEndPoint[] map; + #if NET + private static Random SharedRandom => Random.Shared; + #else + private static Random SharedRandom { get; } = new(); + #endif - public ServerSelectionStrategy(ConnectionMultiplexer multiplexer) - { - this.multiplexer = multiplexer; - } + private ServerEndPoint[]? map; + + public ServerSelectionStrategy(ConnectionMultiplexer? multiplexer) => this.multiplexer = multiplexer; public ServerType ServerType { get; set; } = ServerType.Standalone; - internal int TotalSlots => RedisClusterSlotCount; + internal static int TotalSlots => RedisClusterSlotCount; /// - /// Computes the hash-slot that would be used by the given key + /// Computes the hash-slot that would be used by the given key. /// /// The to determine a slot ID for. public int HashSlot(in RedisKey key) - => ServerType == ServerType.Standalone ? 
NoSlot : GetClusterSlot(key); + { + if (ServerType == ServerType.Standalone || key.IsNull) return NoSlot; + if (key.TryGetSimpleBuffer(out var arr)) // key was constructed from a byte[] + { + return GetClusterSlot(arr); + } + else + { + var length = key.TotalLength(); + if (length <= 256) + { + Span span = stackalloc byte[length]; + var written = key.CopyTo(span); + Debug.Assert(written == length, "key length/write error"); + return GetClusterSlot(span); + } + else + { + arr = ArrayPool.Shared.Rent(length); + var span = new Span(arr, 0, length); + var written = key.CopyTo(span); + Debug.Assert(written == length, "key length/write error"); + var result = GetClusterSlot(span); + ArrayPool.Shared.Return(arr); + return result; + } + } + } + + private byte[] ChannelPrefix => multiplexer?.ChannelPrefix ?? []; + + /// + /// Computes the hash-slot that would be used by the given channel. + /// + /// The to determine a slot ID for. + public int HashSlot(in RedisChannel channel) + { + if (ServerType == ServerType.Standalone || channel.IsNull) return NoSlot; + + ReadOnlySpan routingSpan = channel.RoutingSpan; + byte[] prefix; + return channel.IgnoreChannelPrefix || (prefix = ChannelPrefix).Length == 0 + ? GetClusterSlot(routingSpan) : GetClusterSlotWithPrefix(prefix, routingSpan); + + static int GetClusterSlotWithPrefix(byte[] prefixRaw, ReadOnlySpan routingSpan) + { + ReadOnlySpan prefixSpan = prefixRaw; + const int MAX_STACK = 128; + byte[]? lease = null; + var totalLength = prefixSpan.Length + routingSpan.Length; + var span = totalLength <= MAX_STACK + ? stackalloc byte[MAX_STACK] + : (lease = ArrayPool.Shared.Rent(totalLength)); + + prefixSpan.CopyTo(span); + routingSpan.CopyTo(span.Slice(prefixSpan.Length)); + var result = GetClusterSlot(span.Slice(0, totalLength)); + if (lease is not null) ArrayPool.Shared.Return(lease); + return result; + } + } - private static unsafe int GetClusterSlot(in RedisKey key) + /// + /// Gets the hashslot for a given byte sequence. 
+ /// + /// + /// HASH_SLOT = CRC16(key) mod 16384. + /// + internal static unsafe int GetClusterSlot(ReadOnlySpan key) { - //HASH_SLOT = CRC16(key) mod 16384 - if (key.IsNull) return NoSlot; unchecked { - var blob = (byte[])key; - fixed (byte* ptr = blob) + fixed (byte* ptr = key) { - fixed (ushort* crc16tab = s_crc16tab) + fixed (ushort* crc16tab = ServerSelectionStrategy.Crc16tab) { - int offset = 0, count = blob.Length, start, end; + int offset = 0, count = key.Length, start, end; if ((start = IndexOf(ptr, (byte)'{', 0, count - 1)) >= 0 && (end = IndexOf(ptr, (byte)'}', start + 1, count)) >= 0 && --end != start) @@ -95,36 +161,47 @@ private static unsafe int GetClusterSlot(in RedisKey key) } } - public ServerEndPoint Select(Message message) + public ServerEndPoint? Select(Message message, bool allowDisconnected = false) { - if (message == null) throw new ArgumentNullException(nameof(message)); int slot = NoSlot; switch (ServerType) { case ServerType.Cluster: - case ServerType.Twemproxy: // strictly speaking twemproxy uses a different hashing algo, but the hash-tag behavior is - // the same, so this does a pretty good job of spotting illegal commands before sending them - + // strictly speaking some proxies use a different hashing algorithm, but the hash-tag behavior is + // the same, so this does a pretty good job of spotting illegal commands before sending them + case ServerType.Twemproxy: slot = message.GetHashSlot(this); - if (slot == MultipleSlots) throw ExceptionFactory.MultiSlot(multiplexer.IncludeDetailInExceptions, message); + if (slot == MultipleSlots) throw ExceptionFactory.MultiSlot(multiplexer?.RawConfig?.IncludeDetailInExceptions ?? 
false, message); + break; + /* just shown for completeness + case ServerType.Standalone: // don't use sharding + case ServerType.Envoyproxy: // defer to the proxy; see #2426 + default: // unknown scenario; defer to the server break; + */ } - return Select(slot, message.Command, message.Flags); + return Select(slot, message.Command, message.Flags, allowDisconnected); } - public ServerEndPoint Select(RedisCommand command, in RedisKey key, CommandFlags flags) + public ServerEndPoint? Select(RedisCommand command, in RedisKey key, CommandFlags flags, bool allowDisconnected = false) { int slot = ServerType == ServerType.Cluster ? HashSlot(key) : NoSlot; - return Select(slot, command, flags); + return Select(slot, command, flags, allowDisconnected); } - public bool TryResend(int hashSlot, Message message, EndPoint endpoint, bool isMoved) + public ServerEndPoint? Select(RedisCommand command, in RedisChannel channel, CommandFlags flags, bool allowDisconnected = false) + { + int slot = ServerType == ServerType.Cluster ? HashSlot(channel) : NoSlot; + return Select(slot, command, flags, allowDisconnected); + } + + public bool TryResend(int hashSlot, Message message, EndPoint endpoint, bool isMoved, bool isSelf) { try { - if (ServerType == ServerType.Standalone || hashSlot < 0 || hashSlot >= RedisClusterSlotCount) return false; + if ((ServerType == ServerType.Standalone && !isSelf) || hashSlot < 0 || hashSlot >= RedisClusterSlotCount) return false; - ServerEndPoint server = multiplexer.GetServerEndPoint(endpoint); + ServerEndPoint? 
server = multiplexer?.GetServerEndPoint(endpoint); if (server != null) { bool retry = false; @@ -134,11 +211,11 @@ public bool TryResend(int hashSlot, Message message, EndPoint endpoint, bool isM message.SetNoRedirect(); // once is enough if (isMoved) message.SetInternalCall(); - // note that everything so far is talking about MASTER nodes; we might be - // wanting a REPLICA, so we'll check - ServerEndPoint resendVia = null; + // Note that everything so far is talking about PRIMARY nodes + // We might be wanting a REPLICA, so we'll check + ServerEndPoint? resendVia = null; var command = message.Command; - switch (Message.GetMasterReplicaFlags(message.Flags)) + switch (Message.GetPrimaryReplicaFlags(message.Flags)) { case CommandFlags.DemandMaster: resendVia = server.IsSelectable(command, isMoved) ? server : null; @@ -155,25 +232,25 @@ public bool TryResend(int hashSlot, Message message, EndPoint endpoint, bool isM } if (resendVia == null) { - multiplexer.Trace("Unable to resend to " + endpoint); + multiplexer?.Trace("Unable to resend to " + endpoint); } else { message.PrepareToResend(resendVia, isMoved); -#pragma warning disable CS0618 +#pragma warning disable CS0618 // Type or member is obsolete retry = resendVia.TryWriteSync(message) == WriteResult.Success; #pragma warning restore CS0618 } } - if (isMoved) // update map; note we can still update the map even if we aren't actually goint to resend + if (isMoved) // update map; note we can still update the map even if we aren't actually going to resend { var arr = MapForMutation(); var oldServer = arr[hashSlot]; arr[hashSlot] = server; if (oldServer != server) { - multiplexer.OnHashSlotMoved(hashSlot, oldServer?.EndPoint, endpoint); + multiplexer?.OnHashSlotMoved(hashSlot, oldServer?.EndPoint, endpoint); } } @@ -187,7 +264,7 @@ public bool TryResend(int hashSlot, Message message, EndPoint endpoint, bool isM } } - internal int CombineSlot(int oldSlot, int newSlot) + internal static int CombineSlot(int oldSlot, int 
newSlot) { if (oldSlot == MultipleSlots || newSlot == NoSlot) return oldSlot; if (oldSlot == NoSlot) return newSlot; @@ -229,24 +306,24 @@ private static unsafe int IndexOf(byte* ptr, byte value, int start, int end) return -1; } - private ServerEndPoint Any(RedisCommand command, CommandFlags flags) - { - return multiplexer.AnyConnected(ServerType, (uint)Interlocked.Increment(ref anyStartOffset), command, flags); - } + private ServerEndPoint? Any(RedisCommand command, CommandFlags flags, bool allowDisconnected) => + multiplexer?.AnyServer(ServerType, (uint)Interlocked.Increment(ref anyStartOffset), command, flags, allowDisconnected); - private ServerEndPoint FindMaster(ServerEndPoint endpoint, RedisCommand command) + private static ServerEndPoint? FindPrimary(ServerEndPoint endpoint, RedisCommand command) { + ServerEndPoint? cursor = endpoint; int max = 5; do { - if (!endpoint.IsReplica && endpoint.IsSelectable(command)) return endpoint; + if (!cursor.IsReplica && cursor.IsSelectable(command)) return cursor; - endpoint = endpoint.Master; - } while (endpoint != null && --max != 0); + cursor = cursor.Primary; + } + while (cursor != null && --max != 0); return null; } - private ServerEndPoint FindReplica(ServerEndPoint endpoint, RedisCommand command, bool allowDisconnected = false) + private static ServerEndPoint? FindReplica(ServerEndPoint endpoint, RedisCommand command, bool allowDisconnected = false) { if (endpoint.IsReplica && endpoint.IsSelectable(command, allowDisconnected)) return endpoint; @@ -275,36 +352,57 @@ private ServerEndPoint[] MapForMutation() return arr; } - private ServerEndPoint Select(int slot, RedisCommand command, CommandFlags flags) + internal ServerEndPoint? 
Select(int slot, RedisCommand command, CommandFlags flags, bool allowDisconnected) { - flags = Message.GetMasterReplicaFlags(flags); // only intersted in master/replica preferences + // Only interested in primary/replica preferences + flags = Message.GetPrimaryReplicaFlags(flags); - ServerEndPoint[] arr; - if (slot == NoSlot || (arr = map) == null) return Any(command, flags); + ServerEndPoint[]? arr; + if (slot == NoSlot || (arr = map) == null) return Any(command, flags, allowDisconnected); - ServerEndPoint endpoint = arr[slot], testing; - // but: ^^^ is the MASTER slots; if we want a replica, we need to do some thinking + ServerEndPoint endpoint = arr[slot]; + ServerEndPoint? testing; // but: ^^^ is the PRIMARY slots; if we want a replica, we need to do some thinking if (endpoint != null) { switch (flags) { case CommandFlags.DemandReplica: - return FindReplica(endpoint, command) ?? Any(command, flags); + return FindReplica(endpoint, command) ?? Any(command, flags, allowDisconnected); case CommandFlags.PreferReplica: testing = FindReplica(endpoint, command); - if (testing != null) return testing; + if (testing is not null) return testing; break; case CommandFlags.DemandMaster: - return FindMaster(endpoint, command) ?? Any(command, flags); + return FindPrimary(endpoint, command) ?? 
Any(command, flags, allowDisconnected); case CommandFlags.PreferMaster: - testing = FindMaster(endpoint, command); - if (testing != null) return testing; + testing = FindPrimary(endpoint, command); + if (testing is not null) return testing; break; } - if (endpoint.IsSelectable(command)) return endpoint; + if (endpoint.IsSelectable(command, allowDisconnected)) return endpoint; + } + return Any(command, flags, allowDisconnected); + } + + internal bool CanServeSlot(ServerEndPoint server, in RedisChannel channel) + => CanServeSlot(server, HashSlot(in channel)); + + internal bool CanServeSlot(ServerEndPoint server, int slot) + { + if (slot == NoSlot) return true; + var arr = map; + if (arr is null) return true; // means "any" + + var primary = arr[slot]; + if (server == primary) return true; + + var replicas = primary.Replicas; + for (int i = 0; i < replicas.Length; i++) + { + if (server == replicas[i]) return true; } - return Any(command, flags); + return false; } } } diff --git a/src/StackExchange.Redis/SkipLocalsInit.cs b/src/StackExchange.Redis/SkipLocalsInit.cs new file mode 100644 index 000000000..494a37a57 --- /dev/null +++ b/src/StackExchange.Redis/SkipLocalsInit.cs @@ -0,0 +1,14 @@ +// turn off ".locals init"; this gives a small perf boost, but is particularly relevant when stackalloc is used +// side-effects: locals don't have defined zero values; normally this doesn't matter, due to "definite assignment", +// but it *can* be observed when using unsafe code, any "out" method that cheats, or "stackalloc" - the last is +// the most relevant to us, so we have audited that no "stackalloc" use expects the buffers to be zero'd initially +[module:System.Runtime.CompilerServices.SkipLocalsInit] + +#if !NET +// when not available, we can spoof it in a private type +namespace System.Runtime.CompilerServices +{ + [AttributeUsage(AttributeTargets.Module | AttributeTargets.Class | AttributeTargets.Struct | AttributeTargets.Constructor | AttributeTargets.Method | 
AttributeTargets.Property | AttributeTargets.Event | AttributeTargets.Interface, Inherited = false)] + internal sealed class SkipLocalsInitAttribute : Attribute { } +} +#endif diff --git a/src/StackExchange.Redis/SocketManager.cs b/src/StackExchange.Redis/SocketManager.cs index f7ea567ea..146e576ff 100644 --- a/src/StackExchange.Redis/SocketManager.cs +++ b/src/StackExchange.Redis/SocketManager.cs @@ -15,19 +15,19 @@ namespace StackExchange.Redis public sealed partial class SocketManager : IDisposable { /// - /// Gets the name of this SocketManager instance + /// Gets the name of this SocketManager instance. /// public string Name { get; } /// - /// Creates a new instance + /// Creates a new instance. /// /// The name for this . public SocketManager(string name) : this(name, DEFAULT_WORKERS, SocketManagerOptions.None) { } /// - /// Creates a new instance + /// Creates a new instance. /// /// The name for this . /// Whether this should use high priority sockets. @@ -35,46 +35,48 @@ public SocketManager(string name, bool useHighPrioritySocketThreads) : this(name, DEFAULT_WORKERS, UseHighPrioritySocketThreads(useHighPrioritySocketThreads)) { } /// - /// Creates a new (optionally named) instance + /// Creates a new (optionally named) instance. /// /// The name for this . /// the number of dedicated workers for this . /// Whether this should use high priority sockets. public SocketManager(string name, int workerCount, bool useHighPrioritySocketThreads) - : this(name, workerCount, UseHighPrioritySocketThreads(useHighPrioritySocketThreads)) {} + : this(name, workerCount, UseHighPrioritySocketThreads(useHighPrioritySocketThreads)) { } private static SocketManagerOptions UseHighPrioritySocketThreads(bool value) => value ? SocketManagerOptions.UseHighPrioritySocketThreads : SocketManagerOptions.None; /// - /// Additional options for configuring the socket manager + /// Additional options for configuring the socket manager. 
/// [Flags] public enum SocketManagerOptions { /// - /// No additional options + /// No additional options. /// None = 0, + /// /// Whether the should use high priority sockets. /// UseHighPrioritySocketThreads = 1 << 0, + /// - /// Use the regular thread-pool for all scheduling + /// Use the regular thread-pool for all scheduling. /// UseThreadPool = 1 << 1, } /// - /// Creates a new (optionally named) instance + /// Creates a new (optionally named) instance. /// /// The name for this . - /// the number of dedicated workers for this . - /// - public SocketManager(string name = null, int workerCount = 0, SocketManagerOptions options = SocketManagerOptions.None) + /// The number of dedicated workers for this . + /// Options to use when creating the socket manager. + public SocketManager(string? name = null, int workerCount = 0, SocketManagerOptions options = SocketManagerOptions.None) { - if (string.IsNullOrWhiteSpace(name)) name = GetType().Name; + if (name.IsNullOrWhiteSpace()) name = GetType().Name; if (workerCount <= 0) workerCount = DEFAULT_WORKERS; Name = name; bool useHighPrioritySocketThreads = (options & SocketManagerOptions.UseHighPrioritySocketThreads) != 0, @@ -85,17 +87,18 @@ public SocketManager(string name = null, int workerCount = 0, SocketManagerOptio var defaultPipeOptions = PipeOptions.Default; - long Send_PauseWriterThreshold = Math.Max( - 512 * 1024,// send: let's give it up to 0.5MiB + long send_PauseWriterThreshold = Math.Max( + 512 * 1024, // send: let's give it up to 0.5MiB defaultPipeOptions.PauseWriterThreshold); // or the default, whichever is bigger - long Send_ResumeWriterThreshold = Math.Max( - Send_PauseWriterThreshold / 2, + long send_ResumeWriterThreshold = Math.Max( + send_PauseWriterThreshold / 2, defaultPipeOptions.ResumeWriterThreshold); Scheduler = PipeScheduler.ThreadPool; if (!useThreadPool) { - Scheduler = new DedicatedThreadPoolPipeScheduler(name + ":IO", + Scheduler = new DedicatedThreadPoolPipeScheduler( + name: name + 
":IO", workerCount: workerCount, priority: useHighPrioritySocketThreads ? ThreadPriority.AboveNormal : ThreadPriority.Normal); } @@ -103,8 +106,8 @@ public SocketManager(string name = null, int workerCount = 0, SocketManagerOptio pool: defaultPipeOptions.Pool, readerScheduler: Scheduler, writerScheduler: Scheduler, - pauseWriterThreshold: Send_PauseWriterThreshold, - resumeWriterThreshold: Send_ResumeWriterThreshold, + pauseWriterThreshold: send_PauseWriterThreshold, + resumeWriterThreshold: send_ResumeWriterThreshold, minimumSegmentSize: Math.Max(defaultPipeOptions.MinimumSegmentSize, MINIMUM_SEGMENT_SIZE), useSynchronizationContext: false); ReceivePipeOptions = new PipeOptions( @@ -118,7 +121,7 @@ public SocketManager(string name = null, int workerCount = 0, SocketManagerOptio } /// - /// Default / shared socket manager using a dedicated thread-pool + /// Default / shared socket manager using a dedicated thread-pool. /// public static SocketManager Shared { @@ -139,7 +142,7 @@ public static SocketManager Shared } /// - /// Shared socket manager using the main thread-pool + /// Shared socket manager using the main thread-pool. /// public static SocketManager ThreadPool { @@ -159,7 +162,9 @@ public static SocketManager ThreadPool } } - /// Returns a string that represents the current object. + /// + /// Returns a string that represents the current object. + /// /// A string that represents the current object. public override string ToString() { @@ -168,7 +173,7 @@ public override string ToString() return $"{Name} - queue: {scheduler?.TotalServicedByQueue}, pool: {scheduler?.TotalServicedByPool}"; } - private static SocketManager s_shared, s_threadPool; + private static SocketManager? 
s_shared, s_threadPool; private const int DEFAULT_WORKERS = 5, MINIMUM_SEGMENT_SIZE = 8 * 1024; @@ -176,20 +181,25 @@ public override string ToString() internal PipeScheduler Scheduler { get; private set; } - internal DedicatedThreadPoolPipeScheduler SchedulerPool => Scheduler as DedicatedThreadPoolPipeScheduler; + internal DedicatedThreadPoolPipeScheduler? SchedulerPool => Scheduler as DedicatedThreadPoolPipeScheduler; private enum CallbackOperation { Read, - Error + Error, } /// - /// Releases all resources associated with this instance + /// Releases all resources associated with this instance. /// - public void Dispose() => Dispose(true); + public void Dispose() + { + DisposeRefs(); + GC.SuppressFinalize(this); + OnDispose(); + } - private void Dispose(bool disposing) + private void DisposeRefs() { // note: the scheduler *can't* be collected by itself - there will // be threads, and those threads will be rooting the DedicatedThreadPool; @@ -197,36 +207,29 @@ private void Dispose(bool disposing) var tmp = SchedulerPool; Scheduler = PipeScheduler.ThreadPool; try { tmp?.Dispose(); } catch { } - if (disposing) - { - GC.SuppressFinalize(this); - OnDispose(); - } } /// - /// Releases *appropriate* resources associated with this instance + /// Releases *appropriate* resources associated with this instance. /// - ~SocketManager() => Dispose(false); + ~SocketManager() => DisposeRefs(); internal static Socket CreateSocket(EndPoint endpoint) { var addressFamily = endpoint.AddressFamily; - if (addressFamily == AddressFamily.Unspecified && endpoint is DnsEndPoint) - { // default DNS to ipv4 if not specified explicitly - addressFamily = AddressFamily.InterNetwork; - } - var protocolType = addressFamily == AddressFamily.Unix ? ProtocolType.Unspecified : ProtocolType.Tcp; - var socket = new Socket(addressFamily, SocketType.Stream, protocolType); + + var socket = addressFamily == AddressFamily.Unspecified + ? 
new Socket(SocketType.Stream, protocolType) + : new Socket(addressFamily, SocketType.Stream, protocolType); SocketConnection.SetRecommendedClientOptions(socket); - //socket.SetSocketOption(SocketOptionLevel.Socket, SocketOptionName.Linger, false); + // socket.SetSocketOption(SocketOptionLevel.Socket, SocketOptionName.Linger, false); return socket; } partial void OnDispose(); - internal string GetState() + internal string? GetState() { var s = SchedulerPool; return s == null ? null : $"{s.AvailableCount} of {s.WorkerCount} available"; diff --git a/src/StackExchange.Redis/SortedSetEntry.cs b/src/StackExchange.Redis/SortedSetEntry.cs deleted file mode 100644 index 21151639b..000000000 --- a/src/StackExchange.Redis/SortedSetEntry.cs +++ /dev/null @@ -1,110 +0,0 @@ -using System; -using System.Collections.Generic; -using System.ComponentModel; - -namespace StackExchange.Redis -{ - /// - /// Describes a sorted-set element with the corresponding value - /// - public readonly struct SortedSetEntry : IEquatable, IComparable, IComparable - { - internal readonly RedisValue element; - internal readonly double score; - - /// - /// Initializes a value. - /// - /// The to get an entry for. - /// The redis score for . 
- public SortedSetEntry(RedisValue element, double score) - { - this.element = element; - this.score = score; - } - - /// - /// The unique element stored in the sorted set - /// - public RedisValue Element => element; - - /// - /// The score against the element - /// - public double Score => score; - - /// - /// The score against the element - /// - [Browsable(false)] - [EditorBrowsable(EditorBrowsableState.Never), Obsolete("Please use Score", false)] - public double Value { get { return score; } } - - /// - /// The unique element stored in the sorted set - /// - [Browsable(false)] - [EditorBrowsable(EditorBrowsableState.Never), Obsolete("Please use Element", false)] - public RedisValue Key { get { return element; } } - - /// - /// Converts to a key/value pair - /// - /// The to get a for. - public static implicit operator KeyValuePair(SortedSetEntry value) => new KeyValuePair(value.element, value.score); - - /// - /// Converts from a key/value pair - /// - /// The to get a for. - public static implicit operator SortedSetEntry(KeyValuePair value) => new SortedSetEntry(value.Key, value.Value); - - /// - /// See Object.ToString() - /// - public override string ToString() => element + ": " + score; - - /// - /// See Object.GetHashCode() - /// - public override int GetHashCode() => element.GetHashCode() ^ score.GetHashCode(); - - /// - /// Compares two values for equality - /// - /// The to compare to. - public override bool Equals(object obj) => obj is SortedSetEntry ssObj && Equals(ssObj); - - /// - /// Compares two values for equality - /// - /// The to compare to. - public bool Equals(SortedSetEntry other) => score == other.score && element == other.element; - - /// - /// Compares two values by score - /// - /// The to compare to. - public int CompareTo(SortedSetEntry other) => score.CompareTo(other.score); - - /// - /// Compares two values by score - /// - /// The to compare to. - public int CompareTo(object obj) => obj is SortedSetEntry ssObj ? 
CompareTo(ssObj) : -1; - - /// - /// Compares two values for equality - /// - /// The first to compare. - /// The second to compare. - public static bool operator ==(SortedSetEntry x, SortedSetEntry y) => x.score == y.score && x.element == y.element; - - /// - /// Compares two values for non-equality - /// - /// The first to compare. - /// The second to compare. - public static bool operator !=(SortedSetEntry x, SortedSetEntry y) => x.score != y.score || x.element != y.element; - } -} diff --git a/src/StackExchange.Redis/StackExchange.Redis.csproj b/src/StackExchange.Redis/StackExchange.Redis.csproj index a571adf1d..4bff8ce53 100644 --- a/src/StackExchange.Redis/StackExchange.Redis.csproj +++ b/src/StackExchange.Redis/StackExchange.Redis.csproj @@ -1,31 +1,62 @@  + enable - net461;netstandard2.0;net472;netcoreapp3.1;net5.0 + net461;netstandard2.0;net472;net6.0;net8.0;net10.0 High performance Redis client, incorporating both synchronous and asynchronous usage. StackExchange.Redis StackExchange.Redis StackExchange.Redis Async;Redis;Cache;PubSub;Messaging true - true $(DefineConstants);VECTOR_SAFE $(DefineConstants);UNIX_SOCKET + README.md - + + + - + + + + - + - + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/src/StackExchange.Redis/StreamConfiguration.cs b/src/StackExchange.Redis/StreamConfiguration.cs new file mode 100644 index 000000000..46e5d0ba3 --- /dev/null +++ b/src/StackExchange.Redis/StreamConfiguration.cs @@ -0,0 +1,21 @@ +using System.Diagnostics.CodeAnalysis; +using RESPite; + +namespace StackExchange.Redis; + +/// +/// Configuration parameters for a stream, for example idempotent producer (IDMP) duration and maxsize. +/// +[Experimental(Experiments.Server_8_6, UrlFormat = Experiments.UrlFormat)] +public sealed class StreamConfiguration +{ + /// + /// How long the server remembers each iid, in seconds. + /// + public long? 
IdmpDuration { get; set; } + + /// + /// Maximum number of iids the server remembers per pid. + /// + public long? IdmpMaxSize { get; set; } +} diff --git a/src/StackExchange.Redis/StreamConstants.cs b/src/StackExchange.Redis/StreamConstants.cs index 23addbf3f..92c37222a 100644 --- a/src/StackExchange.Redis/StreamConstants.cs +++ b/src/StackExchange.Redis/StreamConstants.cs @@ -1,4 +1,5 @@ - +using System; + namespace StackExchange.Redis { /// @@ -51,8 +52,6 @@ internal static class StreamConstants internal static readonly RedisValue Destroy = "DESTROY"; - internal static readonly RedisValue Group = "GROUP"; - internal static readonly RedisValue Groups = "GROUPS"; internal static readonly RedisValue JustId = "JUSTID"; @@ -60,13 +59,22 @@ internal static class StreamConstants internal static readonly RedisValue SetId = "SETID"; internal static readonly RedisValue MaxLen = "MAXLEN"; + internal static readonly RedisValue MinId = "MINID"; internal static readonly RedisValue MkStream = "MKSTREAM"; - - internal static readonly RedisValue NoAck = "NOACK"; internal static readonly RedisValue Stream = "STREAM"; - internal static readonly RedisValue Streams = "STREAMS"; + private static readonly RedisValue KeepRef = "KEEPREF", DelRef = "DELREF", Acked = "ACKED"; + + internal static readonly RedisValue Ids = "IDS"; + + internal static RedisValue GetMode(StreamTrimMode mode) => mode switch + { + StreamTrimMode.KeepReferences => KeepRef, + StreamTrimMode.DeleteReferences => DelRef, + StreamTrimMode.Acknowledged => Acked, + _ => throw new ArgumentOutOfRangeException(nameof(mode)), + }; } } diff --git a/src/StackExchange.Redis/StreamConsumer.cs b/src/StackExchange.Redis/StreamConsumer.cs deleted file mode 100644 index f92933180..000000000 --- a/src/StackExchange.Redis/StreamConsumer.cs +++ /dev/null @@ -1,25 +0,0 @@ - -namespace StackExchange.Redis -{ - /// - /// Describes a consumer off a Redis Stream. 
- /// - public readonly struct StreamConsumer - { - internal StreamConsumer(RedisValue name, int pendingMessageCount) - { - Name = name; - PendingMessageCount = pendingMessageCount; - } - - /// - /// The name of the consumer. - /// - public RedisValue Name { get; } - - /// - /// The number of messages that have been delivered by not yet acknowledged by the consumer. - /// - public int PendingMessageCount { get; } - } -} diff --git a/src/StackExchange.Redis/StreamConsumerInfo.cs b/src/StackExchange.Redis/StreamConsumerInfo.cs deleted file mode 100644 index 49b50ef7f..000000000 --- a/src/StackExchange.Redis/StreamConsumerInfo.cs +++ /dev/null @@ -1,32 +0,0 @@ - -namespace StackExchange.Redis -{ - /// - /// Describes a consumer within a consumer group, retrieved using the XINFO CONSUMERS command. - /// - public readonly struct StreamConsumerInfo - { - internal StreamConsumerInfo(string name, int pendingMessageCount, long idleTimeInMilliseconds) - { - Name = name; - PendingMessageCount = pendingMessageCount; - IdleTimeInMilliseconds = idleTimeInMilliseconds; - } - - /// - /// The name of the consumer. - /// - public string Name { get; } - - /// - /// The number of pending messages for the consumer. A pending message is one that has been - /// received by the consumer but not yet acknowledged. - /// - public int PendingMessageCount { get; } - - /// - /// The idle time, if any, for the consumer. - /// - public long IdleTimeInMilliseconds { get; } - } -} diff --git a/src/StackExchange.Redis/StreamEntry.cs b/src/StackExchange.Redis/StreamEntry.cs deleted file mode 100644 index 5611af808..000000000 --- a/src/StackExchange.Redis/StreamEntry.cs +++ /dev/null @@ -1,56 +0,0 @@ -using System; - -namespace StackExchange.Redis -{ - /// - /// Describes an entry contained in a Redis Stream. - /// - public readonly struct StreamEntry - { - internal StreamEntry(RedisValue id, NameValueEntry[] values) - { - Id = id; - Values = values; - } - - /// - /// A null stream entry. 
- /// - public static StreamEntry Null { get; } = new StreamEntry(RedisValue.Null, null); - - /// - /// The ID assigned to the message. - /// - public RedisValue Id { get; } - - /// - /// The values contained within the message. - /// - public NameValueEntry[] Values { get; } - - /// - /// Search for a specific field by name, returning the value - /// - public RedisValue this[RedisValue fieldName] - { - get - { - var values = Values; - if (values != null) - { - for (int i = 0; i < values.Length; i++) - { - if (values[i].name == fieldName) - return values[i].value; - } - } - return RedisValue.Null; - } - } - - /// - /// Indicates that the Redis Stream Entry is null. - /// - public bool IsNull => Id == RedisValue.Null && Values == null; - } -} diff --git a/src/StackExchange.Redis/StreamGroupInfo.cs b/src/StackExchange.Redis/StreamGroupInfo.cs deleted file mode 100644 index 93a35cceb..000000000 --- a/src/StackExchange.Redis/StreamGroupInfo.cs +++ /dev/null @@ -1,38 +0,0 @@ - -namespace StackExchange.Redis -{ - /// - /// Describes a consumer group retrieved using the XINFO GROUPS command. - /// - public readonly struct StreamGroupInfo - { - internal StreamGroupInfo(string name, int consumerCount, int pendingMessageCount, string lastDeliveredId) - { - Name = name; - ConsumerCount = consumerCount; - PendingMessageCount = pendingMessageCount; - LastDeliveredId = lastDeliveredId; - } - - /// - /// The name of the consumer group. - /// - public string Name { get; } - - /// - /// The number of consumers within the consumer group. - /// - public int ConsumerCount { get; } - - /// - /// The total number of pending messages for the consumer group. A pending message is one that has been - /// received by a consumer but not yet acknowledged. 
- /// - public int PendingMessageCount { get; } - - /// - /// The Id of the last message delivered to the group - /// - public string LastDeliveredId { get; } - } -} diff --git a/src/StackExchange.Redis/StreamIdempotentId.cs b/src/StackExchange.Redis/StreamIdempotentId.cs new file mode 100644 index 000000000..1ad331eda --- /dev/null +++ b/src/StackExchange.Redis/StreamIdempotentId.cs @@ -0,0 +1,83 @@ +using System; +using System.Diagnostics.CodeAnalysis; +using RESPite; + +namespace StackExchange.Redis; + +/// +/// The idempotent id for a stream entry, ensuring at-most-once production. Each producer should have a unique +/// that is stable and consistent between runs. When adding stream entries, the +/// caller can specify an that is unique and repeatable for a given data item, or omit it +/// and let the server generate it from the content of the data item. In either event: duplicates are rejected. +/// +[Experimental(Experiments.Server_8_6, UrlFormat = Experiments.UrlFormat)] +public readonly struct StreamIdempotentId +{ + // note: if exposing wider, maybe expose as a by-ref property rather than a readonly field + internal static readonly StreamIdempotentId Empty = default; + + /// + /// Create a new with the given producer id. + /// + public StreamIdempotentId(RedisValue producerId) + { + if (producerId.IsNull) throw new ArgumentNullException(nameof(producerId)); + ProducerId = producerId; + IdempotentId = RedisValue.Null; + } + + /// + /// The idempotent id for a stream entry, ensuring at-most-once production. + /// + public StreamIdempotentId(RedisValue producerId, RedisValue idempotentId) + { + if (!producerId.HasValue) throw new ArgumentNullException(nameof(producerId)); + ProducerId = producerId; + IdempotentId = idempotentId; // can be explicit null, fine + } + + /// + /// The producer of the idempotent id; this is fixed for a given data generator. 
+ /// + public RedisValue ProducerId { get; } + + /// + /// The optional idempotent id; this should be unique for a given data item. If omitted / null, + /// the server will generate the idempotent id from the content of the data item. + /// + public RedisValue IdempotentId { get; } + + /// + public override string ToString() + { + if (IdempotentId.HasValue) return $"IDMP {ProducerId} {IdempotentId}"; + if (ProducerId.HasValue) return $"IDMPAUTO {ProducerId}"; + return ""; + } + + internal int ArgCount => IdempotentId.HasValue ? 3 : ProducerId.HasValue ? 2 : 0; + + internal void WriteTo(RedisValue[] args, ref int index) + { + if (IdempotentId.HasValue) + { + args[index++] = RedisLiterals.IDMP; + args[index++] = ProducerId; + args[index++] = IdempotentId; + } + else if (ProducerId.HasValue) + { + args[index++] = RedisLiterals.IDMPAUTO; + args[index++] = ProducerId; + } + } + + /// + public override int GetHashCode() => ProducerId.GetHashCode() ^ IdempotentId.GetHashCode(); + + /// + public override bool Equals(object? obj) => + obj is StreamIdempotentId other + && ProducerId == other.ProducerId + && IdempotentId == other.IdempotentId; +} diff --git a/src/StackExchange.Redis/StreamInfo.cs b/src/StackExchange.Redis/StreamInfo.cs deleted file mode 100644 index 318224279..000000000 --- a/src/StackExchange.Redis/StreamInfo.cs +++ /dev/null @@ -1,62 +0,0 @@ - -namespace StackExchange.Redis -{ - /// - /// Describes stream information retrieved using the XINFO STREAM command. - /// - public readonly struct StreamInfo - { - internal StreamInfo( - int length, - int radixTreeKeys, - int radixTreeNodes, - int groups, - StreamEntry firstEntry, - StreamEntry lastEntry, - RedisValue lastGeneratedId) - { - Length = length; - RadixTreeKeys = radixTreeKeys; - RadixTreeNodes = radixTreeNodes; - ConsumerGroupCount = groups; - FirstEntry = firstEntry; - LastEntry = lastEntry; - LastGeneratedId = lastGeneratedId; - } - - /// - /// The number of entries in the stream. 
- /// - public int Length { get; } - - /// - /// The number of radix tree keys in the stream. - /// - public int RadixTreeKeys { get; } - - /// - /// The number of radix tree nodes in the stream. - /// - public int RadixTreeNodes { get; } - - /// - /// The number of consumers groups in the stream. - /// - public int ConsumerGroupCount { get; } - - /// - /// The first entry in the stream. - /// - public StreamEntry FirstEntry { get; } - - /// - /// The last entry in the stream. - /// - public StreamEntry LastEntry { get; } - - /// - /// The last generated id - /// - public RedisValue LastGeneratedId { get; } - } -} diff --git a/src/StackExchange.Redis/StreamInfoField.cs b/src/StackExchange.Redis/StreamInfoField.cs new file mode 100644 index 000000000..5429dec5e --- /dev/null +++ b/src/StackExchange.Redis/StreamInfoField.cs @@ -0,0 +1,121 @@ +using System; +using RESPite; + +namespace StackExchange.Redis; + +/// +/// Fields that can appear in a XINFO STREAM response. +/// +internal enum StreamInfoField +{ + /// + /// Unknown or unrecognized field. + /// + [AsciiHash("")] + Unknown = 0, + + /// + /// The number of entries in the stream. + /// + [AsciiHash("length")] + Length, + + /// + /// The number of radix tree keys. + /// + [AsciiHash("radix-tree-keys")] + RadixTreeKeys, + + /// + /// The number of radix tree nodes. + /// + [AsciiHash("radix-tree-nodes")] + RadixTreeNodes, + + /// + /// The number of consumer groups. + /// + [AsciiHash("groups")] + Groups, + + /// + /// The last generated ID. + /// + [AsciiHash("last-generated-id")] + LastGeneratedId, + + /// + /// The first entry in the stream. + /// + [AsciiHash("first-entry")] + FirstEntry, + + /// + /// The last entry in the stream. + /// + [AsciiHash("last-entry")] + LastEntry, + + /// + /// The maximum deleted entry ID (Redis 7.0+). + /// + [AsciiHash("max-deleted-entry-id")] + MaxDeletedEntryId, + + /// + /// The recorded first entry ID (Redis 7.0+). 
+ /// + [AsciiHash("recorded-first-entry-id")] + RecordedFirstEntryId, + + /// + /// The total number of entries added (Redis 7.0+). + /// + [AsciiHash("entries-added")] + EntriesAdded, + + /// + /// IDMP duration in seconds (Redis 8.6+). + /// + [AsciiHash("idmp-duration")] + IdmpDuration, + + /// + /// IDMP max size (Redis 8.6+). + /// + [AsciiHash("idmp-maxsize")] + IdmpMaxsize, + + /// + /// Number of PIDs tracked (Redis 8.6+). + /// + [AsciiHash("pids-tracked")] + PidsTracked, + + /// + /// Number of IIDs tracked (Redis 8.6+). + /// + [AsciiHash("iids-tracked")] + IidsTracked, + + /// + /// Number of IIDs added (Redis 8.6+). + /// + [AsciiHash("iids-added")] + IidsAdded, + + /// + /// Number of duplicate IIDs (Redis 8.6+). + /// + [AsciiHash("iids-duplicates")] + IidsDuplicates, +} + +/// +/// Metadata and parsing methods for StreamInfoField. +/// +internal static partial class StreamInfoFieldMetadata +{ + [AsciiHash] + internal static partial bool TryParse(ReadOnlySpan value, out StreamInfoField field); +} diff --git a/src/StackExchange.Redis/StreamPendingInfo.cs b/src/StackExchange.Redis/StreamPendingInfo.cs deleted file mode 100644 index d9393c88e..000000000 --- a/src/StackExchange.Redis/StreamPendingInfo.cs +++ /dev/null @@ -1,40 +0,0 @@ - -namespace StackExchange.Redis -{ - /// - /// Describes basic information about pending messages for a consumer group. - /// - public readonly struct StreamPendingInfo - { - internal StreamPendingInfo(int pendingMessageCount, - RedisValue lowestId, - RedisValue highestId, - StreamConsumer[] consumers) - { - PendingMessageCount = pendingMessageCount; - LowestPendingMessageId = lowestId; - HighestPendingMessageId = highestId; - Consumers = consumers; - } - - /// - /// The number of pending messages. A pending message is a message that has been consumed but not yet acknowledged. - /// - public int PendingMessageCount { get; } - - /// - /// The lowest message ID in the set of pending messages. 
- /// - public RedisValue LowestPendingMessageId { get; } - - /// - /// The highest message ID in the set of pending messages. - /// - public RedisValue HighestPendingMessageId { get; } - - /// - /// An array of consumers within the consumer group that have pending messages. - /// - public StreamConsumer[] Consumers { get; } - } -} diff --git a/src/StackExchange.Redis/StreamPendingMessageInfo.cs b/src/StackExchange.Redis/StreamPendingMessageInfo.cs deleted file mode 100644 index b1c87f296..000000000 --- a/src/StackExchange.Redis/StreamPendingMessageInfo.cs +++ /dev/null @@ -1,41 +0,0 @@ - -namespace StackExchange.Redis -{ - /// - /// Describes properties of a pending message. A pending message is one that has - /// been received by a consumer but has not yet been acknowledged. - /// - public readonly struct StreamPendingMessageInfo - { - internal StreamPendingMessageInfo(RedisValue messageId, - RedisValue consumerName, - long idleTimeInMs, - int deliveryCount) - { - MessageId = messageId; - ConsumerName = consumerName; - IdleTimeInMilliseconds = idleTimeInMs; - DeliveryCount = deliveryCount; - } - - /// - /// The ID of the pending message. - /// - public RedisValue MessageId { get; } - - /// - /// The consumer that received the pending message. - /// - public RedisValue ConsumerName { get; } - - /// - /// The time that has passed since the message was last delivered to a consumer. - /// - public long IdleTimeInMilliseconds { get; } - - /// - /// The number of times the message has been delivered to a consumer. - /// - public int DeliveryCount { get; } - } -} diff --git a/src/StackExchange.Redis/StreamPosition.cs b/src/StackExchange.Redis/StreamPosition.cs deleted file mode 100644 index 53b954226..000000000 --- a/src/StackExchange.Redis/StreamPosition.cs +++ /dev/null @@ -1,66 +0,0 @@ -using System; - -namespace StackExchange.Redis -{ - /// - /// Describes a pair consisting of the Stream Key and the from which to begin reading a stream. 
- /// - public struct StreamPosition - { - /// - /// Read from the beginning of a stream. - /// - public static RedisValue Beginning => StreamConstants.ReadMinValue; - - /// - /// Read new messages. - /// - public static RedisValue NewMessages => StreamConstants.NewMessages; - - /// - /// Initializes a value. - /// - /// The key for the stream. - /// The position from which to begin reading the stream. - public StreamPosition(RedisKey key, RedisValue position) - { - Key = key; - Position = position; - } - - /// - /// The stream key. - /// - public RedisKey Key { get; } - - /// - /// The offset at which to begin reading the stream. - /// - public RedisValue Position { get; } - - internal static RedisValue Resolve(RedisValue value, RedisCommand command) - { - if (value == NewMessages) - { - switch (command) - { - case RedisCommand.XREAD: throw new InvalidOperationException("StreamPosition.NewMessages cannot be used with StreamRead."); - case RedisCommand.XREADGROUP: return StreamConstants.UndeliveredMessages; - case RedisCommand.XGROUP: return StreamConstants.NewMessages; - default: // new is only valid for the above - throw new ArgumentException($"Unsupported command in StreamPosition.Resolve: {command}.", nameof(command)); - } - } else if (value == StreamPosition.Beginning) - { - switch(command) - { - case RedisCommand.XREAD: - case RedisCommand.XREADGROUP: - case RedisCommand.XGROUP: - return StreamConstants.AllMessages; - } - } - return value; - } - } -} diff --git a/src/StackExchange.Redis/Subscription.cs b/src/StackExchange.Redis/Subscription.cs new file mode 100644 index 000000000..99f3d00cb --- /dev/null +++ b/src/StackExchange.Redis/Subscription.cs @@ -0,0 +1,520 @@ +using System; +using System.Buffers; +using System.Collections.Concurrent; +using System.Net; +using System.Threading; +using System.Threading.Tasks; +using Pipelines.Sockets.Unofficial; + +namespace StackExchange.Redis; + +public partial class ConnectionMultiplexer +{ + /// + /// This is the 
record of a single subscription to a redis server. + /// It's the singular channel (which may or may not be a pattern), to one or more handlers. + /// We subscriber to a redis server once (for all messages) and execute 1-many handlers when a message arrives. + /// + internal abstract class Subscription + { + private Action? _handlers; + private readonly object _handlersLock = new(); + private ChannelMessageQueue? _queues; + public CommandFlags Flags { get; } + public ResultProcessor.TrackSubscriptionsProcessor Processor { get; } + + internal abstract bool IsConnectedAny(); + internal abstract bool IsConnectedTo(EndPoint endpoint); + + internal abstract void AddEndpoint(ServerEndPoint server); + + // conditional clear + internal abstract bool TryRemoveEndpoint(ServerEndPoint expected); + + internal abstract void RemoveDisconnectedEndpoints(); + + // returns the number of changes required + internal abstract int EnsureSubscribedToServer( + RedisSubscriber subscriber, + in RedisChannel channel, + CommandFlags flags, + bool internalCall); + + // returns the number of changes required + internal abstract Task EnsureSubscribedToServerAsync( + RedisSubscriber subscriber, + RedisChannel channel, + CommandFlags flags, + bool internalCall, + ServerEndPoint? server = null); + + internal abstract bool UnsubscribeFromServer( + RedisSubscriber subscriber, + in RedisChannel channel, + CommandFlags flags, + bool internalCall); + + internal abstract Task UnsubscribeFromServerAsync( + RedisSubscriber subscriber, + RedisChannel channel, + CommandFlags flags, + object? asyncState, + bool internalCall); + + internal abstract int GetConnectionCount(); + + internal abstract ServerEndPoint? GetAnyCurrentServer(); + + public Subscription(CommandFlags flags) + { + Flags = flags; + Processor = new ResultProcessor.TrackSubscriptionsProcessor(this); + } + + /// + /// Gets the configured (P)SUBSCRIBE or (P)UNSUBSCRIBE for an action. 
+ /// + internal Message GetSubscriptionMessage( + in RedisChannel channel, + SubscriptionAction action, + CommandFlags flags, + bool internalCall) + { + const RedisChannel.RedisChannelOptions OPTIONS_MASK = ~( + RedisChannel.RedisChannelOptions.KeyRouted | RedisChannel.RedisChannelOptions.IgnoreChannelPrefix); + var command = + action switch // note that the Routed flag doesn't impact the message here - just the routing + { + SubscriptionAction.Subscribe => (channel.Options & OPTIONS_MASK) switch + { + RedisChannel.RedisChannelOptions.None => RedisCommand.SUBSCRIBE, + RedisChannel.RedisChannelOptions.MultiNode => RedisCommand.SUBSCRIBE, + RedisChannel.RedisChannelOptions.Pattern => RedisCommand.PSUBSCRIBE, + RedisChannel.RedisChannelOptions.Pattern | RedisChannel.RedisChannelOptions.MultiNode => + RedisCommand.PSUBSCRIBE, + RedisChannel.RedisChannelOptions.Sharded => RedisCommand.SSUBSCRIBE, + _ => Unknown(action, channel.Options), + }, + SubscriptionAction.Unsubscribe => (channel.Options & OPTIONS_MASK) switch + { + RedisChannel.RedisChannelOptions.None => RedisCommand.UNSUBSCRIBE, + RedisChannel.RedisChannelOptions.MultiNode => RedisCommand.UNSUBSCRIBE, + RedisChannel.RedisChannelOptions.Pattern => RedisCommand.PUNSUBSCRIBE, + RedisChannel.RedisChannelOptions.Pattern | RedisChannel.RedisChannelOptions.MultiNode => + RedisCommand.PUNSUBSCRIBE, + RedisChannel.RedisChannelOptions.Sharded => RedisCommand.SUNSUBSCRIBE, + _ => Unknown(action, channel.Options), + }, + _ => Unknown(action, channel.Options), + }; + + // TODO: Consider flags here - we need to pass Fire and Forget, but don't want to intermingle Primary/Replica + var msg = Message.Create(-1, Flags | flags, command, channel); + msg.SetForSubscriptionBridge(); + if (internalCall) + { + msg.SetInternalCall(); + } + + return msg; + } + + private RedisCommand Unknown(SubscriptionAction action, RedisChannel.RedisChannelOptions options) + => throw new ArgumentException( + $"Unable to determine pub/sub operation 
for '{action}' against '{options}'"); + + public void Add(Action? handler, ChannelMessageQueue? queue) + { + if (handler != null) + { + lock (_handlersLock) + { + _handlers += handler; + } + } + + if (queue != null) + { + ChannelMessageQueue.Combine(ref _queues, queue); + } + } + + public bool Remove(Action? handler, ChannelMessageQueue? queue) + { + if (handler != null) + { + lock (_handlersLock) + { + _handlers -= handler; + } + } + + if (queue != null) + { + ChannelMessageQueue.Remove(ref _queues, queue); + } + + return _handlers == null & _queues == null; + } + + public ICompletable? ForInvoke(in RedisChannel channel, in RedisValue message, out ChannelMessageQueue? queues) + { + var handlers = _handlers; + queues = Volatile.Read(ref _queues); + return handlers == null ? null : new MessageCompletable(channel, message, handlers); + } + + internal void MarkCompleted() + { + lock (_handlersLock) + { + _handlers = null; + } + + ChannelMessageQueue.MarkAllCompleted(ref _queues); + } + + internal void GetSubscriberCounts(out int handlers, out int queues) + { + queues = ChannelMessageQueue.Count(ref _queues); + var tmp = _handlers; + if (tmp == null) + { + handlers = 0; + } + else if (tmp.IsSingle()) + { + handlers = 1; + } + else + { + handlers = 0; + foreach (var sub in tmp.AsEnumerable()) { handlers++; } + } + } + } + + // used for most subscriptions; routed to a single node + internal sealed class SingleNodeSubscription(CommandFlags flags) : Subscription(flags) + { + internal override bool IsConnectedAny() => _currentServer is { IsSubscriberConnected: true }; + + internal override int GetConnectionCount() => IsConnectedAny() ? 
1 : 0; + + internal override bool IsConnectedTo(EndPoint endpoint) + { + var server = _currentServer; + return server is { IsSubscriberConnected: true } && server.EndPoint == endpoint; + } + + internal override void AddEndpoint(ServerEndPoint server) => _currentServer = server; + + internal override bool TryRemoveEndpoint(ServerEndPoint expected) + { + if (_currentServer == expected) + { + _currentServer = null; + return true; + } + + return false; + } + + internal override bool UnsubscribeFromServer( + RedisSubscriber subscriber, + in RedisChannel channel, + CommandFlags flags, + bool internalCall) + { + var server = _currentServer; + if (server is not null) + { + var message = GetSubscriptionMessage(channel, SubscriptionAction.Unsubscribe, flags, internalCall); + return subscriber.multiplexer.ExecuteSyncImpl(message, Processor, server); + } + + return true; + } + + internal override Task UnsubscribeFromServerAsync( + RedisSubscriber subscriber, + RedisChannel channel, + CommandFlags flags, + object? asyncState, + bool internalCall) + { + var server = _currentServer; + if (server is not null) + { + var message = GetSubscriptionMessage(channel, SubscriptionAction.Unsubscribe, flags, internalCall); + return subscriber.multiplexer.ExecuteAsyncImpl(message, Processor, asyncState, server); + } + + return CompletedTask.FromResult(true, asyncState); + } + + private ServerEndPoint? _currentServer; + internal ServerEndPoint? GetCurrentServer() => Volatile.Read(ref _currentServer); + + internal override ServerEndPoint? GetAnyCurrentServer() => Volatile.Read(ref _currentServer); + + /// + /// Evaluates state and if we're not currently connected, clears the server reference. 
+ /// + internal override void RemoveDisconnectedEndpoints() + { + var server = _currentServer; + if (server is { IsSubscriberConnected: false }) + { + _currentServer = null; + } + } + + internal override int EnsureSubscribedToServer( + RedisSubscriber subscriber, + in RedisChannel channel, + CommandFlags flags, + bool internalCall) + { + RemoveIncorrectRouting(subscriber, in channel, flags, internalCall); + if (IsConnectedAny()) return 0; + + // we're not appropriately connected, so blank it out for eligible reconnection + _currentServer = null; + var message = GetSubscriptionMessage(channel, SubscriptionAction.Subscribe, flags, internalCall); + var selected = subscriber.multiplexer.SelectServer(message); + _ = subscriber.ExecuteSync(message, Processor, selected); + return 1; + } + + private void RemoveIncorrectRouting(RedisSubscriber subscriber, in RedisChannel channel, CommandFlags flags, bool internalCall) + { + // only applies to cluster, when using key-routed channels (sharded, explicit key-routed, or + // a single-key keyspace notification); is the subscribed server still handling that channel? + if (channel.IsKeyRouted && _currentServer is { ServerType: ServerType.Cluster } current) + { + // if we consider replicas, there can be multiple valid target servers; we can't ask + // "is this the correct server?", but we can ask "is it suitable?", based on the slot + if (!subscriber.multiplexer.ServerSelectionStrategy.CanServeSlot(_currentServer, channel)) + { + var message = GetSubscriptionMessage(channel, SubscriptionAction.Unsubscribe, flags | CommandFlags.FireAndForget, internalCall); + subscriber.multiplexer.ExecuteSyncImpl(message, Processor, current); + _currentServer = null; // pre-emptively disconnect - F+F + } + } + } + + internal override async Task EnsureSubscribedToServerAsync( + RedisSubscriber subscriber, + RedisChannel channel, + CommandFlags flags, + bool internalCall, + ServerEndPoint? 
server = null) + { + RemoveIncorrectRouting(subscriber, in channel, flags, internalCall); + if (IsConnectedAny()) return 0; + + // we're not appropriately connected, so blank it out for eligible reconnection + _currentServer = null; + var message = GetSubscriptionMessage(channel, SubscriptionAction.Subscribe, flags, internalCall); + server ??= subscriber.multiplexer.SelectServer(message); + await subscriber.ExecuteAsync(message, Processor, server).ForAwait(); + return 1; + } + } + + // used for keyspace subscriptions, which are routed to multiple nodes + internal sealed class MultiNodeSubscription(CommandFlags flags) : Subscription(flags) + { + private readonly ConcurrentDictionary _servers = new(); + + internal override bool IsConnectedAny() + { + foreach (var server in _servers) + { + if (server.Value is { IsSubscriberConnected: true }) return true; + } + + return false; + } + + internal override int GetConnectionCount() + { + int count = 0; + foreach (var server in _servers) + { + if (server.Value is { IsSubscriberConnected: true }) count++; + } + + return count; + } + + internal override bool IsConnectedTo(EndPoint endpoint) + => _servers.TryGetValue(endpoint, out var server) + && server.IsSubscriberConnected; + + internal override void AddEndpoint(ServerEndPoint server) + { + var ep = server.EndPoint; + if (!_servers.TryAdd(ep, server)) + { + _servers[ep] = server; + } + } + + internal override bool TryRemoveEndpoint(ServerEndPoint expected) + { + return _servers.TryRemove(expected.EndPoint, out _); + } + + internal override ServerEndPoint? GetAnyCurrentServer() + { + ServerEndPoint? 
last = null; + // prefer actively connected servers, but settle for anything + foreach (var server in _servers) + { + last = server.Value; + if (last is { IsSubscriberConnected: true }) + { + break; + } + } + + return last; + } + + internal override void RemoveDisconnectedEndpoints() + { + // This looks more complicated than it is, because of avoiding mutating the collection + // while iterating; instead, buffer any removals in a scratch buffer, and remove them in a second pass. + EndPoint[] scratch = []; + int count = 0; + foreach (var server in _servers) + { + if (server.Value.IsSubscriberConnected) + { + // flag for removal + if (scratch.Length == count) // need to resize the scratch buffer, using the pool + { + // let the array pool worry about min-sizing etc + var newLease = ArrayPool.Shared.Rent(count + 1); + scratch.CopyTo(newLease, 0); + ArrayPool.Shared.Return(scratch); + scratch = newLease; + } + + scratch[count++] = server.Key; + } + } + + // did we find anything to remove? + if (count != 0) + { + foreach (var ep in scratch.AsSpan(0, count)) + { + _servers.TryRemove(ep, out _); + } + } + + ArrayPool.Shared.Return(scratch); + } + + internal override int EnsureSubscribedToServer( + RedisSubscriber subscriber, + in RedisChannel channel, + CommandFlags flags, + bool internalCall) + { + int delta = 0; + var muxer = subscriber.multiplexer; + foreach (var server in muxer.GetServerSnapshot()) + { + var change = GetSubscriptionChange(server, flags); + if (change is not null) + { + // make it so + var message = GetSubscriptionMessage(channel, change.GetValueOrDefault(), flags, internalCall); + subscriber.ExecuteSync(message, Processor, server); + delta++; + } + } + + return delta; + } + + private SubscriptionAction? 
GetSubscriptionChange(ServerEndPoint server, CommandFlags flags) + { + // exclude sentinel, and only use replicas if we're explicitly asking for them + bool useReplica = (Flags & CommandFlags.DemandReplica) != 0; + bool shouldBeConnected = server.ServerType != ServerType.Sentinel & server.IsReplica == useReplica; + if (shouldBeConnected == IsConnectedTo(server.EndPoint)) + { + return null; + } + return shouldBeConnected ? SubscriptionAction.Subscribe : SubscriptionAction.Unsubscribe; + } + + internal override async Task EnsureSubscribedToServerAsync( + RedisSubscriber subscriber, + RedisChannel channel, + CommandFlags flags, + bool internalCall, + ServerEndPoint? server = null) + { + int delta = 0; + var muxer = subscriber.multiplexer; + var snapshot = muxer.GetServerSnaphotMemory(); + var len = snapshot.Length; + for (int i = 0; i < len; i++) + { + var loopServer = snapshot.Span[i]; // spans and async do not mix well + if (server is null || server == loopServer) // either "all" or "just the one we passed in" + { + var change = GetSubscriptionChange(loopServer, flags); + if (change is not null) + { + // make it so + var message = GetSubscriptionMessage(channel, change.GetValueOrDefault(), flags, internalCall); + await subscriber.ExecuteAsync(message, Processor, loopServer).ForAwait(); + delta++; + } + } + } + + return delta; + } + + internal override bool UnsubscribeFromServer( + RedisSubscriber subscriber, + in RedisChannel channel, + CommandFlags flags, + bool internalCall) + { + bool any = false; + foreach (var server in _servers) + { + var message = GetSubscriptionMessage(channel, SubscriptionAction.Unsubscribe, flags, internalCall); + any |= subscriber.ExecuteSync(message, Processor, server.Value); + } + + return any; + } + + internal override async Task UnsubscribeFromServerAsync( + RedisSubscriber subscriber, + RedisChannel channel, + CommandFlags flags, + object? 
asyncState, + bool internalCall) + { + bool any = false; + foreach (var server in _servers) + { + var message = GetSubscriptionMessage(channel, SubscriptionAction.Unsubscribe, flags, internalCall); + any |= await subscriber.ExecuteAsync(message, Processor, server.Value).ForAwait(); + } + + return any; + } + } +} diff --git a/src/StackExchange.Redis/TaskExtensions.cs b/src/StackExchange.Redis/TaskExtensions.cs index 208e10e5e..5b5684da1 100644 --- a/src/StackExchange.Redis/TaskExtensions.cs +++ b/src/StackExchange.Redis/TaskExtensions.cs @@ -13,33 +13,73 @@ private static void ObverveErrors(this Task task) if (task != null) GC.KeepAlive(task.Exception); } - public static Task ObserveErrors(this Task task) + internal static Task ObserveErrors(this Task task) { - task?.ContinueWith(observeErrors, TaskContinuationOptions.OnlyOnFaulted); + task.ContinueWith(observeErrors, TaskContinuationOptions.OnlyOnFaulted); return task; } - public static Task ObserveErrors(this Task task) + internal static Task ObserveErrors(this Task task) { - task?.ContinueWith(observeErrors, TaskContinuationOptions.OnlyOnFaulted); + task.ContinueWith(observeErrors, TaskContinuationOptions.OnlyOnFaulted); return task; } +#if !NET + // suboptimal polyfill version of the .NET 6+ API, but reasonable for light use + internal static Task WaitAsync(this Task task, CancellationToken cancellationToken) + { + if (task.IsCompleted || !cancellationToken.CanBeCanceled) return task; + return Wrap(task, cancellationToken); + + static async Task Wrap(Task task, CancellationToken cancellationToken) + { + var tcs = new TaskSourceWithToken(cancellationToken); + using var reg = cancellationToken.Register( + static state => ((TaskSourceWithToken)state!).Cancel(), tcs); + _ = task.ContinueWith( + static (t, state) => + { + var tcs = (TaskSourceWithToken)state!; + if (t.IsCanceled) tcs.TrySetCanceled(); + else if (t.IsFaulted) tcs.TrySetException(t.Exception!); + else tcs.TrySetResult(t.Result); + }, + tcs); + return 
await tcs.Task; + } + } + + // the point of this type is to combine TCS and CT so that we can use a static + // registration via Register + private sealed class TaskSourceWithToken : TaskCompletionSource + { + public TaskSourceWithToken(CancellationToken cancellationToken) + => _cancellationToken = cancellationToken; + + private readonly CancellationToken _cancellationToken; + + public void Cancel() => TrySetCanceled(_cancellationToken); + } +#endif + [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static ConfiguredTaskAwaitable ForAwait(this Task task) => task.ConfigureAwait(false); + internal static ConfiguredTaskAwaitable ForAwait(this Task task) => task.ConfigureAwait(false); [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static ConfiguredValueTaskAwaitable ForAwait(this in ValueTask task) => task.ConfigureAwait(false); + internal static ConfiguredValueTaskAwaitable ForAwait(this in ValueTask task) => task.ConfigureAwait(false); [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static ConfiguredTaskAwaitable ForAwait(this Task task) => task.ConfigureAwait(false); + internal static ConfiguredTaskAwaitable ForAwait(this Task task) => task.ConfigureAwait(false); [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static ConfiguredValueTaskAwaitable ForAwait(this in ValueTask task) => task.ConfigureAwait(false); + internal static ConfiguredValueTaskAwaitable ForAwait(this in ValueTask task) => task.ConfigureAwait(false); - internal static void RedisFireAndForget(this Task task) => task?.ContinueWith(t => GC.KeepAlive(t.Exception), TaskContinuationOptions.OnlyOnFaulted); + internal static void RedisFireAndForget(this Task task) => task?.ContinueWith(static t => GC.KeepAlive(t.Exception), TaskContinuationOptions.OnlyOnFaulted); - // Inspired from https://github.com/dotnet/corefx/blob/81a246f3adf1eece3d981f1d8bb8ae9de12de9c6/src/Common/tests/System/Threading/Tasks/TaskTimeoutExtensions.cs#L15-L43 - // Licensed to the .NET 
Foundation under one or more agreements. - // The .NET Foundation licenses this file to you under the MIT license. - public static async Task TimeoutAfter(this Task task, int timeoutMs) + /// + /// Licensed to the .NET Foundation under one or more agreements. + /// The .NET Foundation licenses this file to you under the MIT license. + /// + /// Inspired from . + internal static async Task TimeoutAfter(this Task task, int timeoutMs) { var cts = new CancellationTokenSource(); if (task == await Task.WhenAny(task, Task.Delay(timeoutMs, cts.Token)).ForAwait()) diff --git a/src/StackExchange.Redis/TaskSource.cs b/src/StackExchange.Redis/TaskSource.cs index c1cd6fff0..00f83cb04 100644 --- a/src/StackExchange.Redis/TaskSource.cs +++ b/src/StackExchange.Redis/TaskSource.cs @@ -5,12 +5,12 @@ namespace StackExchange.Redis internal static class TaskSource { /// - /// Create a new TaskCompletion source + /// Create a new TaskCompletion source. /// /// The type for the created . /// The state for the created . - /// The options to apply to the task - public static TaskCompletionSource Create(object asyncState, TaskCreationOptions options = TaskCreationOptions.None) + /// The options to apply to the task. + internal static TaskCompletionSource Create(object? asyncState, TaskCreationOptions options = TaskCreationOptions.None) => new TaskCompletionSource(asyncState, options); } } diff --git a/src/StackExchange.Redis/TextWriterLogger.cs b/src/StackExchange.Redis/TextWriterLogger.cs new file mode 100644 index 000000000..4d8507b95 --- /dev/null +++ b/src/StackExchange.Redis/TextWriterLogger.cs @@ -0,0 +1,68 @@ +using System; +using System.IO; +using Microsoft.Extensions.Logging; + +namespace StackExchange.Redis; + +internal sealed class TextWriterLogger : ILogger +{ + private TextWriter? _writer; + private readonly ILogger? _wrapped; + + internal static Action NullWriter = _ => { }; + + public TextWriterLogger(TextWriter writer, ILogger? 
wrapped) + { + _writer = writer; + _wrapped = wrapped; + } + +#if NET8_0_OR_GREATER + public IDisposable? BeginScope(TState state) where TState : notnull => NothingDisposable.Instance; +#else + public IDisposable BeginScope(TState state) => NothingDisposable.Instance; +#endif + + public bool IsEnabled(LogLevel logLevel) => _writer is not null || _wrapped is not null; + public void Log(LogLevel logLevel, EventId eventId, TState state, Exception? exception, Func formatter) + { + _wrapped?.Log(logLevel, eventId, state, exception, formatter); + if (_writer is TextWriter writer) + { + lock (writer) + { + // We check here again because it's possible we've released below, and never want to write past releasing. + if (_writer is TextWriter innerWriter) + { + innerWriter.Write($"{DateTime.UtcNow:HH:mm:ss.ffff}: "); + innerWriter.WriteLine(formatter(state, exception)); + } + } + } + } + + public void Release() + { + // We lock here because we may have piled up on a lock above and still be writing. + // We never want a write to go past the Release(), as many TextWriter implementations are not thread safe. + if (_writer is TextWriter writer) + { + lock (writer) + { + _writer = null; + } + } + } +} + +internal static class TextWriterLoggerExtensions +{ + internal static ILogger? With(this ILogger? logger, TextWriter? writer) => + writer is not null ? new TextWriterLogger(writer, logger) : logger; +} + +internal sealed class NothingDisposable : IDisposable +{ + public static readonly NothingDisposable Instance = new NothingDisposable(); + public void Dispose() { } +} diff --git a/src/StackExchange.Redis/Utils.cs b/src/StackExchange.Redis/Utils.cs new file mode 100644 index 000000000..a4beb0295 --- /dev/null +++ b/src/StackExchange.Redis/Utils.cs @@ -0,0 +1,19 @@ +using System; +using System.Reflection; + +namespace StackExchange.Redis; + +internal static class Utils +{ + private static string? 
_libVersion; + internal static string GetLibVersion() + { + if (_libVersion == null) + { + var assembly = typeof(ConnectionMultiplexer).Assembly; + _libVersion = ((AssemblyFileVersionAttribute)Attribute.GetCustomAttribute(assembly, typeof(AssemblyFileVersionAttribute))!)?.Version + ?? assembly.GetName().Version!.ToString(); + } + return _libVersion; + } +} diff --git a/src/StackExchange.Redis/ValueCondition.cs b/src/StackExchange.Redis/ValueCondition.cs new file mode 100644 index 000000000..c5cf4bd5a --- /dev/null +++ b/src/StackExchange.Redis/ValueCondition.cs @@ -0,0 +1,374 @@ +using System; +using System.Buffers.Binary; +using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; +using System.IO.Hashing; +using System.Runtime.CompilerServices; +using RESPite; + +namespace StackExchange.Redis; + +/// +/// Represents a check for an existing value - this could be existence (NX/XX), equality (IFEQ/IFNE), or digest equality (IFDEQ/IFDNE). +/// +public readonly struct ValueCondition +{ + internal enum ConditionKind : byte + { + Always, // default, importantly + Exists, + NotExists, + ValueEquals, + ValueNotEquals, + DigestEquals, + DigestNotEquals, + } + + // Supported: equality and non-equality checks for values and digests. Values are stored a RedisValue; + // digests are stored as a native (CPU-endian) Int64 (long) value, inside the same RedisValue (via the + // RedisValue.DirectOverlappedBits64 feature). This native Int64 value is an implementation detail that + // is not directly exposed to the consumer. + // + // The exchange format with Redis is hex of the bytes; for the purposes of interfacing this with our + // raw integer value, this should be considered big-endian, based on the behaviour of XxHash3. + internal const int DigestBytes = 8; // XXH3 is 64-bit + + private readonly ConditionKind _kind; + private readonly RedisValue _value; + + internal ConditionKind Kind => _kind; + + /// + /// Always perform the operation; equivalent to . 
+ /// + public static ValueCondition Always { get; } = new(ConditionKind.Always, RedisValue.Null); + + /// + /// Only perform the operation if the value exists; equivalent to . + /// + public static ValueCondition Exists { get; } = new(ConditionKind.Exists, RedisValue.Null); + + /// + /// Only perform the operation if the value does not exist; equivalent to . + /// + public static ValueCondition NotExists { get; } = new(ConditionKind.NotExists, RedisValue.Null); + + /// + public override string ToString() + { + switch (_kind) + { + case ConditionKind.Exists: + return "XX"; + case ConditionKind.NotExists: + return "NX"; + case ConditionKind.ValueEquals: + return $"IFEQ {_value}"; + case ConditionKind.ValueNotEquals: + return $"IFNE {_value}"; + case ConditionKind.DigestEquals: + var written = WriteHex(_value.DirectOverlappedBits64, stackalloc char[2 * DigestBytes]); + return $"IFDEQ {written.ToString()}"; + case ConditionKind.DigestNotEquals: + written = WriteHex(_value.DirectOverlappedBits64, stackalloc char[2 * DigestBytes]); + return $"IFDNE {written.ToString()}"; + case ConditionKind.Always: + return ""; + default: + return ThrowInvalidOperation().ToString(); + } + } + + /// + public override bool Equals(object? obj) => obj is ValueCondition other && _kind == other._kind && _value == other._value; + + /// + public override int GetHashCode() => _kind.GetHashCode() ^ _value.GetHashCode(); + + /// + /// Indicates whether this instance represents a value comparison test. + /// + internal bool IsValueTest => _kind is ConditionKind.ValueEquals or ConditionKind.ValueNotEquals; + + /// + /// Indicates whether this instance represents a digest test. + /// + internal bool IsDigestTest => _kind is ConditionKind.DigestEquals or ConditionKind.DigestNotEquals; + + /// + /// Indicates whether this instance represents an existence test. 
+ /// + internal bool IsExistenceTest => _kind is ConditionKind.Exists or ConditionKind.NotExists; + + /// + /// Indicates whether this instance represents a negative test (not-equals, not-exists, digest-not-equals). + /// + internal bool IsNegated => _kind is ConditionKind.ValueNotEquals or ConditionKind.DigestNotEquals or ConditionKind.NotExists; + + /// + /// Gets the underlying value for this condition. + /// + public RedisValue Value + { + [Experimental(Experiments.Server_8_4, UrlFormat = Experiments.UrlFormat)] + get => _value; + } + + private ValueCondition(ConditionKind kind, in RedisValue value) + { + if (value.IsNull) + { + kind = kind switch + { + // interpret === null as "does not exist" + ConditionKind.DigestEquals or ConditionKind.ValueEquals => ConditionKind.NotExists, + + // interpret !== null as "exists" + ConditionKind.DigestNotEquals or ConditionKind.ValueNotEquals => ConditionKind.Exists, + + // otherwise: leave alone + _ => kind, + }; + } + _kind = kind; + _value = value; + // if it's a digest operation, the value must be an int64 + Debug.Assert(_kind is not (ConditionKind.DigestEquals or ConditionKind.DigestNotEquals) || + value.Type == RedisValue.StorageType.Int64); + } + + /// + /// Create a value equality condition with the supplied value. + /// + [Experimental(Experiments.Server_8_4, UrlFormat = Experiments.UrlFormat)] + public static ValueCondition Equal(in RedisValue value) => new(ConditionKind.ValueEquals, value); + + /// + /// Create a value non-equality condition with the supplied value. + /// + [Experimental(Experiments.Server_8_4, UrlFormat = Experiments.UrlFormat)] + + public static ValueCondition NotEqual(in RedisValue value) => new(ConditionKind.ValueNotEquals, value); + + /// + /// Create a digest equality condition, computing the digest of the supplied value. 
+ /// + [Experimental(Experiments.Server_8_4, UrlFormat = Experiments.UrlFormat)] + public static ValueCondition DigestEqual(in RedisValue value) => value.Digest(); + + /// + /// Create a digest non-equality condition, computing the digest of the supplied value. + /// + [Experimental(Experiments.Server_8_4, UrlFormat = Experiments.UrlFormat)] + public static ValueCondition DigestNotEqual(in RedisValue value) => !value.Digest(); + + /// + /// Calculate the digest of a payload, as an equality test. For a non-equality test, use on the result. + /// + [Experimental(Experiments.Server_8_4, UrlFormat = Experiments.UrlFormat)] + public static ValueCondition CalculateDigest(ReadOnlySpan value) + { + // the internal impl of XxHash3 uses ulong (not Span), so: use + // that to avoid extra steps, and store the CPU-endian value + var digest = unchecked((long)XxHash3.HashToUInt64(value)); + return new ValueCondition(ConditionKind.DigestEquals, digest); + } + + /// + /// Creates an equality match based on the specified digest bytes. 
+ /// + [Experimental(Experiments.Server_8_4, UrlFormat = Experiments.UrlFormat)] + public static ValueCondition ParseDigest(ReadOnlySpan digest) + { + if (digest.Length != 2 * DigestBytes) ThrowDigestLength(); + + // we receive 16 hex characters, as bytes; parse that into a long, by + // first dealing with the nibbles + Span tmp = stackalloc byte[DigestBytes]; + int offset = 0; + for (int i = 0; i < tmp.Length; i++) + { + tmp[i] = (byte)( + (ParseNibble(digest[offset++]) << 4) // hi + | ParseNibble(digest[offset++])); // lo + } + // now interpret that as big-endian + var digestInt64 = BinaryPrimitives.ReadInt64BigEndian(tmp); + return new ValueCondition(ConditionKind.DigestEquals, digestInt64); + } + + private static byte ParseNibble(int b) + { + if (b >= '0' & b <= '9') return (byte)(b - '0'); + if (b >= 'a' & b <= 'f') return (byte)(b - 'a' + 10); + if (b >= 'A' & b <= 'F') return (byte)(b - 'A' + 10); + return ThrowInvalidBytes(); + + static byte ThrowInvalidBytes() => throw new ArgumentException("Invalid digest bytes"); + } + + private static void ThrowDigestLength() => throw new ArgumentException($"Invalid digest length; expected {2 * DigestBytes} bytes"); + + /// + /// Creates an equality match based on the specified digest bytes. 
+ /// + [Experimental(Experiments.Server_8_4, UrlFormat = Experiments.UrlFormat)] + public static ValueCondition ParseDigest(ReadOnlySpan digest) + { + if (digest.Length != 2 * DigestBytes) ThrowDigestLength(); + + // we receive 16 hex characters, as bytes; parse that into a long, by + // first dealing with the nibbles + Span tmp = stackalloc byte[DigestBytes]; + int offset = 0; + for (int i = 0; i < tmp.Length; i++) + { + tmp[i] = (byte)( + (ToNibble(digest[offset++]) << 4) // hi + | ToNibble(digest[offset++])); // lo + } + // now interpret that as big-endian + var digestInt64 = BinaryPrimitives.ReadInt64BigEndian(tmp); + return new ValueCondition(ConditionKind.DigestEquals, digestInt64); + + static byte ToNibble(int b) + { + if (b >= '0' & b <= '9') return (byte)(b - '0'); + if (b >= 'a' & b <= 'f') return (byte)(b - 'a' + 10); + if (b >= 'A' & b <= 'F') return (byte)(b - 'A' + 10); + return ThrowInvalidBytes(); + } + + static byte ThrowInvalidBytes() => throw new ArgumentException("Invalid digest bytes"); + } + + internal int TokenCount => _kind switch + { + ConditionKind.Exists or ConditionKind.NotExists => 1, + ConditionKind.ValueEquals or ConditionKind.ValueNotEquals or ConditionKind.DigestEquals or ConditionKind.DigestNotEquals => 2, + _ => 0, + }; + + internal void WriteTo(PhysicalConnection physical) + { + switch (_kind) + { + case ConditionKind.Exists: + physical.WriteBulkString("XX"u8); + break; + case ConditionKind.NotExists: + physical.WriteBulkString("NX"u8); + break; + case ConditionKind.ValueEquals: + physical.WriteBulkString("IFEQ"u8); + physical.WriteBulkString(_value); + break; + case ConditionKind.ValueNotEquals: + physical.WriteBulkString("IFNE"u8); + physical.WriteBulkString(_value); + break; + case ConditionKind.DigestEquals: + physical.WriteBulkString("IFDEQ"u8); + var written = WriteHex(_value.DirectOverlappedBits64, stackalloc byte[2 * DigestBytes]); + physical.WriteBulkString(written); + break; + case ConditionKind.DigestNotEquals: + 
physical.WriteBulkString("IFDNE"u8); + written = WriteHex(_value.DirectOverlappedBits64, stackalloc byte[2 * DigestBytes]); + physical.WriteBulkString(written); + break; + } + } + + internal static Span WriteHex(long value, Span target) + { + Debug.Assert(target.Length >= 2 * DigestBytes); + + // iterate over the bytes in big-endian order, writing the hi/lo nibbles, + // using pointer-like behaviour (rather than complex shifts and masks) + if (BitConverter.IsLittleEndian) + { + value = BinaryPrimitives.ReverseEndianness(value); + } + ref byte ptr = ref Unsafe.As(ref value); + int targetOffset = 0; + ReadOnlySpan hex = "0123456789abcdef"u8; + for (int sourceOffset = 0; sourceOffset < sizeof(long); sourceOffset++) + { + byte b = Unsafe.Add(ref ptr, sourceOffset); + target[targetOffset++] = hex[(b >> 4) & 0xF]; // hi nibble + target[targetOffset++] = hex[b & 0xF]; // lo + } + return target.Slice(0, 2 * DigestBytes); + } + + internal static Span WriteHex(long value, Span target) + { + Debug.Assert(target.Length >= 2 * DigestBytes); + + // iterate over the bytes in big-endian order, writing the hi/lo nibbles, + // using pointer-like behaviour (rather than complex shifts and masks) + if (BitConverter.IsLittleEndian) + { + value = BinaryPrimitives.ReverseEndianness(value); + } + ref byte ptr = ref Unsafe.As(ref value); + int targetOffset = 0; + const string hex = "0123456789abcdef"; + for (int sourceOffset = 0; sourceOffset < sizeof(long); sourceOffset++) + { + byte b = Unsafe.Add(ref ptr, sourceOffset); + target[targetOffset++] = hex[(b >> 4) & 0xF]; // hi nibble + target[targetOffset++] = hex[b & 0xF]; // lo + } + return target.Slice(0, 2 * DigestBytes); + } + + /// + /// Negate this condition. The nature of the condition is preserved. 
+ /// + public static ValueCondition operator !(in ValueCondition value) => value._kind switch + { + ConditionKind.ValueEquals => new(ConditionKind.ValueNotEquals, value._value), + ConditionKind.ValueNotEquals => new(ConditionKind.ValueEquals, value._value), + ConditionKind.DigestEquals => new(ConditionKind.DigestNotEquals, value._value), + ConditionKind.DigestNotEquals => new(ConditionKind.DigestEquals, value._value), + ConditionKind.Exists => new(ConditionKind.NotExists, value._value), + ConditionKind.NotExists => new(ConditionKind.Exists, value._value), + // ReSharper disable once ExplicitCallerInfoArgument + _ => value.ThrowInvalidOperation("operator !"), + }; + + /// + /// Convert a to a . + /// + public static implicit operator ValueCondition(When when) => when switch + { + When.Always => Always, + When.Exists => Exists, + When.NotExists => NotExists, + _ => throw new ArgumentOutOfRangeException(nameof(when)), + }; + + /// + /// Convert a value condition to a digest condition. + /// + [Experimental(Experiments.Server_8_4, UrlFormat = Experiments.UrlFormat)] + public ValueCondition AsDigest() => _kind switch + { + ConditionKind.ValueEquals => _value.Digest(), + ConditionKind.ValueNotEquals => !_value.Digest(), + _ => ThrowInvalidOperation(), + }; + + internal ValueCondition ThrowInvalidOperation([CallerMemberName] string? 
operation = null) + => throw new InvalidOperationException($"{operation} cannot be used with a {_kind} condition."); + + internal When AsWhen() => _kind switch + { + ConditionKind.Always => When.Always, + ConditionKind.Exists => When.Exists, + ConditionKind.NotExists => When.NotExists, + _ => ThrowInvalidOperation().AsWhen(), + }; +} diff --git a/src/StackExchange.Redis/ValueStopwatch.cs b/src/StackExchange.Redis/ValueStopwatch.cs new file mode 100644 index 000000000..e7f93b102 --- /dev/null +++ b/src/StackExchange.Redis/ValueStopwatch.cs @@ -0,0 +1,33 @@ +using System; +using System.Diagnostics; + +namespace StackExchange.Redis; + +/// +/// Optimization over . +/// +/// From . +internal struct ValueStopwatch +{ + private static readonly double TimestampToTicks = TimeSpan.TicksPerSecond / (double)Stopwatch.Frequency; + private readonly long _startTimestamp; + public bool IsActive => _startTimestamp != 0; + + private ValueStopwatch(long startTimestamp) => _startTimestamp = startTimestamp; + public static ValueStopwatch StartNew() => new ValueStopwatch(Stopwatch.GetTimestamp()); + + public int ElapsedMilliseconds => checked((int)GetElapsedTime().TotalMilliseconds); + + public TimeSpan GetElapsedTime() + { + if (!IsActive) + { + throw new InvalidOperationException("An uninitialized, or 'default', ValueStopwatch cannot be used to get elapsed time."); + } + + var end = Stopwatch.GetTimestamp(); + var timestampDelta = end - _startTimestamp; + var ticks = (long)(TimestampToTicks * timestampDelta); + return new TimeSpan(ticks); + } +} diff --git a/src/StackExchange.Redis/VectorSetAddMessage.cs b/src/StackExchange.Redis/VectorSetAddMessage.cs new file mode 100644 index 000000000..0beb65205 --- /dev/null +++ b/src/StackExchange.Redis/VectorSetAddMessage.cs @@ -0,0 +1,168 @@ +using System; +using System.Runtime.InteropServices; +using System.Threading; + +namespace StackExchange.Redis; + +internal abstract class VectorSetAddMessage( + int db, + CommandFlags flags, + RedisKey 
key, + int? reducedDimensions, + VectorSetQuantization quantization, + int? buildExplorationFactor, + int? maxConnections, + bool useCheckAndSet) : Message(db, flags, RedisCommand.VADD) +{ + public override int ArgCount => GetArgCount(UseFp32); + + private int GetArgCount(bool packed) + { + var count = 2 + GetElementArgCount(packed); // key, element and either "FP32 {vector}" or VALUES {num}" + if (reducedDimensions.HasValue) count += 2; // [REDUCE {dim}] + + if (useCheckAndSet) count++; // [CAS] + count += quantization switch + { + VectorSetQuantization.None or VectorSetQuantization.Binary => 1, // [NOQUANT] or [BIN] + VectorSetQuantization.Int8 => 0, // implicit + _ => throw new ArgumentOutOfRangeException(nameof(quantization)), + }; + + if (buildExplorationFactor.HasValue) count += 2; // [EF {build-exploration-factor}] + count += GetAttributeArgCount(); // [SETATTR {attributes}] + if (maxConnections.HasValue) count += 2; // [M {numlinks}] + return count; + } + + public abstract int GetElementArgCount(bool packed); + public abstract int GetAttributeArgCount(); + + public override int GetHashSlot(ServerSelectionStrategy serverSelectionStrategy) + => serverSelectionStrategy.HashSlot(key); + + private static readonly bool CanUseFp32 = BitConverter.IsLittleEndian && CheckFp32(); + + private static bool CheckFp32() // check endianness with a known value + { + // ReSharper disable once CompareOfFloatsByEqualityOperator - expect exact + return MemoryMarshal.Cast("\0\0(B"u8)[0] == 42; + } + +#if DEBUG + private static int _fp32Disabled; + internal static bool UseFp32 => CanUseFp32 & Volatile.Read(ref _fp32Disabled) == 0; + internal static void SuppressFp32() => Interlocked.Increment(ref _fp32Disabled); + internal static void RestoreFp32() => Interlocked.Decrement(ref _fp32Disabled); +#else + internal static bool UseFp32 => CanUseFp32; + internal static void SuppressFp32() { } + internal static void RestoreFp32() { } +#endif + + protected abstract void WriteElement(bool 
packed, PhysicalConnection physical); + + protected override void WriteImpl(PhysicalConnection physical) + { + bool packed = UseFp32; // snapshot to avoid race in debug scenarios + physical.WriteHeader(Command, GetArgCount(packed)); + physical.Write(key); + if (reducedDimensions.HasValue) + { + physical.WriteBulkString("REDUCE"u8); + physical.WriteBulkString(reducedDimensions.GetValueOrDefault()); + } + + WriteElement(packed, physical); + if (useCheckAndSet) physical.WriteBulkString("CAS"u8); + + switch (quantization) + { + case VectorSetQuantization.Int8: + break; + case VectorSetQuantization.None: + physical.WriteBulkString("NOQUANT"u8); + break; + case VectorSetQuantization.Binary: + physical.WriteBulkString("BIN"u8); + break; + default: + throw new ArgumentOutOfRangeException(nameof(quantization)); + } + + if (buildExplorationFactor.HasValue) + { + physical.WriteBulkString("EF"u8); + physical.WriteBulkString(buildExplorationFactor.GetValueOrDefault()); + } + + WriteAttributes(physical); + + if (maxConnections.HasValue) + { + physical.WriteBulkString("M"u8); + physical.WriteBulkString(maxConnections.GetValueOrDefault()); + } + } + + protected abstract void WriteAttributes(PhysicalConnection physical); + + internal sealed class VectorSetAddMemberMessage( + int db, + CommandFlags flags, + RedisKey key, + int? reducedDimensions, + VectorSetQuantization quantization, + int? buildExplorationFactor, + int? maxConnections, + bool useCheckAndSet, + RedisValue element, + ReadOnlyMemory values, + string? attributesJson) : VectorSetAddMessage( + db, + flags, + key, + reducedDimensions, + quantization, + buildExplorationFactor, + maxConnections, + useCheckAndSet) + { + private readonly string? _attributesJson = string.IsNullOrWhiteSpace(attributesJson) ? null : attributesJson; + public override int GetElementArgCount(bool packed) + => 2 // "FP32 {vector}" or "VALUES {num}" + + (packed ? 
0 : values.Length); // {vector...}" + + public override int GetAttributeArgCount() + => _attributesJson is null ? 0 : 2; // [SETATTR {attributes}] + + protected override void WriteElement(bool packed, PhysicalConnection physical) + { + if (packed) + { + physical.WriteBulkString("FP32"u8); + physical.WriteBulkString(MemoryMarshal.AsBytes(values.Span)); + } + else + { + physical.WriteBulkString("VALUES"u8); + physical.WriteBulkString(values.Length); + foreach (var val in values.Span) + { + physical.WriteBulkString(val); + } + } + + physical.WriteBulkString(element); + } + + protected override void WriteAttributes(PhysicalConnection physical) + { + if (_attributesJson is not null) + { + physical.WriteBulkString("SETATTR"u8); + physical.WriteBulkString(_attributesJson); + } + } + } +} diff --git a/src/StackExchange.Redis/VectorSetAddRequest.cs b/src/StackExchange.Redis/VectorSetAddRequest.cs new file mode 100644 index 000000000..8262d4750 --- /dev/null +++ b/src/StackExchange.Redis/VectorSetAddRequest.cs @@ -0,0 +1,81 @@ +using System; +using System.Diagnostics.CodeAnalysis; +using RESPite; + +namespace StackExchange.Redis; + +/// +/// Represents the request for a vectorset add operation. +/// +[Experimental(Experiments.VectorSets, UrlFormat = Experiments.UrlFormat)] +public abstract class VectorSetAddRequest +{ + // polymorphism left open for future, but needs to be handled internally + internal VectorSetAddRequest() + { + } + + /// + /// Add a member to the vectorset. + /// + /// The element name. + /// The vector data. + /// Optional JSON attributes for the element (SETATTR parameter). + public static VectorSetAddRequest Member( + RedisValue element, + ReadOnlyMemory values, +#if NET8_0_OR_GREATER + [StringSyntax(StringSyntaxAttribute.Json)] +#endif + string? attributesJson = null) + => new VectorSetAddMemberRequest(element, values, attributesJson); + + /// + /// Optional check-and-set mode for partial threading (CAS parameter). 
+ /// + public bool UseCheckAndSet { get; set; } + + /// + /// Optional dimension reduction using random projection (REDUCE parameter). + /// + public int? ReducedDimensions { get; set; } + + /// + /// Quantization type - Int8 (Q8), None (NOQUANT), or Binary (BIN). Default: Int8. + /// + public VectorSetQuantization Quantization { get; set; } = VectorSetQuantization.Int8; + + /// + /// Optional HNSW build exploration factor (EF parameter, default: 200). + /// + public int? BuildExplorationFactor { get; set; } + + /// + /// Optional maximum connections per HNSW node (M parameter, default: 16). + /// + public int? MaxConnections { get; set; } + + // snapshot the values; I don't trust people not to mutate the object behind my back + internal abstract VectorSetAddMessage ToMessage(RedisKey key, int db, CommandFlags flags); + + internal sealed class VectorSetAddMemberRequest( + RedisValue element, + ReadOnlyMemory values, + string? attributesJson) + : VectorSetAddRequest + { + internal override VectorSetAddMessage ToMessage(RedisKey key, int db, CommandFlags flags) + => new VectorSetAddMessage.VectorSetAddMemberMessage( + db, + flags, + key, + ReducedDimensions, + Quantization, + BuildExplorationFactor, + MaxConnections, + UseCheckAndSet, + element, + values, + attributesJson); + } +} diff --git a/src/StackExchange.Redis/VectorSetInfo.cs b/src/StackExchange.Redis/VectorSetInfo.cs new file mode 100644 index 000000000..afbc3fece --- /dev/null +++ b/src/StackExchange.Redis/VectorSetInfo.cs @@ -0,0 +1,55 @@ +using System; +using System.Diagnostics.CodeAnalysis; +using RESPite; + +namespace StackExchange.Redis; + +/// +/// Contains metadata information about a vectorset returned by VINFO command. +/// +[Experimental(Experiments.VectorSets, UrlFormat = Experiments.UrlFormat)] +public readonly struct VectorSetInfo( + VectorSetQuantization quantization, + string? 
quantizationRaw, + int dimension, + long length, + int maxLevel, + long vectorSetUid, + long hnswMaxNodeUid) +{ + /// + /// The quantization type used for vectors in this vectorset. + /// + public VectorSetQuantization Quantization { get; } = quantization; + + /// + /// The raw representation of the quantization type used for vectors in this vectorset. This is only + /// populated if the is . + /// + public string? QuantizationRaw { get; } = quantizationRaw; + + /// + /// The number of dimensions in each vector. + /// + public int Dimension { get; } = dimension; + + /// + /// The number of elements (cardinality) in the vectorset. + /// + public long Length { get; } = length; + + /// + /// The maximum level in the HNSW graph structure. + /// + public int MaxLevel { get; } = maxLevel; + + /// + /// The unique identifier for this vectorset. + /// + public long VectorSetUid { get; } = vectorSetUid; + + /// + /// The maximum node unique identifier in the HNSW graph. + /// + public long HnswMaxNodeUid { get; } = hnswMaxNodeUid; +} diff --git a/src/StackExchange.Redis/VectorSetInfoField.cs b/src/StackExchange.Redis/VectorSetInfoField.cs new file mode 100644 index 000000000..1ed9266be --- /dev/null +++ b/src/StackExchange.Redis/VectorSetInfoField.cs @@ -0,0 +1,61 @@ +using System; +using RESPite; + +namespace StackExchange.Redis; + +/// +/// Represents fields in a VSET.INFO response. +/// +internal enum VectorSetInfoField +{ + /// + /// Unknown or unrecognized field. + /// + [AsciiHash("")] + Unknown = 0, + + /// + /// The size field. + /// + [AsciiHash("size")] + Size, + + /// + /// The vset-uid field. + /// + [AsciiHash("vset-uid")] + VsetUid, + + /// + /// The max-level field. + /// + [AsciiHash("max-level")] + MaxLevel, + + /// + /// The vector-dim field. + /// + [AsciiHash("vector-dim")] + VectorDim, + + /// + /// The quant-type field. + /// + [AsciiHash("quant-type")] + QuantType, + + /// + /// The hnsw-max-node-uid field. 
+ /// + [AsciiHash("hnsw-max-node-uid")] + HnswMaxNodeUid, +} + +/// +/// Metadata and parsing methods for VectorSetInfoField. +/// +internal static partial class VectorSetInfoFieldMetadata +{ + [AsciiHash] + internal static partial bool TryParse(ReadOnlySpan value, out VectorSetInfoField field); +} diff --git a/src/StackExchange.Redis/VectorSetLink.cs b/src/StackExchange.Redis/VectorSetLink.cs new file mode 100644 index 000000000..5d58a8d7f --- /dev/null +++ b/src/StackExchange.Redis/VectorSetLink.cs @@ -0,0 +1,25 @@ +using System.Diagnostics.CodeAnalysis; +using RESPite; + +namespace StackExchange.Redis; + +/// +/// Represents a link/connection between members in a vectorset with similarity score. +/// Used by VLINKS command with WITHSCORES option. +/// +[Experimental(Experiments.VectorSets, UrlFormat = Experiments.UrlFormat)] +public readonly struct VectorSetLink(RedisValue member, double score) +{ + /// + /// The linked member name/identifier. + /// + public RedisValue Member { get; } = member; + + /// + /// The similarity score between the queried member and this linked member. + /// + public double Score { get; } = score; + + /// + public override string ToString() => $"{Member}: {Score}"; +} diff --git a/src/StackExchange.Redis/VectorSetQuantization.cs b/src/StackExchange.Redis/VectorSetQuantization.cs new file mode 100644 index 000000000..c7c5bf2e7 --- /dev/null +++ b/src/StackExchange.Redis/VectorSetQuantization.cs @@ -0,0 +1,45 @@ +using System; +using System.Diagnostics.CodeAnalysis; +using RESPite; + +namespace StackExchange.Redis; + +/// +/// Specifies the quantization type for vectors in a vectorset. +/// +[Experimental(Experiments.VectorSets, UrlFormat = Experiments.UrlFormat)] +public enum VectorSetQuantization +{ + /// + /// Unknown or unrecognized quantization type. + /// + [AsciiHash("")] + Unknown = 0, + + /// + /// No quantization (full precision). This maps to "NOQUANT" or "f32". 
+ /// + [AsciiHash("f32")] + None = 1, + + /// + /// 8-bit integer quantization (default). This maps to "Q8" or "int8". + /// + [AsciiHash("int8")] + Int8 = 2, + + /// + /// Binary quantization. This maps to "BIN" or "bin". + /// + [AsciiHash("bin")] + Binary = 3, +} + +/// +/// Metadata and parsing methods for VectorSetQuantization. +/// +internal static partial class VectorSetQuantizationMetadata +{ + [AsciiHash] + internal static partial bool TryParse(ReadOnlySpan value, out VectorSetQuantization quantization); +} diff --git a/src/StackExchange.Redis/VectorSetSimilaritySearchMessage.cs b/src/StackExchange.Redis/VectorSetSimilaritySearchMessage.cs new file mode 100644 index 000000000..1bbc418d5 --- /dev/null +++ b/src/StackExchange.Redis/VectorSetSimilaritySearchMessage.cs @@ -0,0 +1,263 @@ +using System; + +namespace StackExchange.Redis; + +internal abstract class VectorSetSimilaritySearchMessage( + int db, + CommandFlags flags, + VectorSetSimilaritySearchMessage.VsimFlags vsimFlags, + RedisKey key, + int count, + double epsilon, + int searchExplorationFactor, + string? filterExpression, + int maxFilteringEffort) : Message(db, flags, RedisCommand.VSIM) +{ + // For "FP32" and "VALUES" scenarios; in the future we might want other vector sizes / encodings - for + // example, there could be some "FP16" or "FP8" transport that requires a ROM-short or ROM-sbyte from + // the calling code. Or, as a convenience, we might want to allow ROM-double input, but transcode that + // to FP32 on the way out. + internal sealed class VectorSetSimilaritySearchBySingleVectorMessage( + int db, + CommandFlags flags, + VsimFlags vsimFlags, + RedisKey key, + ReadOnlyMemory vector, + int count, + double epsilon, + int searchExplorationFactor, + string? 
filterExpression, + int maxFilteringEffort) : VectorSetSimilaritySearchMessage(db, flags, vsimFlags, key, count, epsilon, + searchExplorationFactor, filterExpression, maxFilteringEffort) + { + internal override int GetSearchTargetArgCount(bool packed) => + packed ? 2 : 2 + vector.Length; // FP32 {vector} or VALUES {num} {vector} + + internal override void WriteSearchTarget(bool packed, PhysicalConnection physical) + { + if (packed) + { + physical.WriteBulkString("FP32"u8); + physical.WriteBulkString(System.Runtime.InteropServices.MemoryMarshal.AsBytes(vector.Span)); + } + else + { + physical.WriteBulkString("VALUES"u8); + physical.WriteBulkString(vector.Length); + foreach (var val in vector.Span) + { + physical.WriteBulkString(val); + } + } + } + } + + // for "ELE" scenarios + internal sealed class VectorSetSimilaritySearchByMemberMessage( + int db, + CommandFlags flags, + VsimFlags vsimFlags, + RedisKey key, + RedisValue member, + int count, + double epsilon, + int searchExplorationFactor, + string? 
filterExpression, + int maxFilteringEffort) : VectorSetSimilaritySearchMessage(db, flags, vsimFlags, key, count, epsilon, + searchExplorationFactor, filterExpression, maxFilteringEffort) + { + internal override int GetSearchTargetArgCount(bool packed) => 2; // ELE {member} + + internal override void WriteSearchTarget(bool packed, PhysicalConnection physical) + { + physical.WriteBulkString("ELE"u8); + physical.WriteBulkString(member); + } + } + + internal abstract int GetSearchTargetArgCount(bool packed); + internal abstract void WriteSearchTarget(bool packed, PhysicalConnection physical); + + public ResultProcessor?> GetResultProcessor() => + VectorSetSimilaritySearchProcessor.Instance; + + private sealed class VectorSetSimilaritySearchProcessor : ResultProcessor?> + { + // keep local, since we need to know what flags were being sent + public static readonly VectorSetSimilaritySearchProcessor Instance = new(); + private VectorSetSimilaritySearchProcessor() { } + + protected override bool SetResultCore(PhysicalConnection connection, Message message, in RawResult result) + { + if (result.Resp2TypeArray == ResultType.Array && message is VectorSetSimilaritySearchMessage vssm) + { + if (result.IsNull) + { + SetResult(message, null); + return true; + } + + bool withScores = vssm.HasFlag(VsimFlags.WithScores); + bool withAttribs = vssm.HasFlag(VsimFlags.WithAttributes); + + // in RESP3 mode (only), when both are requested, we get a sub-array per item; weird, but true + bool internalNesting = withScores && withAttribs && connection.Protocol is RedisProtocol.Resp3; + + int rowsPerItem = internalNesting + ? 2 + : 1 + ((withScores ? 1 : 0) + (withAttribs ? 
1 : 0)); // each value is separate root element + + var items = result.GetItems(); + var length = checked((int)items.Length) / rowsPerItem; + var lease = Lease.Create(length, clear: false); + var target = lease.Span; + int count = 0; + var iter = items.GetEnumerator(); + for (int i = 0; i < target.Length && iter.MoveNext(); i++) + { + var member = iter.Current.AsRedisValue(); + double score = double.NaN; + string? attributesJson = null; + + if (internalNesting) + { + if (!iter.MoveNext() || iter.Current.Resp2TypeArray != ResultType.Array) break; + if (!iter.Current.IsNull) + { + var subArray = iter.Current.GetItems(); + if (subArray.Length >= 1 && !subArray[0].TryGetDouble(out score)) break; + if (subArray.Length >= 2) attributesJson = subArray[1].GetString(); + } + } + else + { + if (withScores) + { + if (!iter.MoveNext() || !iter.Current.TryGetDouble(out score)) break; + } + + if (withAttribs) + { + if (!iter.MoveNext()) break; + attributesJson = iter.Current.GetString(); + } + } + + target[i] = new VectorSetSimilaritySearchResult(member, score, attributesJson); + count++; + } + + if (count == target.Length) + { + SetResult(message, lease); + return true; + } + + lease.Dispose(); // failed to fill? 
+ } + + return false; + } + } + + [Flags] + internal enum VsimFlags + { + None = 0, + Count = 1 << 0, + WithScores = 1 << 1, + WithAttributes = 1 << 2, + UseExactSearch = 1 << 3, + DisableThreading = 1 << 4, + Epsilon = 1 << 5, + SearchExplorationFactor = 1 << 6, + MaxFilteringEffort = 1 << 7, + FilterExpression = 1 << 8, + } + + private bool HasFlag(VsimFlags flag) => (vsimFlags & flag) != 0; + + public override int ArgCount => GetArgCount(VectorSetAddMessage.UseFp32); + + private int GetArgCount(bool packed) + { + int argCount = 1 + GetSearchTargetArgCount(packed); // {key} and whatever we need for the vector/element portion + if (HasFlag(VsimFlags.WithScores)) argCount++; // [WITHSCORES] + if (HasFlag(VsimFlags.WithAttributes)) argCount++; // [WITHATTRIBS] + if (HasFlag(VsimFlags.Count)) argCount += 2; // [COUNT {count}] + if (HasFlag(VsimFlags.Epsilon)) argCount += 2; // [EPSILON {epsilon}] + if (HasFlag(VsimFlags.SearchExplorationFactor)) argCount += 2; // [EF {search-exploration-factor}] + if (HasFlag(VsimFlags.FilterExpression)) argCount += 2; // [FILTER {filterExpression}] + if (HasFlag(VsimFlags.MaxFilteringEffort)) argCount += 2; // [FILTER-EF {max-filtering-effort}] + if (HasFlag(VsimFlags.UseExactSearch)) argCount++; // [TRUTH] + if (HasFlag(VsimFlags.DisableThreading)) argCount++; // [NOTHREAD] + return argCount; + } + + protected override void WriteImpl(PhysicalConnection physical) + { + // snapshot to avoid race in debug scenarios + bool packed = VectorSetAddMessage.UseFp32; + physical.WriteHeader(Command, GetArgCount(packed)); + + // Write key + physical.Write(key); + + // Write search target: either "ELE {member}" or vector data + WriteSearchTarget(packed, physical); + + if (HasFlag(VsimFlags.WithScores)) + { + physical.WriteBulkString("WITHSCORES"u8); + } + + if (HasFlag(VsimFlags.WithAttributes)) + { + physical.WriteBulkString("WITHATTRIBS"u8); + } + + // Write optional parameters + if (HasFlag(VsimFlags.Count)) + { + 
physical.WriteBulkString("COUNT"u8); + physical.WriteBulkString(count); + } + + if (HasFlag(VsimFlags.Epsilon)) + { + physical.WriteBulkString("EPSILON"u8); + physical.WriteBulkString(epsilon); + } + + if (HasFlag(VsimFlags.SearchExplorationFactor)) + { + physical.WriteBulkString("EF"u8); + physical.WriteBulkString(searchExplorationFactor); + } + + if (HasFlag(VsimFlags.FilterExpression)) + { + physical.WriteBulkString("FILTER"u8); + physical.WriteBulkString(filterExpression); + } + + if (HasFlag(VsimFlags.MaxFilteringEffort)) + { + physical.WriteBulkString("FILTER-EF"u8); + physical.WriteBulkString(maxFilteringEffort); + } + + if (HasFlag(VsimFlags.UseExactSearch)) + { + physical.WriteBulkString("TRUTH"u8); + } + + if (HasFlag(VsimFlags.DisableThreading)) + { + physical.WriteBulkString("NOTHREAD"u8); + } + } + + public override int GetHashSlot(ServerSelectionStrategy serverSelectionStrategy) + => serverSelectionStrategy.HashSlot(key); +} diff --git a/src/StackExchange.Redis/VectorSetSimilaritySearchRequest.cs b/src/StackExchange.Redis/VectorSetSimilaritySearchRequest.cs new file mode 100644 index 000000000..1343fd3f1 --- /dev/null +++ b/src/StackExchange.Redis/VectorSetSimilaritySearchRequest.cs @@ -0,0 +1,220 @@ +using System; +using System.ComponentModel; +using System.Diagnostics.CodeAnalysis; +using RESPite; +using VsimFlags = StackExchange.Redis.VectorSetSimilaritySearchMessage.VsimFlags; + +namespace StackExchange.Redis; + +/// +/// Represents the request for a vector similarity search operation. 
+/// +[Experimental(Experiments.VectorSets, UrlFormat = Experiments.UrlFormat)] +public abstract class VectorSetSimilaritySearchRequest +{ + internal VectorSetSimilaritySearchRequest() + { + } // polymorphism left open for future, but needs to be handled internally + + private sealed class VectorSetSimilarityByMemberSearchRequest(RedisValue member) : VectorSetSimilaritySearchRequest + { + internal override VectorSetSimilaritySearchMessage ToMessage(RedisKey key, int db, CommandFlags flags) + => new VectorSetSimilaritySearchMessage.VectorSetSimilaritySearchByMemberMessage( + db, + flags, + _vsimFlags, + key, + member, + _count, + _epsilon, + _searchExplorationFactor, + _filterExpression, + _maxFilteringEffort); + } + + private sealed class VectorSetSimilarityVectorSingleSearchRequest(ReadOnlyMemory vector) + : VectorSetSimilaritySearchRequest + { + internal override VectorSetSimilaritySearchMessage ToMessage(RedisKey key, int db, CommandFlags flags) + => new VectorSetSimilaritySearchMessage.VectorSetSimilaritySearchBySingleVectorMessage( + db, + flags, + _vsimFlags, + key, + vector, + _count, + _epsilon, + _searchExplorationFactor, + _filterExpression, + _maxFilteringEffort); + } + + // snapshot the values; I don't trust people not to mutate the object behind my back + internal abstract VectorSetSimilaritySearchMessage ToMessage(RedisKey key, int db, CommandFlags flags); + + /// + /// Create a request to search by an existing member in the index. + /// + /// The member to search for. + public static VectorSetSimilaritySearchRequest ByMember(RedisValue member) + => new VectorSetSimilarityByMemberSearchRequest(member); + + /// + /// Create a request to search by a vector value. + /// + /// The vector value to search for. 
+ public static VectorSetSimilaritySearchRequest ByVector(ReadOnlyMemory vector) + => new VectorSetSimilarityVectorSingleSearchRequest(vector); + + private VsimFlags _vsimFlags; + + // use the flags to reduce storage from N*Nullable + private int _searchExplorationFactor, _maxFilteringEffort, _count; + private double _epsilon; + + private bool HasFlag(VsimFlags flag) => (_vsimFlags & flag) != 0; + + private void SetFlag(VsimFlags flag, bool value) + { + if (value) + { + _vsimFlags |= flag; + } + else + { + _vsimFlags &= ~flag; + } + } + + /// + /// The number of similar vectors to return (COUNT parameter). + /// + public int? Count + { + get => HasFlag(VsimFlags.Count) ? _count : null; + set + { + if (value.HasValue) + { + _count = value.GetValueOrDefault(); + SetFlag(VsimFlags.Count, true); + } + else + { + SetFlag(VsimFlags.Count, false); + } + } + } + + /// + /// Whether to include similarity scores in the results (WITHSCORES parameter). + /// + public bool WithScores + { + get => HasFlag(VsimFlags.WithScores); + set => SetFlag(VsimFlags.WithScores, value); + } + + /// + /// Whether to include JSON attributes in the results (WITHATTRIBS parameter). + /// + public bool WithAttributes + { + get => HasFlag(VsimFlags.WithAttributes); + set => SetFlag(VsimFlags.WithAttributes, value); + } + + /// + /// Optional similarity threshold - only return elements with similarity >= (1 - epsilon) (EPSILON parameter). + /// + public double? Epsilon + { + get => HasFlag(VsimFlags.Epsilon) ? _epsilon : null; + set + { + if (value.HasValue) + { + _epsilon = value.GetValueOrDefault(); + SetFlag(VsimFlags.Epsilon, true); + } + else + { + SetFlag(VsimFlags.Epsilon, false); + } + } + } + + /// + /// Optional search exploration factor for better recall (EF parameter). + /// + public int? SearchExplorationFactor + { + get => HasFlag(VsimFlags.SearchExplorationFactor) ? 
_searchExplorationFactor : null; + set + { + if (value.HasValue) + { + _searchExplorationFactor = value.GetValueOrDefault(); + SetFlag(VsimFlags.SearchExplorationFactor, true); + } + else + { + SetFlag(VsimFlags.SearchExplorationFactor, false); + } + } + } + + /// + /// Optional maximum filtering attempts (FILTER-EF parameter). + /// + public int? MaxFilteringEffort + { + get => HasFlag(VsimFlags.MaxFilteringEffort) ? _maxFilteringEffort : null; + set + { + if (value.HasValue) + { + _maxFilteringEffort = value.GetValueOrDefault(); + SetFlag(VsimFlags.MaxFilteringEffort, true); + } + else + { + SetFlag(VsimFlags.MaxFilteringEffort, false); + } + } + } + + private string? _filterExpression; + + /// + /// Optional filter expression to restrict results (FILTER parameter); . + /// + public string? FilterExpression + { + get => _filterExpression; + set + { + _filterExpression = value; + SetFlag(VsimFlags.FilterExpression, !string.IsNullOrWhiteSpace(value)); + } + } + + /// + /// Whether to use exact linear scan instead of HNSW (TRUTH parameter). + /// + public bool UseExactSearch + { + get => HasFlag(VsimFlags.UseExactSearch); + set => SetFlag(VsimFlags.UseExactSearch, value); + } + + /// + /// Whether to run search in main thread (NOTHREAD parameter). + /// + [Browsable(false), EditorBrowsable(EditorBrowsableState.Advanced)] + public bool DisableThreading + { + get => HasFlag(VsimFlags.DisableThreading); + set => SetFlag(VsimFlags.DisableThreading, value); + } +} diff --git a/src/StackExchange.Redis/VectorSetSimilaritySearchResult.cs b/src/StackExchange.Redis/VectorSetSimilaritySearchResult.cs new file mode 100644 index 000000000..c87e04bc1 --- /dev/null +++ b/src/StackExchange.Redis/VectorSetSimilaritySearchResult.cs @@ -0,0 +1,45 @@ +using System.Diagnostics.CodeAnalysis; +using RESPite; + +namespace StackExchange.Redis; + +/// +/// Represents a result from vector similarity search operations. 
+/// +[Experimental(Experiments.VectorSets, UrlFormat = Experiments.UrlFormat)] +public readonly struct VectorSetSimilaritySearchResult(RedisValue member, double score = double.NaN, string? attributesJson = null) +{ + /// + /// The member name/identifier in the vectorset. + /// + public RedisValue Member { get; } = member; + + /// + /// The similarity score (0-1) when WITHSCORES is used, NaN otherwise. + /// A score of 1 means identical vectors, 0 means opposite vectors. + /// + public double Score { get; } = score; + + /// + /// The JSON attributes associated with the member when WITHATTRIBS is used, null otherwise. + /// +#if NET8_0_OR_GREATER + [StringSyntax(StringSyntaxAttribute.Json)] +#endif + public string? AttributesJson { get; } = attributesJson; + + /// + public override string ToString() + { + if (double.IsNaN(Score)) + { + return AttributesJson is null + ? Member.ToString() + : $"{Member}: {AttributesJson}"; + } + + return AttributesJson is null + ? $"{Member} ({Score})" + : $"{Member} ({Score}): {AttributesJson}"; + } +} diff --git a/src/StackExchange.Redis/WriteResult.cs b/src/StackExchange.Redis/WriteResult.cs new file mode 100644 index 000000000..b7e87b915 --- /dev/null +++ b/src/StackExchange.Redis/WriteResult.cs @@ -0,0 +1,9 @@ +namespace StackExchange.Redis; + +internal enum WriteResult +{ + Success, + NoConnectionAvailable, + TimeoutBeforeWrite, + WriteFailure, +} diff --git a/tests/BasicTest/BasicTest.csproj b/tests/BasicTest/BasicTest.csproj index 7c283e13f..593d26619 100644 --- a/tests/BasicTest/BasicTest.csproj +++ b/tests/BasicTest/BasicTest.csproj @@ -2,16 +2,18 @@ StackExchange.Redis.BasicTest .NET Core - net472;net5.0 + net472;net8.0 BasicTest Exe BasicTest - win7-x64 - false + + + + diff --git a/tests/BasicTest/Program.cs b/tests/BasicTest/Program.cs index e14a4a67d..2977c42c2 100644 --- a/tests/BasicTest/Program.cs +++ b/tests/BasicTest/Program.cs @@ -21,7 +21,7 @@ internal class CustomConfig : ManualConfig { protected virtual Job 
Configure(Job j) => j.WithGcMode(new GcMode { Force = true }) - //.With(InProcessToolchain.Instance) + // .With(InProcessToolchain.Instance) ; public CustomConfig() @@ -31,20 +31,17 @@ public CustomConfig() AddValidator(JitOptimizationsValidator.FailOnError); AddJob(Configure(Job.Default.WithRuntime(ClrRuntime.Net472))); - AddJob(Configure(Job.Default.WithRuntime(CoreRuntime.Core31))); AddJob(Configure(Job.Default.WithRuntime(CoreRuntime.Core50))); } } - internal class SlowConfig : CustomConfig + internal sealed class SlowConfig : CustomConfig { protected override Job Configure(Job j) => j.WithLaunchCount(1) .WithWarmupCount(1) .WithIterationCount(5); } - /// - /// The tests - /// + [Config(typeof(CustomConfig))] public class RedisBenchmarks : IDisposable { @@ -52,14 +49,10 @@ public class RedisBenchmarks : IDisposable private ConnectionMultiplexer connection; private IDatabase db; - /// - /// Create - /// [GlobalSetup] public void Setup() { // Pipelines.Sockets.Unofficial.SocketConnection.AssertDependencies(); - var options = ConfigurationOptions.Parse("127.0.0.1:6379"); connection = ConnectionMultiplexer.Connect(options); db = connection.GetDatabase(3); @@ -83,15 +76,15 @@ void IDisposable.Dispose() mgr = null; db = null; connection = null; + GC.SuppressFinalize(this); } private const int COUNT = 50; /// - /// Run INCRBY lots of times + /// Run INCRBY lots of times. /// // [Benchmark(Description = "INCRBY/s", OperationsPerInvoke = COUNT)] - public int ExecuteIncrBy() { var rand = new Random(12345); @@ -110,7 +103,7 @@ public int ExecuteIncrBy() } /// - /// Run INCRBY lots of times + /// Run INCRBY lots of times. /// // [Benchmark(Description = "INCRBY/a", OperationsPerInvoke = COUNT)] public async Task ExecuteIncrByAsync() @@ -131,7 +124,7 @@ public async Task ExecuteIncrByAsync() } /// - /// Run GEORADIUS lots of times + /// Run GEORADIUS lots of times. 
/// // [Benchmark(Description = "GEORADIUS/s", OperationsPerInvoke = COUNT)] public int ExecuteGeoRadius() @@ -139,15 +132,14 @@ public int ExecuteGeoRadius() int total = 0; for (int i = 0; i < COUNT; i++) { - var results = db.GeoRadius(GeoKey, 15, 37, 200, GeoUnit.Kilometers, - options: GeoRadiusOptions.WithCoordinates | GeoRadiusOptions.WithDistance | GeoRadiusOptions.WithGeoHash); + var results = db.GeoRadius(GeoKey, 15, 37, 200, GeoUnit.Kilometers, options: GeoRadiusOptions.WithCoordinates | GeoRadiusOptions.WithDistance | GeoRadiusOptions.WithGeoHash); total += results.Length; } return total; } /// - /// Run GEORADIUS lots of times + /// Run GEORADIUS lots of times. /// // [Benchmark(Description = "GEORADIUS/a", OperationsPerInvoke = COUNT)] public async Task ExecuteGeoRadiusAsync() @@ -155,15 +147,14 @@ public async Task ExecuteGeoRadiusAsync() int total = 0; for (int i = 0; i < COUNT; i++) { - var results = await db.GeoRadiusAsync(GeoKey, 15, 37, 200, GeoUnit.Kilometers, - options: GeoRadiusOptions.WithCoordinates | GeoRadiusOptions.WithDistance | GeoRadiusOptions.WithGeoHash).ConfigureAwait(false); + var results = await db.GeoRadiusAsync(GeoKey, 15, 37, 200, GeoUnit.Kilometers, options: GeoRadiusOptions.WithCoordinates | GeoRadiusOptions.WithDistance | GeoRadiusOptions.WithGeoHash).ConfigureAwait(false); total += results.Length; } return total; } /// - /// Run StringSet lots of times + /// Run StringSet lots of times. /// [Benchmark(Description = "StringSet/s", OperationsPerInvoke = COUNT)] public void StringSet() @@ -175,7 +166,7 @@ public void StringSet() } /// - /// Run StringGet lots of times + /// Run StringGet lots of times. /// [Benchmark(Description = "StringGet/s", OperationsPerInvoke = COUNT)] public void StringGet() @@ -187,7 +178,7 @@ public void StringGet() } /// - /// Run HashGetAll lots of times + /// Run HashGetAll lots of times. 
/// [Benchmark(Description = "HashGetAll F+F/s", OperationsPerInvoke = COUNT)] public void HashGetAll_FAF() @@ -200,7 +191,7 @@ public void HashGetAll_FAF() } /// - /// Run HashGetAll lots of times + /// Run HashGetAll lots of times. /// [Benchmark(Description = "HashGetAll F+F/a", OperationsPerInvoke = COUNT)] @@ -213,7 +204,6 @@ public async Task HashGetAllAsync_FAF() } } } -#pragma warning disable CS1591 [Config(typeof(SlowConfig))] public class Issue898 : IDisposable @@ -221,38 +211,42 @@ public class Issue898 : IDisposable private readonly ConnectionMultiplexer mux; private readonly IDatabase db; - public void Dispose() => mux?.Dispose(); + public void Dispose() + { + mux?.Dispose(); + GC.SuppressFinalize(this); + } public Issue898() { mux = ConnectionMultiplexer.Connect("127.0.0.1:6379"); db = mux.GetDatabase(); } - private const int max = 100000; - [Benchmark(OperationsPerInvoke = max)] + private const int Max = 100000; + [Benchmark(OperationsPerInvoke = Max)] public void Load() { - for (int i = 0; i < max; ++i) + for (int i = 0; i < Max; ++i) { db.StringSet(i.ToString(), i); } } - [Benchmark(OperationsPerInvoke = max)] + [Benchmark(OperationsPerInvoke = Max)] public async Task LoadAsync() { - for (int i = 0; i < max; ++i) + for (int i = 0; i < Max; ++i) { await db.StringSetAsync(i.ToString(), i).ConfigureAwait(false); } } - [Benchmark(OperationsPerInvoke = max)] + [Benchmark(OperationsPerInvoke = Max)] public void Sample() { var rnd = new Random(); - for (int i = 0; i < max; ++i) + for (int i = 0; i < Max; ++i) { - var r = rnd.Next(0, max - 1); + var r = rnd.Next(0, Max - 1); var rv = db.StringGet(r.ToString()); if (rv != r) @@ -262,14 +256,14 @@ public void Sample() } } - [Benchmark(OperationsPerInvoke = max)] + [Benchmark(OperationsPerInvoke = Max)] public async Task SampleAsync() { var rnd = new Random(); - for (int i = 0; i < max; ++i) + for (int i = 0; i < Max; ++i) { - var r = rnd.Next(0, max - 1); + var r = rnd.Next(0, Max - 1); var rv = await 
db.StringGetAsync(r.ToString()).ConfigureAwait(false); if (rv != r) diff --git a/tests/BasicTestBaseline/BasicTestBaseline.csproj b/tests/BasicTestBaseline/BasicTestBaseline.csproj index 474dd3e1b..a9f75e441 100644 --- a/tests/BasicTestBaseline/BasicTestBaseline.csproj +++ b/tests/BasicTestBaseline/BasicTestBaseline.csproj @@ -2,12 +2,11 @@ StackExchange.Redis.BasicTest .NET Core - net472;net5.0 + net472;net8.0 BasicTestBaseline Exe BasicTestBaseline - win7-x64 - false + $(DefineConstants);TEST_BASELINE @@ -17,7 +16,9 @@ - + + + diff --git a/tests/ConsoleTest/ConsoleTest.csproj b/tests/ConsoleTest/ConsoleTest.csproj new file mode 100644 index 000000000..b3c1fd998 --- /dev/null +++ b/tests/ConsoleTest/ConsoleTest.csproj @@ -0,0 +1,13 @@ + + + + net8.0 + Exe + enable + enable + + + + + + diff --git a/tests/ConsoleTest/Program.cs b/tests/ConsoleTest/Program.cs new file mode 100644 index 000000000..98d96f259 --- /dev/null +++ b/tests/ConsoleTest/Program.cs @@ -0,0 +1,188 @@ +using System.Diagnostics; +using System.Reflection; +using StackExchange.Redis; + +Stopwatch stopwatch = new Stopwatch(); +stopwatch.Start(); + +var options = ConfigurationOptions.Parse("127.0.0.1"); +#if !SEREDIS_BASELINE +options.HighIntegrity = false; // as needed +Console.WriteLine($"{nameof(options.HighIntegrity)}: {options.HighIntegrity}"); +#endif + +// options.SocketManager = SocketManager.ThreadPool; +Console.WriteLine("Connecting..."); +var connection = ConnectionMultiplexer.Connect(options); +Console.WriteLine("Connected"); +connection.ConnectionFailed += Connection_ConnectionFailed; + +void Connection_ConnectionFailed(object? sender, ConnectionFailedEventArgs e) +{ + Console.Error.WriteLine($"CONNECTION FAILED: {e.ConnectionType}, {e.FailureType}, {e.Exception}"); +} + +var startTime = DateTime.UtcNow; +var startCpuUsage = Process.GetCurrentProcess().TotalProcessorTime; + +var scenario = args?.Length > 0 ? 
args[0] : "mass-insert-async"; + +switch (scenario) +{ + case "parallel": + Console.WriteLine("Parallel task test..."); + ParallelTasks(connection); + break; + case "mass-insert": + Console.WriteLine("Mass insert test..."); + MassInsert(connection); + break; + case "mass-insert-async": + Console.WriteLine("Mass insert (async/pipelined) test..."); + await MassInsertAsync(connection); + break; + case "mass-publish": + Console.WriteLine("Mass publish test..."); + MassPublish(connection); + break; + default: + Console.WriteLine("Scenario " + scenario + " is not recognized"); + break; +} + +stopwatch.Stop(); + +Console.WriteLine(""); +Console.WriteLine($"Done. {stopwatch.ElapsedMilliseconds} ms"); + +var endTime = DateTime.UtcNow; +var endCpuUsage = Process.GetCurrentProcess().TotalProcessorTime; +var cpuUsedMs = (endCpuUsage - startCpuUsage).TotalMilliseconds; +var totalMsPassed = (endTime - startTime).TotalMilliseconds; +var cpuUsageTotal = cpuUsedMs / (Environment.ProcessorCount * totalMsPassed); +Console.WriteLine("Avg CPU: " + (cpuUsageTotal * 100)); +Console.WriteLine("Lib Version: " + GetLibVersion()); + +static void MassInsert(ConnectionMultiplexer connection) +{ + const int NUM_INSERTIONS = 100_000; + const int BATCH = 5000; + int matchErrors = 0; + + var database = connection.GetDatabase(0); + + for (int i = 0; i < NUM_INSERTIONS; i++) + { + var key = $"StackExchange.Redis.Test.{i}"; + var value = i.ToString(); + + database.StringSet(key, value); + var retrievedValue = database.StringGet(key); + + if (retrievedValue != value) + { + matchErrors++; + } + + if (i > 0 && i % BATCH == 0) + { + Console.WriteLine(i); + } + } + + Console.WriteLine($"Match errors: {matchErrors}"); +} + +static async Task MassInsertAsync(ConnectionMultiplexer connection) +{ + const int NUM_INSERTIONS = 100_000; + const int BATCH = 5000; + int matchErrors = 0; + + var database = connection.GetDatabase(0); + + var outstanding = new List<(Task, Task, string)>(BATCH); + + for (int i = 0; i 
< NUM_INSERTIONS; i++) + { + var key = $"StackExchange.Redis.Test.{i}"; + var value = i.ToString(); + + var set = database.StringSetAsync(key, value); + var get = database.StringGetAsync(key); + + outstanding.Add((set, get, value)); + + if (i > 0 && i % BATCH == 0) + { + matchErrors += await ValidateAsync(outstanding); + Console.WriteLine(i); + } + } + + matchErrors += await ValidateAsync(outstanding); + + Console.WriteLine($"Match errors: {matchErrors}"); + + static async Task ValidateAsync(List<(Task, Task, string)> outstanding) + { + int matchErrors = 0; + foreach (var row in outstanding) + { + var s = await row.Item2; + await row.Item1; + if (s != row.Item3) + { + matchErrors++; + } + } + outstanding.Clear(); + return matchErrors; + } +} + +static void ParallelTasks(ConnectionMultiplexer connection) +{ + static void ParallelRun(int taskId, ConnectionMultiplexer connection) + { + Console.Write($"{taskId} Started, "); + var database = connection.GetDatabase(0); + + for (int i = 0; i < 100000; i++) + { + database.StringSet(i.ToString(), i.ToString()); + } + + Console.Write($"{taskId} Insert completed, "); + + for (int i = 0; i < 100000; i++) + { + var result = database.StringGet(i.ToString()); + } + Console.Write($"{taskId} Completed, "); + } + + var taskList = new List(); + for (int i = 0; i < 10; i++) + { + var i1 = i; + var task = new Task(() => ParallelRun(i1, connection)); + task.Start(); + taskList.Add(task); + } + Task.WaitAll(taskList.ToArray()); +} + +static void MassPublish(ConnectionMultiplexer connection) +{ + var subscriber = connection.GetSubscriber(); + Parallel.For(0, 1000, _ => subscriber.Publish(new RedisChannel("cache-events:cache-testing", RedisChannel.PatternMode.Literal), "hey")); +} + +static string GetLibVersion() +{ + var assembly = typeof(ConnectionMultiplexer).Assembly; + return (Attribute.GetCustomAttribute(assembly, typeof(AssemblyFileVersionAttribute)) as AssemblyFileVersionAttribute)?.Version + ?? 
assembly.GetName().Version?.ToString() + ?? "Unknown"; +} diff --git a/tests/ConsoleTestBaseline/ConsoleTestBaseline.csproj b/tests/ConsoleTestBaseline/ConsoleTestBaseline.csproj new file mode 100644 index 000000000..1a6a7149d --- /dev/null +++ b/tests/ConsoleTestBaseline/ConsoleTestBaseline.csproj @@ -0,0 +1,18 @@ + + + + net8.0 + Exe + enable + enable + $(DefineConstants);SEREDIS_BASELINE + + + + + + + + + + diff --git a/tests/NRediSearch.Test/AssemblyInfo.cs b/tests/NRediSearch.Test/AssemblyInfo.cs deleted file mode 100644 index 6a1cf5931..000000000 --- a/tests/NRediSearch.Test/AssemblyInfo.cs +++ /dev/null @@ -1,15 +0,0 @@ -using System; -using Xunit; - -[assembly: CollectionBehavior(CollectionBehavior.CollectionPerAssembly, DisableTestParallelization = true)] - -namespace NRediSearch.Test -{ - - public class AssemblyInfo - { - public AssemblyInfo() - { - } - } -} diff --git a/tests/NRediSearch.Test/Attributes.cs b/tests/NRediSearch.Test/Attributes.cs deleted file mode 100644 index 4354b5ba8..000000000 --- a/tests/NRediSearch.Test/Attributes.cs +++ /dev/null @@ -1,7 +0,0 @@ -namespace NRediSearch.Test -{ - // This is only to make the namespace more-local and not need a using at the top of every test file that's easy to forget - public class FactAttribute : StackExchange.Redis.Tests.FactAttribute { } - - public class TheoryAttribute : StackExchange.Redis.Tests.TheoryAttribute { } -} diff --git a/tests/NRediSearch.Test/ClientTests/AggregationBuilderTests.cs b/tests/NRediSearch.Test/ClientTests/AggregationBuilderTests.cs deleted file mode 100644 index b9ad926f7..000000000 --- a/tests/NRediSearch.Test/ClientTests/AggregationBuilderTests.cs +++ /dev/null @@ -1,186 +0,0 @@ -using System.Threading; -using System.Threading.Tasks; -using NRediSearch.Aggregation; -using NRediSearch.Aggregation.Reducers; -using StackExchange.Redis; -using Xunit; -using Xunit.Abstractions; -using static NRediSearch.Client; - -namespace NRediSearch.Test.ClientTests -{ - public class 
AggregationBuilderTests : RediSearchTestBase - { - public AggregationBuilderTests(ITestOutputHelper output) : base(output) - { - } - - [Fact] - public void TestAggregations() - { - /* - 127.0.0.1:6379> FT.CREATE test_index SCHEMA name TEXT SORTABLE count NUMERIC SORTABLE - OK - 127.0.0.1:6379> FT.ADD test_index data1 1.0 FIELDS name abc count 10 - OK - 127.0.0.1:6379> FT.ADD test_index data2 1.0 FIELDS name def count 5 - OK - 127.0.0.1:6379> FT.ADD test_index data3 1.0 FIELDS name def count 25 - */ - - Client cl = GetClient(); - Schema sc = new Schema(); - - sc.AddSortableTextField("name", 1.0); - sc.AddSortableNumericField("count"); - cl.CreateIndex(sc, new ConfiguredIndexOptions()); - cl.AddDocument(new Document("data1").Set("name", "abc").Set("count", 10)); - cl.AddDocument(new Document("data2").Set("name", "def").Set("count", 5)); - cl.AddDocument(new Document("data3").Set("name", "def").Set("count", 25)); - - AggregationBuilder r = new AggregationBuilder() - .GroupBy("@name", Reducers.Sum("@count").As("sum")) - .SortBy(10, SortedField.Descending("@sum")); - - // actual search - AggregationResult res = cl.Aggregate(r); - Row? r1 = res.GetRow(0); - Assert.NotNull(r1); - Assert.Equal("def", r1.Value.GetString("name")); - Assert.Equal(30, r1.Value.GetInt64("sum")); - Assert.Equal(30.0, r1.Value.GetDouble("sum")); - - Assert.Equal(0L, r1.Value.GetInt64("nosuchcol")); - Assert.Equal(0.0, r1.Value.GetDouble("nosuchcol")); - Assert.Null(r1.Value.GetString("nosuchcol")); - - Row? 
r2 = res.GetRow(1); - - Assert.NotNull(r2); - Assert.Equal("abc", r2.Value.GetString("name")); - Assert.Equal(10L, r2.Value.GetInt64("sum")); - } - - [Fact] - public void TestApplyAndFilterAggregations() - { - /* - 127.0.0.1:6379> FT.CREATE test_index SCHEMA name TEXT SORTABLE subj1 NUMERIC SORTABLE subj2 NUMERIC SORTABLE - OK - 127.0.0.1:6379> FT.ADD test_index data1 1.0 FIELDS name abc subj1 20 subj2 70 - OK - 127.0.0.1:6379> FT.ADD test_index data2 1.0 FIELDS name def subj1 60 subj2 40 - OK - 127.0.0.1:6379> FT.ADD test_index data3 1.0 FIELDS name ghi subj1 50 subj2 80 - OK - 127.0.0.1:6379> FT.ADD test_index data1 1.0 FIELDS name abc subj1 30 subj2 20 - OK - 127.0.0.1:6379> FT.ADD test_index data2 1.0 FIELDS name def subj1 65 subj2 45 - OK - 127.0.0.1:6379> FT.ADD test_index data3 1.0 FIELDS name ghi subj1 70 subj2 70 - OK - */ - - Client cl = GetClient(); - Schema sc = new Schema(); - - sc.AddSortableTextField("name", 1.0); - sc.AddSortableNumericField("subj1"); - sc.AddSortableNumericField("subj2"); - cl.CreateIndex(sc, new ConfiguredIndexOptions()); - cl.AddDocument(new Document("data1").Set("name", "abc").Set("subj1", 20).Set("subj2", 70)); - cl.AddDocument(new Document("data2").Set("name", "def").Set("subj1", 60).Set("subj2", 40)); - cl.AddDocument(new Document("data3").Set("name", "ghi").Set("subj1", 50).Set("subj2", 80)); - cl.AddDocument(new Document("data4").Set("name", "abc").Set("subj1", 30).Set("subj2", 20)); - cl.AddDocument(new Document("data5").Set("name", "def").Set("subj1", 65).Set("subj2", 45)); - cl.AddDocument(new Document("data6").Set("name", "ghi").Set("subj1", 70).Set("subj2", 70)); - - AggregationBuilder r = new AggregationBuilder().Apply("(@subj1+@subj2)/2", "attemptavg") - .GroupBy("@name", Reducers.Avg("@attemptavg").As("avgscore")) - .Filter("@avgscore>=50") - .SortBy(10, SortedField.Ascending("@name")); - - // actual search - AggregationResult res = cl.Aggregate(r); - Row? 
r1 = res.GetRow(0); - Assert.NotNull(r1); - Assert.Equal("def", r1.Value.GetString("name")); - Assert.Equal(52.5, r1.Value.GetDouble("avgscore")); - - Row? r2 = res.GetRow(1); - Assert.NotNull(r2); - Assert.Equal("ghi", r2.Value.GetString("name")); - Assert.Equal(67.5, r2.Value.GetDouble("avgscore")); - } - - [Fact] - public async Task TestCursor() - { - /* - 127.0.0.1:6379> FT.CREATE test_index SCHEMA name TEXT SORTABLE count NUMERIC SORTABLE - OK - 127.0.0.1:6379> FT.ADD test_index data1 1.0 FIELDS name abc count 10 - OK - 127.0.0.1:6379> FT.ADD test_index data2 1.0 FIELDS name def count 5 - OK - 127.0.0.1:6379> FT.ADD test_index data3 1.0 FIELDS name def count 25 - */ - - Client cl = GetClient(); - Schema sc = new Schema(); - sc.AddSortableTextField("name", 1.0); - sc.AddSortableNumericField("count"); - cl.CreateIndex(sc, new ConfiguredIndexOptions()); - cl.AddDocument(new Document("data1").Set("name", "abc").Set("count", 10)); - cl.AddDocument(new Document("data2").Set("name", "def").Set("count", 5)); - cl.AddDocument(new Document("data3").Set("name", "def").Set("count", 25)); - - AggregationBuilder r = new AggregationBuilder() - .GroupBy("@name", Reducers.Sum("@count").As("sum")) - .SortBy(10, SortedField.Descending("@sum")) - .Cursor(1, 3000); - - // actual search - AggregationResult res = cl.Aggregate(r); - Row? row = res.GetRow(0); - Assert.NotNull(row); - Assert.Equal("def", row.Value.GetString("name")); - Assert.Equal(30, row.Value.GetInt64("sum")); - Assert.Equal(30.0, row.Value.GetDouble("sum")); - - Assert.Equal(0L, row.Value.GetInt64("nosuchcol")); - Assert.Equal(0.0, row.Value.GetDouble("nosuchcol")); - Assert.Null(row.Value.GetString("nosuchcol")); - - res = cl.CursorRead(res.CursorId, 1); - Row? 
row2 = res.GetRow(0); - - Assert.NotNull(row2); - Assert.Equal("abc", row2.Value.GetString("name")); - Assert.Equal(10, row2.Value.GetInt64("sum")); - - Assert.True(cl.CursorDelete(res.CursorId)); - - try - { - cl.CursorRead(res.CursorId, 1); - Assert.True(false); - } - catch (RedisException) { } - - AggregationBuilder r2 = new AggregationBuilder() - .GroupBy("@name", Reducers.Sum("@count").As("sum")) - .SortBy(10, SortedField.Descending("@sum")) - .Cursor(1, 1000); - - await Task.Delay(1000).ForAwait(); - - try - { - cl.CursorRead(res.CursorId, 1); - Assert.True(false); - } - catch (RedisException) { } - } - } -} diff --git a/tests/NRediSearch.Test/ClientTests/AggregationTest.cs b/tests/NRediSearch.Test/ClientTests/AggregationTest.cs deleted file mode 100644 index 84c629c19..000000000 --- a/tests/NRediSearch.Test/ClientTests/AggregationTest.cs +++ /dev/null @@ -1,54 +0,0 @@ -using System; -using NRediSearch.Aggregation; -using NRediSearch.Aggregation.Reducers; -using Xunit; -using Xunit.Abstractions; -using static NRediSearch.Client; - -namespace NRediSearch.Test.ClientTests -{ - public class AggregationTest : RediSearchTestBase - { - public AggregationTest(ITestOutputHelper output) : base(output) { } - - [Fact] - [Obsolete] - public void TestAggregations() - { - /* - 127.0.0.1:6379> FT.CREATE test_index SCHEMA name TEXT SORTABLE count NUMERIC SORTABLE - OK - 127.0.0.1:6379> FT.ADD test_index data1 1.0 FIELDS name abc count 10 - OK - 127.0.0.1:6379> FT.ADD test_index data2 1.0 FIELDS name def count 5 - OK - 127.0.0.1:6379> FT.ADD test_index data3 1.0 FIELDS name def count 25 - */ - - Client cl = GetClient(); - Schema sc = new Schema(); - sc.AddSortableTextField("name", 1.0); - sc.AddSortableNumericField("count"); - cl.CreateIndex(sc, new ConfiguredIndexOptions()); - cl.AddDocument(new Document("data1").Set("name", "abc").Set("count", 10)); - cl.AddDocument(new Document("data2").Set("name", "def").Set("count", 5)); - cl.AddDocument(new Document("data3").Set("name", 
"def").Set("count", 25)); - - AggregationRequest r = new AggregationRequest() - .GroupBy("@name", Reducers.Sum("@count").As("sum")) - .SortBy(SortedField.Descending("@sum"), 10); - - // actual search - AggregationResult res = cl.Aggregate(r); - var r1 = res.GetRow(0); - Assert.NotNull(r1); - Assert.Equal("def", r1.Value.GetString("name")); - Assert.Equal(30, r1.Value.GetInt64("sum")); - - var r2 = res.GetRow(1); - Assert.NotNull(r2); - Assert.Equal("abc", r2.Value.GetString("name")); - Assert.Equal(10, r2.Value.GetInt64("sum")); - } - } -} diff --git a/tests/NRediSearch.Test/ClientTests/ClientTest.cs b/tests/NRediSearch.Test/ClientTests/ClientTest.cs deleted file mode 100644 index 60134a473..000000000 --- a/tests/NRediSearch.Test/ClientTests/ClientTest.cs +++ /dev/null @@ -1,881 +0,0 @@ -using System.Collections.Generic; -using System.Text; -using StackExchange.Redis; -using Xunit; -using Xunit.Abstractions; -using static NRediSearch.Client; -using static NRediSearch.Schema; -using static NRediSearch.SuggestionOptions; - -namespace NRediSearch.Test.ClientTests -{ - public class ClientTest : RediSearchTestBase - { - public ClientTest(ITestOutputHelper output) : base(output) { } - - [Fact] - public void Search() - { - Client cl = GetClient(); - - Schema sc = new Schema().AddTextField("title", 1.0).AddTextField("body", 1.0); - - Assert.True(cl.CreateIndex(sc, new ConfiguredIndexOptions())); - var fields = new Dictionary - { - { "title", "hello world" }, - { "body", "lorem ipsum" } - }; - for (int i = 0; i < 100; i++) - { - Assert.True(cl.AddDocument($"doc{i}", fields, (double)i / 100.0)); - } - - SearchResult res = cl.Search(new Query("hello world") { WithScores = true }.Limit(0, 5)); - Assert.Equal(100, res.TotalResults); - Assert.Equal(5, res.Documents.Count); - foreach (var d in res.Documents) - { - Assert.StartsWith("doc", d.Id); - Assert.True(d.Score < 100); - //System.out.println(d); - } - - Assert.True(cl.DeleteDocument("doc0")); - 
Assert.False(cl.DeleteDocument("doc0")); - - res = cl.Search(new Query("hello world")); - Assert.Equal(99, res.TotalResults); - - Assert.True(cl.DropIndex()); - - var ex = Assert.Throws(() => cl.Search(new Query("hello world"))); - Output.WriteLine("Exception: " + ex.Message); - Assert.True(IsMissingIndexException(ex)); - } - - [Fact] - public void TestNumericFilter() - { - Client cl = GetClient(); - - Schema sc = new Schema().AddTextField("title", 1.0).AddNumericField("price"); - - Assert.True(cl.CreateIndex(sc, new ConfiguredIndexOptions())); - - for (int i = 0; i < 100; i++) - { - var fields = new Dictionary - { - { "title", "hello world" }, - { "price", i } - }; - Assert.True(cl.AddDocument($"doc{i}", fields)); - } - - SearchResult res = cl.Search(new Query("hello world"). - AddFilter(new Query.NumericFilter("price", 0, 49))); - Assert.Equal(50, res.TotalResults); - Assert.Equal(10, res.Documents.Count); - foreach (var d in res.Documents) - { - long price = (long)d["price"]; - Assert.True(price >= 0); - Assert.True(price <= 49); - } - - res = cl.Search(new Query("hello world"). - AddFilter(new Query.NumericFilter("price", 0, true, 49, true))); - Assert.Equal(48, res.TotalResults); - Assert.Equal(10, res.Documents.Count); - foreach (var d in res.Documents) - { - long price = (long)d["price"]; - Assert.True(price > 0); - Assert.True(price < 49); - } - res = cl.Search(new Query("hello world"). - AddFilter(new Query.NumericFilter("price", 50, 100))); - Assert.Equal(50, res.TotalResults); - Assert.Equal(10, res.Documents.Count); - foreach (var d in res.Documents) - { - long price = (long)d["price"]; - Assert.True(price >= 50); - Assert.True(price <= 100); - } - - res = cl.Search(new Query("hello world"). - AddFilter(new Query.NumericFilter("price", 20, double.PositiveInfinity))); - Assert.Equal(80, res.TotalResults); - Assert.Equal(10, res.Documents.Count); - - res = cl.Search(new Query("hello world"). 
- AddFilter(new Query.NumericFilter("price", double.NegativeInfinity, 10))); - Assert.Equal(11, res.TotalResults); - Assert.Equal(10, res.Documents.Count); - } - - [Fact] - public void TestStopwords() - { - Client cl = GetClient(); - - Schema sc = new Schema().AddTextField("title", 1.0); - - Assert.True(cl.CreateIndex(sc, new ConfiguredIndexOptions().SetStopwords("foo", "bar", "baz"))); - - var fields = new Dictionary - { - { "title", "hello world foo bar" } - }; - Assert.True(cl.AddDocument("doc1", fields)); - SearchResult res = cl.Search(new Query("hello world")); - Assert.Equal(1, res.TotalResults); - res = cl.Search(new Query("foo bar")); - Assert.Equal(0, res.TotalResults); - - Reset(cl); - - Assert.True(cl.CreateIndex(sc, new ConfiguredIndexOptions().SetNoStopwords())); - fields = new Dictionary - { - { "title", "hello world foo bar to be or not to be" } - }; - Assert.True(cl.AddDocument("doc1", fields)); - - Assert.Equal(1, cl.Search(new Query("hello world")).TotalResults); - Assert.Equal(1, cl.Search(new Query("foo bar")).TotalResults); - Assert.Equal(1, cl.Search(new Query("to be or not to be")).TotalResults); - } - - [Fact] - public void TestGeoFilter() - { - Client cl = GetClient(); - - Schema sc = new Schema().AddTextField("title", 1.0).AddGeoField("loc"); - - Assert.True(cl.CreateIndex(sc, new ConfiguredIndexOptions())); - var fields = new Dictionary - { - { "title", "hello world" }, - { "loc", "-0.441,51.458" } - }; - Assert.True(cl.AddDocument("doc1", fields)); - - fields["loc"] = "-0.1,51.2"; - Assert.True(cl.AddDocument("doc2", fields)); - - SearchResult res = cl.Search(new Query("hello world"). - AddFilter( - new Query.GeoFilter("loc", -0.44, 51.45, - 10, GeoUnit.Kilometers) - )); - - Assert.Equal(1, res.TotalResults); - res = cl.Search(new Query("hello world"). 
- AddFilter( - new Query.GeoFilter("loc", -0.44, 51.45, - 100, GeoUnit.Kilometers) - )); - Assert.Equal(2, res.TotalResults); - } - - [Fact] - public void TestPayloads() - { - Client cl = GetClient(); - - Schema sc = new Schema().AddTextField("title", 1.0); - - Assert.True(cl.CreateIndex(sc, new ConfiguredIndexOptions())); - - var fields = new Dictionary - { - { "title", "hello world" } - }; - const string payload = "foo bar"; - Assert.True(cl.AddDocument("doc1", fields, 1.0, false, false, Encoding.UTF8.GetBytes(payload))); - SearchResult res = cl.Search(new Query("hello world") { WithPayloads = true }); - Assert.Equal(1, res.TotalResults); - Assert.Single(res.Documents); - - Assert.Equal(payload, Encoding.UTF8.GetString(res.Documents[0].Payload)); - } - - [Fact] - public void TestQueryFlags() - { - Client cl = GetClient(); - - Schema sc = new Schema().AddTextField("title", 1.0); - - Assert.True(cl.CreateIndex(sc, new ConfiguredIndexOptions())); - var fields = new Dictionary(); - - for (int i = 0; i < 100; i++) - { - fields["title"] = i % 2 == 1 ? "hello worlds" : "hello world"; - Assert.True(cl.AddDocument($"doc{i}", fields, (double)i / 100.0)); - } - - Query q = new Query("hello").SetWithScores(); - SearchResult res = cl.Search(q); - - Assert.Equal(100, res.TotalResults); - Assert.Equal(10, res.Documents.Count); - - foreach (var d in res.Documents) - { - Assert.StartsWith("doc", d.Id); - Assert.True(d.Score != 1.0); - Assert.StartsWith("hello world", d["title"]); - } - - q = new Query("hello").SetNoContent(); - res = cl.Search(q); - foreach (var d in res.Documents) - { - Assert.StartsWith("doc", d.Id); - Assert.True(d.Score == 1.0); - Assert.True(d["title"].IsNull); - } - - // test verbatim vs. 
stemming - res = cl.Search(new Query("hello worlds")); - Assert.Equal(100, res.TotalResults); - res = cl.Search(new Query("hello worlds").SetVerbatim()); - Assert.Equal(50, res.TotalResults); - - res = cl.Search(new Query("hello a world").SetVerbatim()); - Assert.Equal(50, res.TotalResults); - res = cl.Search(new Query("hello a worlds").SetVerbatim()); - Assert.Equal(50, res.TotalResults); - res = cl.Search(new Query("hello a world").SetVerbatim().SetNoStopwords()); - Assert.Equal(0, res.TotalResults); - } - - [Fact] - public void TestSortQueryFlags() - { - Client cl = GetClient(); - Schema sc = new Schema().AddSortableTextField("title", 1.0); - - Assert.True(cl.CreateIndex(sc, new ConfiguredIndexOptions())); - var fields = new Dictionary - { - ["title"] = "b title" - }; - cl.AddDocument("doc1", fields, 1.0, false, true, null); - - fields["title"] = "a title"; - cl.AddDocument("doc2", fields, 1.0, false, true, null); - - fields["title"] = "c title"; - cl.AddDocument("doc3", fields, 1.0, false, true, null); - - Query q = new Query("title").SetSortBy("title", true); - SearchResult res = cl.Search(q); - - Assert.Equal(3, res.TotalResults); - Document doc1 = res.Documents[0]; - Assert.Equal("a title", doc1["title"]); - - doc1 = res.Documents[1]; - Assert.Equal("b title", doc1["title"]); - - doc1 = res.Documents[2]; - Assert.Equal("c title", doc1["title"]); - } - - [Fact] - public void TestIndexDefinition() - { - Client cl = GetClient(); - Schema sc = new Schema().AddTextField("title", 1.0); - ConfiguredIndexOptions options = new ConfiguredIndexOptions( - new IndexDefinition( prefixes: new string[]{cl.IndexName})); - Assert.True(cl.CreateIndex(sc, options)); - - RedisKey hashKey = (string)cl.IndexName + ":foo"; - Db.KeyDelete(hashKey); - Db.HashSet(hashKey, "title", "hello world"); - - try - { -#pragma warning disable 0618 - Assert.True(cl.AddHash(hashKey, 1, false)); -#pragma warning restore 0618 - } - catch (RedisServerException e) - { - Assert.StartsWith("ERR unknown 
command `FT.ADDHASH`", e.Message); - return; // Starting from RediSearch 2.0 this command is not supported anymore - } - SearchResult res = cl.Search(new Query("hello world").SetVerbatim()); - Assert.Equal(1, res.TotalResults); - Assert.Equal(hashKey, res.Documents[0].Id); - } - - [Fact] - public void TestDrop() - { - Client cl = GetClient(); - - Schema sc = new Schema().AddTextField("title", 1.0); - - Assert.True(cl.CreateIndex(sc, new ConfiguredIndexOptions())); - var fields = new Dictionary - { - { "title", "hello world" } - }; - for (int i = 0; i < 100; i++) - { - Assert.True(cl.AddDocument($"doc{i}", fields)); - } - - SearchResult res = cl.Search(new Query("hello world")); - Assert.Equal(100, res.TotalResults); - - var key = (string)Db.KeyRandom(); - Output.WriteLine("Found key: " + key); - Assert.NotNull(key); - - Reset(cl); - - var indexExists = Db.KeyExists(cl.IndexName); - Assert.False(indexExists); - } - - [Fact] - public void TestAlterAdd() - { - Client cl = GetClient(); - - Schema sc = new Schema().AddTextField("title", 1.0); - - Assert.True(cl.CreateIndex(sc, new ConfiguredIndexOptions())); - var fields = new Dictionary - { - { "title", "hello world" } - }; - for (int i = 0; i < 100; i++) - { - Assert.True(cl.AddDocument($"doc{i}", fields)); - } - - SearchResult res = cl.Search(new Query("hello world")); - Assert.Equal(100, res.TotalResults); - - Assert.True(cl.AlterIndex(new TagField("tags", ","), new TextField("name", 0.5))); - for (int i = 0; i < 100; i++) - { - var fields2 = new Dictionary(); - fields2.Add("name", $"name{i}"); - fields2.Add("tags", $"tagA,tagB,tag{i}"); - Assert.True(cl.UpdateDocument($"doc{i}", fields2, 1.0)); - } - SearchResult res2 = cl.Search(new Query("@tags:{tagA}")); - Assert.Equal(100, res2.TotalResults); - - var info = cl.GetInfoParsed(); - Assert.Equal(cl.IndexName, info.IndexName); - - Assert.True(info.Fields.ContainsKey("tags")); - Assert.Equal("TAG", (string)info.Fields["tags"][2]); - } - - [Fact] - public void 
TestNoStem() - { - Client cl = GetClient(); - - Schema sc = new Schema().AddTextField("stemmed", 1.0).AddField(new TextField("notStemmed", 1.0, false, true)); - Assert.True(cl.CreateIndex(sc, new ConfiguredIndexOptions())); - - var doc = new Dictionary - { - { "stemmed", "located" }, - { "notStemmed", "located" } - }; - // Store it - Assert.True(cl.AddDocument("doc", doc)); - - // Query - SearchResult res = cl.Search(new Query("@stemmed:location")); - Assert.Equal(1, res.TotalResults); - - res = cl.Search(new Query("@notStemmed:location")); - Assert.Equal(0, res.TotalResults); - } - - [Fact] - public void TestInfoParsed() - { - Client cl = GetClient(); - - Schema sc = new Schema().AddTextField("title", 1.0); - Assert.True(cl.CreateIndex(sc, new ConfiguredIndexOptions())); - - var info = cl.GetInfoParsed(); - Assert.Equal(cl.IndexName, info.IndexName); - } - - [Fact] - public void TestInfo() - { - Client cl = GetClient(); - - Schema sc = new Schema().AddTextField("title", 1.0); - Assert.True(cl.CreateIndex(sc, new ConfiguredIndexOptions())); - - var info = cl.GetInfo(); - Assert.Equal(cl.IndexName, info["index_name"]); - } - - [Fact] - public void TestNoIndex() - { - Client cl = GetClient(); - - Schema sc = new Schema() - .AddField(new TextField("f1", 1.0, true, false, true)) - .AddField(new TextField("f2", 1.0)); - cl.CreateIndex(sc, new ConfiguredIndexOptions()); - - var mm = new Dictionary - { - { "f1", "MarkZZ" }, - { "f2", "MarkZZ" } - }; - cl.AddDocument("doc1", mm); - - mm.Clear(); - mm.Add("f1", "MarkAA"); - mm.Add("f2", "MarkBB"); - cl.AddDocument("doc2", mm); - - SearchResult res = cl.Search(new Query("@f1:Mark*")); - Assert.Equal(0, res.TotalResults); - - res = cl.Search(new Query("@f2:Mark*")); - Assert.Equal(2, res.TotalResults); - - res = cl.Search(new Query("@f2:Mark*").SetSortBy("f1", false)); - Assert.Equal(2, res.TotalResults); - - Assert.Equal("doc1", res.Documents[0].Id); - - res = cl.Search(new Query("@f2:Mark*").SetSortBy("f1", true)); - 
Assert.Equal("doc2", res.Documents[0].Id); - } - - [Fact] - public void TestReplacePartial() - { - Client cl = GetClient(); - - Schema sc = new Schema() - .AddTextField("f1", 1.0) - .AddTextField("f2", 1.0) - .AddTextField("f3", 1.0); - cl.CreateIndex(sc, new ConfiguredIndexOptions()); - - var mm = new Dictionary - { - { "f1", "f1_val" }, - { "f2", "f2_val" } - }; - - cl.AddDocument("doc1", mm); - cl.AddDocument("doc2", mm); - - mm.Clear(); - mm.Add("f3", "f3_val"); - - cl.UpdateDocument("doc1", mm, 1.0); - cl.ReplaceDocument("doc2", mm, 1.0); - - // Search for f3 value. All documents should have it. - SearchResult res = cl.Search(new Query("@f3:f3_Val")); - Assert.Equal(2, res.TotalResults); - - res = cl.Search(new Query("@f3:f3_val @f2:f2_val @f1:f1_val")); - Assert.Equal(1, res.TotalResults); - } - - [Fact] - public void TestExplain() - { - Client cl = GetClient(); - - Schema sc = new Schema() - .AddTextField("f1", 1.0) - .AddTextField("f2", 1.0) - .AddTextField("f3", 1.0); - cl.CreateIndex(sc, new ConfiguredIndexOptions()); - - var res = cl.Explain(new Query("@f3:f3_val @f2:f2_val @f1:f1_val")); - Assert.NotNull(res); - Assert.False(res.Length == 0); - Output.WriteLine(res); - } - - [Fact] - public void TestHighlightSummarize() - { - Client cl = GetClient(); - Schema sc = new Schema().AddTextField("text", 1.0); - cl.CreateIndex(sc, new ConfiguredIndexOptions()); - - var doc = new Dictionary - { - { "text", "Redis is often referred as a data structures server. What this means is that Redis provides access to mutable data structures via a set of commands, which are sent using a server-client model with TCP sockets and a simple protocol. So different processes can query and modify the same data structures in a shared way" } - }; - // Add a document - cl.AddDocument("foo", doc, 1.0); - Query q = new Query("data").HighlightFields().SummarizeFields(); - SearchResult res = cl.Search(q); - - Assert.Equal("is often referred as a data structures server. 
What this means is that Redis provides... What this means is that Redis provides access to mutable data structures via a set of commands, which are sent using a... So different processes can query and modify the same data structures in a shared... ", - res.Documents[0]["text"]); - - q = new Query("data").HighlightFields(new Query.HighlightTags("", "")).SummarizeFields(); - res = cl.Search(q); - - Assert.Equal("is often referred as a data structures server. What this means is that Redis provides... What this means is that Redis provides access to mutable data structures via a set of commands, which are sent using a... So different processes can query and modify the same data structures in a shared... ", - res.Documents[0]["text"]); - } - - [Fact] - public void TestLanguage() - { - Client cl = GetClient(); - Schema sc = new Schema().AddTextField("text", 1.0); - cl.CreateIndex(sc, new ConfiguredIndexOptions()); - - Document d = new Document("doc1").Set("text", "hello"); - AddOptions options = new AddOptions().SetLanguage("spanish"); - Assert.True(cl.AddDocument(d, options)); - - options.SetLanguage("ybreski"); - cl.DeleteDocument(d.Id); - - var ex = Assert.Throws(() => cl.AddDocument(d, options)); - Assert.Equal("Unsupported language", ex.Message, ignoreCase: true); - } - - [Fact] - public void TestDropMissing() - { - Client cl = GetClient(); - var ex = Assert.Throws(() => cl.DropIndex()); - Assert.True(IsMissingIndexException(ex)); - } - - [Fact] - public void TestGet() - { - Client cl = GetClient(); - cl.CreateIndex(new Schema().AddTextField("txt1", 1.0), new ConfiguredIndexOptions()); - cl.AddDocument(new Document("doc1").Set("txt1", "Hello World!"), new AddOptions()); - Document d = cl.GetDocument("doc1"); - Assert.NotNull(d); - Assert.Equal("Hello World!", d["txt1"]); - - // Get something that does not exist. 
Shouldn't explode - Assert.Null(cl.GetDocument("nonexist")); - } - - [Fact] - public void TestMGet() - { - Client cl = GetClient(); - - cl.CreateIndex(new Schema().AddTextField("txt1", 1.0), new ConfiguredIndexOptions()); - cl.AddDocument(new Document("doc1").Set("txt1", "Hello World!1"), new AddOptions()); - cl.AddDocument(new Document("doc2").Set("txt1", "Hello World!2"), new AddOptions()); - cl.AddDocument(new Document("doc3").Set("txt1", "Hello World!3"), new AddOptions()); - - var docs = cl.GetDocuments(); - Assert.Empty(docs); - - docs = cl.GetDocuments("doc1", "doc3", "doc4"); - Assert.Equal(3, docs.Length); - Assert.Equal("Hello World!1", docs[0]["txt1"]); - Assert.Equal("Hello World!3", docs[1]["txt1"]); - Assert.Null(docs[2]); - } - - [Fact] - public void TestAddSuggestionGetSuggestionFuzzy() - { - Client cl = GetClient(); - Suggestion suggestion = Suggestion.Builder.String("TOPIC OF WORDS").Score(1).Build(); - // test can add a suggestion string - Assert.True(cl.AddSuggestion(suggestion, true) > 0, $"{suggestion} insert should of returned at least 1"); - // test that the partial part of that string will be returned using fuzzy - - //Assert.Equal(suggestion.ToString() + " suppose to be returned", suggestion, cl.GetSuggestion(suggestion.String.Substring(0, 3), SuggestionOptions.GetBuilder().Build()).get(0)); - Assert.Equal(suggestion.ToString(), cl.GetSuggestions(suggestion.String.Substring(0, 3), SuggestionOptions.Builder.Build())[0].ToString()); - } - - [Fact] - public void TestAddSuggestionGetSuggestion() - { - Client cl = GetClient(); - Suggestion suggestion = Suggestion.Builder.String("ANOTHER_WORD").Score(1).Build(); - Suggestion noMatch = Suggestion.Builder.String("_WORD MISSED").Score(1).Build(); - - Assert.True(cl.AddSuggestion(suggestion, false) > 0, $"{suggestion} should of inserted at least 1"); - Assert.True(cl.AddSuggestion(noMatch, false) > 0, $"{noMatch} should of inserted at least 1"); - - // test that with a partial part of that string 
will have the entire word returned SuggestionOptions.builder().build() - Assert.Single(cl.GetSuggestions(suggestion.String.Substring(0, 3), SuggestionOptions.Builder.Fuzzy().Build())); - - // turn off fuzzy start at second word no hit - Assert.Empty(cl.GetSuggestions(noMatch.String.Substring(1, 6), SuggestionOptions.Builder.Build())); - // my attempt to trigger the fuzzy by 1 character - Assert.Single(cl.GetSuggestions(noMatch.String.Substring(1, 6), SuggestionOptions.Builder.Fuzzy().Build())); - } - - [Fact] - public void TestAddSuggestionGetSuggestionPayloadScores() - { - Client cl = GetClient(); - - Suggestion suggestion = Suggestion.Builder.String("COUNT_ME TOO").Payload("PAYLOADS ROCK ").Score(0.2).Build(); - Assert.True(cl.AddSuggestion(suggestion, false) > 0, $"{suggestion} insert should of at least returned 1"); - Assert.True(cl.AddSuggestion(suggestion.ToBuilder().String("COUNT").Payload("My PAYLOAD is better").Build(), false) > 1, "Count single added should return more than 1"); - Assert.True(cl.AddSuggestion(suggestion.ToBuilder().String("COUNT_ANOTHER").Score(1).Payload(null).Build(), false) > 1, "Count single added should return more than 1"); - - Suggestion noScoreOrPayload = Suggestion.Builder.String("COUNT NO PAYLOAD OR COUNT").Build(); - Assert.True(cl.AddSuggestion(noScoreOrPayload, true) > 1, "Count single added should return more than 1"); - - var payloads = cl.GetSuggestions(suggestion.String.Substring(0, 3), SuggestionOptions.Builder.With(WithOptions.PayloadsAndScores).Build()); - Assert.Equal(4, payloads.Length); - Assert.True(payloads[2].Payload.Length > 0); - Assert.True(payloads[1].Score < .299, "Actual score: " + payloads[1].Score); - } - - [Fact] - public void TestAddSuggestionGetSuggestionPayload() - { - Client cl = GetClient(); - cl.AddSuggestion(Suggestion.Builder.String("COUNT_ME TOO").Payload("PAYLOADS ROCK ").Build(), false); - cl.AddSuggestion(Suggestion.Builder.String("COUNT").Payload("ANOTHER PAYLOAD ").Build(), false); - 
cl.AddSuggestion(Suggestion.Builder.String("COUNTNO PAYLOAD OR COUNT").Build(), false); - - // test that with a partial part of that string will have the entire word returned - var payloads = cl.GetSuggestions("COU", SuggestionOptions.Builder.Max(3).Fuzzy().With(WithOptions.Payloads).Build()); - Assert.Equal(3, payloads.Length); - } - - [Fact] - public void TestGetSuggestionNoPayloadTwoOnly() - { - Client cl = GetClient(); - - cl.AddSuggestion(Suggestion.Builder.String("DIFF_WORD").Score(0.4).Payload("PAYLOADS ROCK ").Build(), false); - cl.AddSuggestion(Suggestion.Builder.String("DIFF wording").Score(0.5).Payload("ANOTHER PAYLOAD ").Build(), false); - cl.AddSuggestion(Suggestion.Builder.String("DIFFERENT").Score(0.7).Payload("I am a payload").Build(), false); - - var payloads = cl.GetSuggestions("DIF", SuggestionOptions.Builder.Max(2).Build()); - Assert.Equal(2, payloads.Length); - - var three = cl.GetSuggestions("DIF", SuggestionOptions.Builder.Max(3).Build()); - Assert.Equal(3, three.Length); - } - - [Fact] - public void TestGetSuggestionsAsStringArray() - { - Client cl = GetClient(); - - cl.AddSuggestion(Suggestion.Builder.String("DIFF_WORD").Score(0.4).Payload("PAYLOADS ROCK ").Build(), false); - cl.AddSuggestion(Suggestion.Builder.String("DIFF wording").Score(0.5).Payload("ANOTHER PAYLOAD ").Build(), false); - cl.AddSuggestion(Suggestion.Builder.String("DIFFERENT").Score(0.7).Payload("I am a payload").Build(), false); - - var payloads = cl.GetSuggestions("DIF", max: 2); - Assert.Equal(2, payloads.Length); - - var three = cl.GetSuggestions("DIF", max: 3); - Assert.Equal(3, three.Length); - } - - [Fact] - public void TestGetSuggestionWithScore() - { - Client cl = GetClient(); - - cl.AddSuggestion(Suggestion.Builder.String("DIFF_WORD").Score(0.4).Payload("PAYLOADS ROCK ").Build(), true); - var list = cl.GetSuggestions("DIF", SuggestionOptions.Builder.Max(2).With(WithOptions.Scores).Build()); - Assert.True(list[0].Score <= .2, "Actual score: " + list[0].Score); - 
} - - [Fact] - public void TestGetSuggestionAllNoHit() - { - Client cl = GetClient(); - - cl.AddSuggestion(Suggestion.Builder.String("NO WORD").Score(0.4).Build(), false); - - var none = cl.GetSuggestions("DIF", SuggestionOptions.Builder.Max(3).With(WithOptions.Scores).Build()); - Assert.Empty(none); - } - - [Fact] - public void TestGetTagField() - { - Client cl = GetClient(); - Schema sc = new Schema() - .AddTextField("title", 1.0) - .AddTagField("category"); - - Assert.True(cl.CreateIndex(sc, new ConfiguredIndexOptions())); - - var search = cl.Search(new Query("hello")); - Output.WriteLine("Initial search: " + search.TotalResults); - Assert.Equal(0, search.TotalResults); - - var fields1 = new Dictionary(); - fields1.Add("title", "hello world"); - fields1.Add("category", "red"); - Assert.True(cl.AddDocument("foo", fields1)); - var fields2 = new Dictionary(); - fields2.Add("title", "hello world"); - fields2.Add("category", "blue"); - Assert.True(cl.AddDocument("bar", fields2)); - var fields3 = new Dictionary(); - fields3.Add("title", "hello world"); - fields3.Add("category", "green,yellow"); - Assert.True(cl.AddDocument("baz", fields3)); - var fields4 = new Dictionary(); - fields4.Add("title", "hello world"); - fields4.Add("category", "orange;purple"); - Assert.True(cl.AddDocument("qux", fields4)); - - Assert.Equal(1, cl.Search(new Query("@category:{red}")).TotalResults); - Assert.Equal(1, cl.Search(new Query("@category:{blue}")).TotalResults); - Assert.Equal(1, cl.Search(new Query("hello @category:{red}")).TotalResults); - Assert.Equal(1, cl.Search(new Query("hello @category:{blue}")).TotalResults); - Assert.Equal(1, cl.Search(new Query("@category:{yellow}")).TotalResults); - Assert.Equal(0, cl.Search(new Query("@category:{purple}")).TotalResults); - Assert.Equal(1, cl.Search(new Query("@category:{orange\\;purple}")).TotalResults); - search = cl.Search(new Query("hello")); - Output.WriteLine("Post-search: " + search.TotalResults); - foreach (var doc in 
search.Documents) - { - Output.WriteLine("Found: " + doc.Id); - } - Assert.Equal(4, search.TotalResults); - } - - [Fact] - public void TestGetTagFieldWithNonDefaultSeparator() - { - Client cl = GetClient(); - Schema sc = new Schema() - .AddTextField("title", 1.0) - .AddTagField("category", ";"); - - Assert.True(cl.CreateIndex(sc, new ConfiguredIndexOptions())); - var fields1 = new Dictionary(); - fields1.Add("title", "hello world"); - fields1.Add("category", "red"); - Assert.True(cl.AddDocument("foo", fields1)); - var fields2 = new Dictionary(); - fields2.Add("title", "hello world"); - fields2.Add("category", "blue"); - Assert.True(cl.AddDocument("bar", fields2)); - var fields3 = new Dictionary(); - fields3.Add("title", "hello world"); - fields3.Add("category", "green;yellow"); - Assert.True(cl.AddDocument("baz", fields3)); - var fields4 = new Dictionary(); - fields4.Add("title", "hello world"); - fields4.Add("category", "orange,purple"); - Assert.True(cl.AddDocument("qux", fields4)); - - Assert.Equal(1, cl.Search(new Query("@category:{red}")).TotalResults); - Assert.Equal(1, cl.Search(new Query("@category:{blue}")).TotalResults); - Assert.Equal(1, cl.Search(new Query("hello @category:{red}")).TotalResults); - Assert.Equal(1, cl.Search(new Query("hello @category:{blue}")).TotalResults); - Assert.Equal(1, cl.Search(new Query("hello @category:{yellow}")).TotalResults); - Assert.Equal(0, cl.Search(new Query("@category:{purple}")).TotalResults); - Assert.Equal(1, cl.Search(new Query("@category:{orange\\,purple}")).TotalResults); - Assert.Equal(4, cl.Search(new Query("hello")).TotalResults); - } - - [Fact] - public void TestMultiDocuments() - { - Client cl = GetClient(); - Schema sc = new Schema().AddTextField("title", 1.0).AddTextField("body", 1.0); - - Assert.True(cl.CreateIndex(sc, new ConfiguredIndexOptions())); - - var fields = new Dictionary(); - fields.Add("title", "hello world"); - fields.Add("body", "lorem ipsum"); - - var results = cl.AddDocuments(new 
Document("doc1", fields), new Document("doc2", fields), new Document("doc3", fields)); - - Assert.Equal(new[] { true, true, true }, results); - - Assert.Equal(3, cl.Search(new Query("hello world")).TotalResults); - - results = cl.AddDocuments(new Document("doc4", fields), new Document("doc2", fields), new Document("doc5", fields)); - Assert.Equal(new[] { true, false, true }, results); - - results = cl.DeleteDocuments(true, "doc1", "doc2", "doc36"); - Assert.Equal(new[] { true, true, false }, results); - } - - [Fact] - public void TestReturnFields() - { - Client cl = GetClient(); - - Schema sc = new Schema().AddTextField("field1", 1.0).AddTextField("field2", 1.0); - Assert.True(cl.CreateIndex(sc, new ConfiguredIndexOptions())); - - - var doc = new Dictionary(); - doc.Add("field1", "value1"); - doc.Add("field2", "value2"); - // Store it - Assert.True(cl.AddDocument("doc", doc)); - - // Query - SearchResult res = cl.Search(new Query("*").ReturnFields("field1")); - Assert.Equal(1, res.TotalResults); - Assert.Equal("value1", res.Documents[0]["field1"]); - Assert.Null((string)res.Documents[0]["field2"]); - } - - [Fact] - public void TestInKeys() - { - Client cl = GetClient(); - Schema sc = new Schema().AddTextField("field1", 1.0).AddTextField("field2", 1.0); - Assert.True(cl.CreateIndex(sc, new ConfiguredIndexOptions())); - - var doc = new Dictionary(); - doc.Add("field1", "value"); - doc.Add("field2", "not"); - - // Store it - Assert.True(cl.AddDocument("doc1", doc)); - Assert.True(cl.AddDocument("doc2", doc)); - - // Query - SearchResult res = cl.Search(new Query("value").LimitKeys("doc1")); - Assert.Equal(1, res.TotalResults); - Assert.Equal("doc1", res.Documents[0].Id); - Assert.Equal("value", res.Documents[0]["field1"]); - Assert.Null((string)res.Documents[0]["value"]); - } - } -} diff --git a/tests/NRediSearch.Test/ExampleUsage.cs b/tests/NRediSearch.Test/ExampleUsage.cs deleted file mode 100644 index ad1ae7270..000000000 --- 
a/tests/NRediSearch.Test/ExampleUsage.cs +++ /dev/null @@ -1,191 +0,0 @@ -using System.Collections.Generic; -using System.Linq; -using StackExchange.Redis; -using Xunit; -using Xunit.Abstractions; -using static NRediSearch.Client; - -namespace NRediSearch.Test -{ - public class ExampleUsage : RediSearchTestBase - { - public ExampleUsage(ITestOutputHelper output) : base(output) { } - - [Fact] - public void BasicUsage() - { - var client = GetClient(); - - try { client.DropIndex(); } catch { /* Intentionally ignored */ } // reset DB - - // Defining a schema for an index and creating it: - var sc = new Schema() - .AddTextField("title", 5.0) - .AddTextField("body", 1.0) - .AddNumericField("price"); - - bool result = false; - try - { - result = client.CreateIndex(sc, new ConfiguredIndexOptions()); - } - catch (RedisServerException ex) - { - // TODO: Convert to Skip - if (ex.Message == "ERR unknown command 'FT.CREATE'") - { - Output.WriteLine(ex.Message); - Output.WriteLine("Module not installed, aborting"); - } - throw; - } - - Assert.True(result); - - // note: using java API equivalent here; it would be nice to - // use meta-programming / reflection instead in .NET - - // Adding documents to the index: - var fields = new Dictionary - { - ["title"] = "hello world", - ["body"] = "lorem ipsum", - ["price"] = 1337 - }; - - Assert.True(client.AddDocument("doc1", fields)); - - // Creating a complex query - var q = new Query("hello world") - .AddFilter(new Query.NumericFilter("price", 1300, 1350)) - .Limit(0, 5); - - // actual search - var res = client.Search(q); - - Assert.Equal(1, res.TotalResults); - var item = res.Documents.Single(); - Assert.Equal("doc1", item.Id); - - Assert.True(item.HasProperty("title")); - Assert.True(item.HasProperty("body")); - Assert.True(item.HasProperty("price")); - Assert.False(item.HasProperty("blap")); - - Assert.Equal("hello world", item["title"]); - Assert.Equal("lorem ipsum", item["body"]); - Assert.Equal(1337, (int)item["price"]); - } - - 
[Fact] - public void BasicScoringUsage() - { - var client = GetClient(); - - try { client.DropIndex(); } catch { /* Intentionally ignored */ } // reset DB - - CreateSchema(client); - - var term = "petit*"; - - var query = new Query(term); - query.Limit(0, 10); - query.WithScores = true; - - var searchResult = client.Search(query); - - var docResult = searchResult.Documents.FirstOrDefault(); - - Assert.Equal(1, searchResult.TotalResults); - Assert.NotEqual(0, docResult.Score); - Assert.Equal("1", docResult.Id); - Assert.Null(docResult.ScoreExplained); - } - - [Fact] - public void BasicScoringUsageWithExplainScore() - { - var client = GetClient(); - - try { client.DropIndex(); } catch { /* Intentionally ignored */ } // reset DB - - CreateSchema(client); - - var term = "petit*"; - - var query = new Query(term); - query.Limit(0, 10); - query.WithScores = true; - query.Scoring = "TFIDF"; - query.ExplainScore = true; - - var searchResult = client.Search(query); - - var docResult = searchResult.Documents.FirstOrDefault(); - - Assert.Equal(1, searchResult.TotalResults); - Assert.NotEqual(0, docResult.Score); - Assert.Equal("1", docResult.Id); - Assert.NotEmpty(docResult.ScoreExplained); - Assert.Equal("Final TFIDF : words TFIDF 1.00 * document score 1.00 / norm 2 / slop 1", docResult.ScoreExplained[0]); - Assert.Equal("(Weight 1.00 * total children TFIDF 1.00)", docResult.ScoreExplained[1]); - Assert.Equal("(TFIDF 1.00 = Weight 1.00 * TF 1 * IDF 1.00)", docResult.ScoreExplained[2]); - } - - [Fact] - public void BasicScoringUsageWithExplainScoreDifferentScorer() - { - var client = GetClient(); - - try { client.DropIndex(); } catch { /* Intentionally ignored */ } // reset DB - - CreateSchema(client); - - var term = "petit*"; - - var query = new Query(term); - query.Limit(0, 10); - query.WithScores = true; - query.Scoring = "TFIDF.DOCNORM"; - query.ExplainScore = true; - - var searchResult = client.Search(query); - - var docResult = searchResult.Documents.FirstOrDefault(); - 
- Assert.Equal(1, searchResult.TotalResults); - Assert.NotEqual(0, docResult.Score); - Assert.Equal("1", docResult.Id); - Assert.NotEmpty(docResult.ScoreExplained); - Assert.Equal("Final TFIDF : words TFIDF 1.00 * document score 1.00 / norm 20 / slop 1", docResult.ScoreExplained[0]); - Assert.Equal("(Weight 1.00 * total children TFIDF 1.00)", docResult.ScoreExplained[1]); - Assert.Equal("(TFIDF 1.00 = Weight 1.00 * TF 1 * IDF 1.00)", docResult.ScoreExplained[2]); - } - - private void CreateSchema(Client client) - { - var schema = new Schema(); - - schema - .AddSortableTextField("title") - .AddTextField("country") - .AddTextField("author") - .AddTextField("aka") - .AddTagField("language"); - - client.CreateIndex(schema, new ConfiguredIndexOptions()); - - var doc = new Document("1"); - - doc - .Set("title", "Le Petit Prince") - .Set("country", "France") - .Set("author", "Antoine de Saint-Exupéry") - .Set("language", "fr_FR") - .Set("aka", "The Little Prince, El Principito"); - - client.AddDocument(doc); - } - } -} - diff --git a/tests/NRediSearch.Test/Issues/Issue940.cs b/tests/NRediSearch.Test/Issues/Issue940.cs deleted file mode 100644 index df68f67a4..000000000 --- a/tests/NRediSearch.Test/Issues/Issue940.cs +++ /dev/null @@ -1,17 +0,0 @@ -using Xunit; - -namespace NRediSearch.Test -{ - public class Issue940 - { - [Fact] - public void Paging_Boxing() - { - for(int i = -20; i < 100; i++) - { - var boxed = i.Boxed(); - Assert.Equal(i, (int)boxed); - } - } - } -} diff --git a/tests/NRediSearch.Test/NRediSearch.Test.csproj b/tests/NRediSearch.Test/NRediSearch.Test.csproj deleted file mode 100644 index 50512a200..000000000 --- a/tests/NRediSearch.Test/NRediSearch.Test.csproj +++ /dev/null @@ -1,14 +0,0 @@ - - - Library - netcoreapp3.1 - true - true - - - - - - - - \ No newline at end of file diff --git a/tests/NRediSearch.Test/QueryBuilder/BuilderTest.cs b/tests/NRediSearch.Test/QueryBuilder/BuilderTest.cs deleted file mode 100644 index cd15c136b..000000000 --- 
a/tests/NRediSearch.Test/QueryBuilder/BuilderTest.cs +++ /dev/null @@ -1,140 +0,0 @@ -using System; -using System.Collections.Generic; -using NRediSearch.Aggregation; -using NRediSearch.QueryBuilder; -using StackExchange.Redis; -using Xunit; -using Xunit.Abstractions; -using static NRediSearch.Aggregation.Reducers.Reducers; -using static NRediSearch.Aggregation.SortedField; -using static NRediSearch.QueryBuilder.QueryBuilder; -using static NRediSearch.QueryBuilder.Values; - -namespace NRediSearch.Test.QueryBuilder -{ - public class BuilderTest : RediSearchTestBase - { - public BuilderTest(ITestOutputHelper output) : base(output) { } - - [Fact] - public void TestTag() - { - Value v = Tags("foo"); - Assert.Equal("{foo}", v.ToString()); - v = Tags("foo", "bar"); - Assert.Equal("{foo | bar}", v.ToString()); - } - - [Fact] - public void TestEmptyTag() - { - Assert.Throws(() => Tags()); - } - - [Fact] - public void TestRange() - { - Value v = Between(1, 10); - Assert.Equal("[1.0 10.0]", v.ToString()); - v = Between(1, 10).InclusiveMax(false); - Assert.Equal("[1.0 (10.0]", v.ToString()); - v = Between(1, 10).InclusiveMin(false); - Assert.Equal("[(1.0 10.0]", v.ToString()); - - // le, gt, etc. 
- Assert.Equal("[42.0 42.0]", Equal(42).ToString()); - Assert.Equal("[-inf (42.0]", LessThan(42).ToString()); - Assert.Equal("[-inf 42.0]", LessThanOrEqual(42).ToString()); - Assert.Equal("[(42.0 inf]", GreaterThan(42).ToString()); - Assert.Equal("[42.0 inf]", GreaterThanOrEqual(42).ToString()); - - // string value - Assert.Equal("s", Value("s").ToString()); - - // Geo value - Assert.Equal("[1.0 2.0 3.0 km]", - new GeoValue(1.0, 2.0, 3.0, GeoUnit.Kilometers).ToString()); - } - - [Fact] - public void TestIntersectionBasic() - { - INode n = Intersect().Add("name", "mark"); - Assert.Equal("@name:mark", n.ToString()); - - n = Intersect().Add("name", "mark", "dvir"); - Assert.Equal("@name:(mark dvir)", n.ToString()); - } - - [Fact] - public void TestIntersectionNested() - { - INode n = Intersect(). - Add(Union("name", Value("mark"), Value("dvir"))). - Add("time", Between(100, 200)). - Add(Disjunct("created", LessThan(1000))); - Assert.Equal("(@name:(mark|dvir) @time:[100.0 200.0] -@created:[-inf (1000.0])", n.ToString()); - } - - private static string GetArgsString(AggregationRequest request) - { - var args = new List(); - request.SerializeRedisArgs(args); - return string.Join(" ", args); - } - - [Fact] - public void TestAggregation() - { - Assert.Equal("*", GetArgsString(new AggregationRequest())); - AggregationRequest r = new AggregationRequest(). - GroupBy("@actor", Count().As("cnt")). - SortBy(Descending("@cnt")); - Assert.Equal("* GROUPBY 1 @actor REDUCE COUNT 0 AS cnt SORTBY 2 @cnt DESC", GetArgsString(r)); - - r = new AggregationRequest().GroupBy("@brand", - Quantile("@price", 0.50).As("q50"), - Quantile("@price", 0.90).As("q90"), - Quantile("@price", 0.95).As("q95"), - Avg("@price"), - Count().As("count")). - SortByDescending("@count"). 
- Limit(10); - Assert.Equal("* GROUPBY 1 @brand REDUCE QUANTILE 2 @price 0.5 AS q50 REDUCE QUANTILE 2 @price 0.9 AS q90 REDUCE QUANTILE 2 @price 0.95 AS q95 REDUCE AVG 1 @price REDUCE COUNT 0 AS count LIMIT 0 10 SORTBY 2 @count DESC", - GetArgsString(r)); - } - - [Fact] - public void TestAggregationBuilder() - { - Assert.Equal("*", new AggregationBuilder().GetArgsString()); - - AggregationBuilder r1 = new AggregationBuilder() - .GroupBy("@actor", Count().As("cnt")) - .SortBy(Descending("@cnt")); - - Assert.Equal("* GROUPBY 1 @actor REDUCE COUNT 0 AS cnt SORTBY 2 @cnt DESC", r1.GetArgsString()); - - Group group = new Group("@brand") - .Reduce(Quantile("@price", 0.50).As("q50")) - .Reduce(Quantile("@price", 0.90).As("q90")) - .Reduce(Quantile("@price", 0.95).As("q95")) - .Reduce(Avg("@price")) - .Reduce(Count().As("count")); - AggregationBuilder r2 = new AggregationBuilder() - .GroupBy(group) - .Limit(10) - .SortByDescending("@count"); - - Assert.Equal("* GROUPBY 1 @brand REDUCE QUANTILE 2 @price 0.5 AS q50 REDUCE QUANTILE 2 @price 0.9 AS q90 REDUCE QUANTILE 2 @price 0.95 AS q95 REDUCE AVG 1 @price REDUCE COUNT 0 AS count LIMIT 0 10 SORTBY 2 @count DESC", - r2.GetArgsString()); - - AggregationBuilder r3 = new AggregationBuilder() - .Load("@count") - .Apply("@count%1000", "thousands") - .SortBy(Descending("@count")) - .Limit(0, 2); - Assert.Equal("* LOAD 1 @count APPLY @count%1000 AS thousands SORTBY 2 @count DESC LIMIT 0 2", r3.GetArgsString()); - } - } -} diff --git a/tests/NRediSearch.Test/QueryTest.cs b/tests/NRediSearch.Test/QueryTest.cs deleted file mode 100644 index 53e033202..000000000 --- a/tests/NRediSearch.Test/QueryTest.cs +++ /dev/null @@ -1,189 +0,0 @@ -using System.Collections.Generic; -using Xunit; - -namespace NRediSearch.Test -{ - public class QueryTest - { - public static Query GetQuery() => new Query("hello world"); - - [Fact] - public void GetNoContent() - { - var query = GetQuery(); - Assert.False(query.NoContent); - Assert.Same(query, 
query.SetNoContent()); - Assert.True(query.NoContent); - } - - [Fact] - public void GetWithScores() - { - var query = GetQuery(); - Assert.False(query.WithScores); - Assert.Same(query, query.SetWithScores()); - Assert.True(query.WithScores); - } - - [Fact] - public void SerializeRedisArgs() - { - var query = new Query("hello world") - { - NoContent = true, - Language = "", - NoStopwords = true, - Verbatim = true, - WithPayloads = true, - WithScores = true, - Scoring = "TFIDF.DOCNORM", - ExplainScore = true - }; - - var args = new List(); - query.SerializeRedisArgs(args); - - Assert.Equal(11, args.Count); - Assert.Equal(query.QueryString, (string)args[0]); - Assert.Contains("NOCONTENT".Literal(), args); - Assert.Contains("NOSTOPWORDS".Literal(), args); - Assert.Contains("VERBATIM".Literal(), args); - Assert.Contains("WITHPAYLOADS".Literal(), args); - Assert.Contains("WITHSCORES".Literal(), args); - Assert.Contains("LANGUAGE".Literal(), args); - Assert.Contains("", args); - Assert.Contains("SCORER".Literal(), args); - Assert.Contains("TFIDF.DOCNORM", args); - Assert.Contains("EXPLAINSCORE".Literal(), args); - - var languageIndex = args.IndexOf("LANGUAGE".Literal()); - Assert.Equal("", args[languageIndex + 1]); - - var scoringIndex = args.IndexOf("SCORER".Literal()); - Assert.Equal("TFIDF.DOCNORM", args[scoringIndex + 1]); - } - - [Fact] - public void Limit() - { - var query = GetQuery(); - Assert.Equal(0, query._paging.Offset); - Assert.Equal(10, query._paging.Count); - Assert.Same(query, query.Limit(1, 30)); - Assert.Equal(1, query._paging.Offset); - Assert.Equal(30, query._paging.Count); - } - - [Fact] - public void AddFilter() - { - var query = GetQuery(); - Assert.Empty(query._filters); - Query.NumericFilter f = new Query.NumericFilter("foo", 0, 100); - Assert.Same(query, query.AddFilter(f)); - Assert.Same(f, query._filters[0]); - } - - [Fact] - public void SetVerbatim() - { - var query = GetQuery(); - Assert.False(query.Verbatim); - Assert.Same(query, 
query.SetVerbatim()); - Assert.True(query.Verbatim); - } - - [Fact] - public void SetNoStopwords() - { - var query = GetQuery(); - Assert.False(query.NoStopwords); - Assert.Same(query, query.SetNoStopwords()); - Assert.True(query.NoStopwords); - } - - [Fact] - public void SetLanguage() - { - var query = GetQuery(); - Assert.Null(query.Language); - Assert.Same(query, query.SetLanguage("chinese")); - Assert.Equal("chinese", query.Language); - } - - [Fact] - public void LimitFields() - { - var query = GetQuery(); - Assert.Null(query._fields); - Assert.Same(query, query.LimitFields("foo", "bar")); - Assert.Equal(2, query._fields.Length); - } - - [Fact] - public void ReturnFields() - { - var query = GetQuery(); - - Assert.Null(query._returnFields); - Assert.Same(query, query.ReturnFields("foo", "bar")); - Assert.Equal(2, query._returnFields.Length); - } - - [Fact] - public void HighlightFields() - { - var query = GetQuery(); - Assert.False(query._wantsHighlight); - Assert.Null(query._highlightFields); - - query = new Query("Hello"); - Assert.Same(query, query.HighlightFields("foo", "bar")); - Assert.Equal(2, query._highlightFields.Length); - Assert.Null(query._highlightTags); - Assert.True(query._wantsHighlight); - - query = new Query("Hello").HighlightFields(); - Assert.Null(query._highlightFields); - Assert.Null(query._highlightTags); - Assert.True(query._wantsHighlight); - - Assert.Same(query, query.HighlightFields(new Query.HighlightTags("", ""))); - Assert.Null(query._highlightFields); - Assert.NotNull(query._highlightTags); - Assert.Equal("", query._highlightTags.Value.Open); - Assert.Equal("", query._highlightTags.Value.Close); - } - - [Fact] - public void SummarizeFields() - { - var query = GetQuery(); - Assert.False(query._wantsSummarize); - Assert.Null(query._summarizeFields); - - query = new Query("Hello"); - Assert.Equal(query, query.SummarizeFields()); - Assert.True(query._wantsSummarize); - Assert.Null(query._summarizeFields); - Assert.Equal(-1, 
query._summarizeFragmentLen); - Assert.Equal(-1, query._summarizeNumFragments); - - query = new Query("Hello"); - Assert.Equal(query, query.SummarizeFields("someField")); - Assert.True(query._wantsSummarize); - Assert.Single(query._summarizeFields); - Assert.Equal(-1, query._summarizeFragmentLen); - Assert.Equal(-1, query._summarizeNumFragments); - } - - [Fact] - public void SetScoring() - { - var query = GetQuery(); - Assert.Null(query.Scoring); - Assert.Same(query, query.SetScoring("TFIDF.DOCNORM")); - Assert.Equal("TFIDF.DOCNORM", query.Scoring); - } - } -} diff --git a/tests/NRediSearch.Test/RediSearchTestBase.cs b/tests/NRediSearch.Test/RediSearchTestBase.cs deleted file mode 100644 index ede002f1e..000000000 --- a/tests/NRediSearch.Test/RediSearchTestBase.cs +++ /dev/null @@ -1,165 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Runtime.CompilerServices; -using StackExchange.Redis; -using StackExchange.Redis.Tests; -using Xunit; -using Xunit.Abstractions; - -namespace NRediSearch.Test -{ - [Collection(nameof(NonParallelCollection))] - public abstract class RediSearchTestBase : IDisposable - { - protected readonly ITestOutputHelper Output; - protected RediSearchTestBase(ITestOutputHelper output) - { - muxer = GetWithFT(output); - Output = output; - Db = muxer.GetDatabase(); - var server = muxer.GetServer(muxer.GetEndPoints()[0]); - server.FlushDatabase(); - } - private ConnectionMultiplexer muxer; - protected IDatabase Db { get; private set; } - - public void Dispose() - { - muxer?.Dispose(); - muxer = null; - Db = null; - } - - protected Client GetClient([CallerFilePath] string filePath = null, [CallerMemberName] string caller = null) - { - // Remove all that extra pathing - var offset = filePath?.IndexOf("NRediSearch.Test"); - if (offset > -1) - { - filePath = filePath.Substring(offset.Value + "NRediSearch.Test".Length + 1); - } - - var indexName = $"{filePath}:{caller}"; - Output.WriteLine("Using Index: " + indexName); - var exists = 
Db.KeyExists("idx:" + indexName); - Output.WriteLine("Key existed: " + exists); - - var client = new Client(indexName, Db); - var wasReset = Reset(client); - Output.WriteLine("Index was reset?: " + wasReset); - return client; - } - - protected bool Reset(Client client) - { - Output.WriteLine("Resetting index"); - try - { - var result = client.DropIndex(); // tests create them - Output.WriteLine(" Result: " + result); - return result; - } - catch (RedisServerException ex) - { - if (string.Equals("Unknown Index name", ex.Message, StringComparison.InvariantCultureIgnoreCase)) - { - Output.WriteLine(" Unknown index name"); - return true; - } - if (string.Equals("no such index", ex.Message, StringComparison.InvariantCultureIgnoreCase)) - { - Output.WriteLine(" No such index"); - return true; - } - else - { - throw; - } - } - } - - private static bool instanceMissing; - - internal static ConnectionMultiplexer GetWithFT(ITestOutputHelper output) - { - var options = new ConfigurationOptions - { - EndPoints = { TestConfig.Current.RediSearchServerAndPort }, - AllowAdmin = true, - ConnectTimeout = 2000, - SyncTimeout = 15000, - }; - static void InstanceMissing() => Skip.Inconclusive("NRedisSearch instance available at " + TestConfig.Current.RediSearchServerAndPort); - // Don't timeout every single test - optimization - if (instanceMissing) - { - InstanceMissing(); - } - - ConnectionMultiplexer conn = null; - try - { - conn = ConnectionMultiplexer.Connect(options); - conn.MessageFaulted += (msg, ex, origin) => output.WriteLine($"Faulted from '{origin}': '{msg}' - '{(ex == null ? "(null)" : ex.Message)}'"); - conn.Connecting += (e, t) => output.WriteLine($"Connecting to {Format.ToString(e)} as {t}"); - conn.Closing += complete => output.WriteLine(complete ? "Closed" : "Closing..."); - } - catch (RedisConnectionException) - { - instanceMissing = true; - InstanceMissing(); - } - - // If say we're on a 3.x Redis server...bomb out. 
- Skip.IfMissingFeature(conn, nameof(RedisFeatures.Module), r => r.Module); - - var server = conn.GetServer(TestConfig.Current.RediSearchServerAndPort); - var arr = (RedisResult[])server.Execute("module", "list"); - bool found = false; - foreach (var module in arr) - { - var parsed = Parse(module); - if (parsed.TryGetValue("name", out var val) && (val == "ft" || val == "search")) - { - found = true; - if (parsed.TryGetValue("ver", out val)) - output?.WriteLine($"Version: {val}"); - break; - } - } - - if (!found) - { - output?.WriteLine("Module not found."); - throw new RedisException("NRedisSearch module missing on " + TestConfig.Current.RediSearchServerAndPort); - } - return conn; - } - - private static Dictionary Parse(RedisResult module) - { - var data = new Dictionary(); - var lines = (RedisResult[])module; - for (int i = 0; i < lines.Length;) - { - var key = (string)lines[i++]; - var value = (RedisValue)lines[i++]; - data[key] = value; - } - return data; - } - - protected bool IsMissingIndexException(Exception ex) - { - if (ex.Message == null) - { - return false; - } - return ex.Message.Contains("Unknown Index name", StringComparison.InvariantCultureIgnoreCase) - || ex.Message.Contains("no such index", StringComparison.InvariantCultureIgnoreCase); - } - } - - [CollectionDefinition(nameof(NonParallelCollection), DisableParallelization = true)] - public class NonParallelCollection { } -} diff --git a/tests/RESPite.Tests/CycleBufferTests.cs b/tests/RESPite.Tests/CycleBufferTests.cs new file mode 100644 index 000000000..14dcd6f13 --- /dev/null +++ b/tests/RESPite.Tests/CycleBufferTests.cs @@ -0,0 +1,87 @@ +using System; +using RESPite.Buffers; +using Xunit; + +namespace RESPite.Tests; + +public class CycleBufferTests() +{ + public enum Timing + { + CommitEverythingBeforeDiscard, + CommitAfterFirstDiscard, + } + + [Theory] + [InlineData(Timing.CommitEverythingBeforeDiscard)] + [InlineData(Timing.CommitAfterFirstDiscard)] + public void CanDiscardSafely(Timing 
timing) + { + var buffer = CycleBuffer.Create(); + buffer.GetUncommittedSpan(10).Slice(0, 10).Fill(1); + Assert.Equal(0, buffer.GetCommittedLength()); + buffer.Commit(10); + Assert.Equal(10, buffer.GetCommittedLength()); + buffer.GetUncommittedSpan(15).Slice(0, 15).Fill(2); + + if (timing is Timing.CommitEverythingBeforeDiscard) buffer.Commit(15); + + Assert.True(buffer.TryGetFirstCommittedSpan(1, out var committed)); + switch (timing) + { + case Timing.CommitEverythingBeforeDiscard: + Assert.Equal(25, committed.Length); + for (int i = 0; i < 10; i++) + { + if (1 != committed[i]) + { + Assert.Fail($"committed[{i}]={committed[i]}"); + } + } + for (int i = 10; i < 25; i++) + { + if (2 != committed[i]) + { + Assert.Fail($"committed[{i}]={committed[i]}"); + } + } + break; + case Timing.CommitAfterFirstDiscard: + Assert.Equal(10, committed.Length); + for (int i = 0; i < committed.Length; i++) + { + if (1 != committed[i]) + { + Assert.Fail($"committed[{i}]={committed[i]}"); + } + } + break; + } + + buffer.DiscardCommitted(committed.Length); + Assert.Equal(0, buffer.GetCommittedLength()); + + // now (simulating concurrent) we commit the second span + if (timing is Timing.CommitAfterFirstDiscard) + { + buffer.Commit(15); + + Assert.Equal(15, buffer.GetCommittedLength()); + + // and we should be able to read those bytes + Assert.True(buffer.TryGetFirstCommittedSpan(1, out committed)); + Assert.Equal(15, committed.Length); + for (int i = 0; i < committed.Length; i++) + { + if (2 != committed[i]) + { + Assert.Fail($"committed[{i}]={committed[i]}"); + } + } + + buffer.DiscardCommitted(committed.Length); + } + + Assert.Equal(0, buffer.GetCommittedLength()); + } +} diff --git a/tests/RESPite.Tests/RESPite.Tests.csproj b/tests/RESPite.Tests/RESPite.Tests.csproj new file mode 100644 index 000000000..eb40683a7 --- /dev/null +++ b/tests/RESPite.Tests/RESPite.Tests.csproj @@ -0,0 +1,22 @@ + + + + net481;net8.0;net10.0 + enable + false + true + Exe + + + + + + + + + + + + + + diff 
--git a/tests/RESPite.Tests/RespReaderTests.cs b/tests/RESPite.Tests/RespReaderTests.cs new file mode 100644 index 000000000..b80c840f4 --- /dev/null +++ b/tests/RESPite.Tests/RespReaderTests.cs @@ -0,0 +1,1077 @@ +using System; +using System.Buffers; +using System.Collections.Generic; +using System.Globalization; +using System.Linq; +using System.Numerics; +using System.Reflection; +using System.Text; +using System.Threading.Tasks; +using RESPite.Internal; +using RESPite.Messages; +using Xunit; +using Xunit.Sdk; +using Xunit.v3; + +namespace RESPite.Tests; + +public class RespReaderTests(ITestOutputHelper logger) +{ + public readonly struct RespPayload(string label, ReadOnlySequence payload, byte[] expected, bool? outOfBand, int count) + { + public override string ToString() => Label; + public string Label { get; } = label; + public ReadOnlySequence PayloadRaw { get; } = payload; + public int Length { get; } = CheckPayload(payload, expected, outOfBand, count); + private static int CheckPayload(scoped in ReadOnlySequence actual, byte[] expected, bool? 
outOfBand, int count) + { + Assert.Equal(expected.LongLength, actual.Length); + var pool = ArrayPool.Shared.Rent(expected.Length); + actual.CopyTo(pool); + bool isSame = pool.AsSpan(0, expected.Length).SequenceEqual(expected); + ArrayPool.Shared.Return(pool); + Assert.True(isSame, "Data mismatch"); + + // verify that the data exactly passes frame-scanning + long totalBytes = 0; + RespReader reader = new(actual); + while (count > 0) + { + RespScanState state = default; + Assert.True(state.TryRead(ref reader, out long bytesRead)); + totalBytes += bytesRead; + Assert.True(state.IsComplete, nameof(state.IsComplete)); + if (outOfBand.HasValue) + { + if (outOfBand.Value) + { + Assert.Equal(RespPrefix.Push, state.Prefix); + } + else + { + Assert.NotEqual(RespPrefix.Push, state.Prefix); + } + } + count--; + } + Assert.Equal(expected.Length, totalBytes); + reader.DemandEnd(); + return expected.Length; + } + + public RespReader Reader() => new(PayloadRaw); + } + + public sealed class RespAttribute : DataAttribute + { + public override bool SupportsDiscoveryEnumeration() => true; + + private readonly object _value; + public bool OutOfBand { get; set; } = false; + + private bool? EffectiveOutOfBand => Count == 1 ? 
OutOfBand : default(bool?); + public int Count { get; set; } = 1; + + public RespAttribute(string value) => _value = value; + public RespAttribute(params string[] values) => _value = values; + + public override ValueTask> GetData(MethodInfo testMethod, DisposalTracker disposalTracker) + => new(GetData(testMethod).ToArray()); + + public IEnumerable GetData(MethodInfo testMethod) + { + switch (_value) + { + case string s: + foreach (var item in GetVariants(s, EffectiveOutOfBand, Count)) + { + yield return new TheoryDataRow(item); + } + break; + case string[] arr: + foreach (string s in arr) + { + foreach (var item in GetVariants(s, EffectiveOutOfBand, Count)) + { + yield return new TheoryDataRow(item); + } + } + break; + } + } + + private static IEnumerable GetVariants(string value, bool? outOfBand, int count) + { + var bytes = Encoding.UTF8.GetBytes(value); + + // all in one + yield return new("Right-sized", new(bytes), bytes, outOfBand, count); + + var bigger = new byte[bytes.Length + 4]; + bytes.CopyTo(bigger.AsSpan(2, bytes.Length)); + bigger.AsSpan(0, 2).Fill(0xFF); + bigger.AsSpan(bytes.Length + 2, 2).Fill(0xFF); + + // all in one, oversized + yield return new("Oversized", new(bigger, 2, bytes.Length), bytes, outOfBand, count); + + // two-chunks + for (int i = 0; i <= bytes.Length; i++) + { + int offset = 2 + i; + var left = new Segment(new ReadOnlyMemory(bigger, 0, offset), null); + var right = new Segment(new ReadOnlyMemory(bigger, offset, bigger.Length - offset), left); + yield return new($"Split:{i}", new ReadOnlySequence(left, 2, right, right.Length - 2), bytes, outOfBand, count); + } + + // N-chunks + Segment head = new(new(bytes, 0, 1), null), tail = head; + for (int i = 1; i < bytes.Length; i++) + { + tail = new(new(bytes, i, 1), tail); + } + yield return new("Chunk-per-byte", new(head, 0, tail, 1), bytes, outOfBand, count); + } + } + + [Theory, Resp("$3\r\n128\r\n")] + public void HandleSplitTokens(RespPayload payload) + { + RespReader reader = 
payload.Reader(); + RespScanState scan = default; + bool readResult = scan.TryRead(ref reader, out _); + logger.WriteLine(scan.ToString()); + Assert.Equal(payload.Length, reader.BytesConsumed); + Assert.True(readResult); + } + + // the examples from https://github.com/redis/redis-specifications/blob/master/protocol/RESP3.md + [Theory, Resp("$11\r\nhello world\r\n", "$?\r\n;6\r\nhello \r\n;5\r\nworld\r\n;0\r\n")] + public void BlobString(RespPayload payload) + { + var reader = payload.Reader(); + reader.MoveNext(RespPrefix.BulkString); + Assert.True(reader.Is("hello world"u8)); + Assert.Equal("hello world", reader.ReadString()); + Assert.Equal("hello world", reader.ReadString(out var prefix)); + Assert.Equal("", prefix); +#if NET8_0_OR_GREATER + Assert.Equal("hello world", reader.ParseChars()); + /* interestingly, string does not implement IUtf8SpanParsable + Assert.Equal("hello world", reader.ParseBytes()); + */ +#endif + reader.DemandEnd(); + } + + [Theory, Resp("$0\r\n\r\n", "$?\r\n;0\r\n")] + public void EmptyBlobString(RespPayload payload) + { + var reader = payload.Reader(); + reader.MoveNext(RespPrefix.BulkString); + Assert.True(reader.Is(""u8)); + Assert.Equal("", reader.ReadString()); + reader.DemandEnd(); + } + + [Theory, Resp("+hello world\r\n")] + public void SimpleString(RespPayload payload) + { + var reader = payload.Reader(); + reader.MoveNext(RespPrefix.SimpleString); + Assert.True(reader.Is("hello world"u8)); + Assert.Equal("hello world", reader.ReadString()); + Assert.Equal("hello world", reader.ReadString(out var prefix)); + Assert.Equal("", prefix); + reader.DemandEnd(); + } + + [Theory, Resp("-ERR this is the error description\r\n")] + public void SimpleError_ImplicitErrors(RespPayload payload) + { + var ex = Assert.Throws(() => + { + var reader = payload.Reader(); + reader.MoveNext(); + }); + Assert.Equal("ERR this is the error description", ex.Message); + } + + [Theory, Resp("-ERR this is the error description\r\n")] + public void 
SimpleError_Careful(RespPayload payload) + { + var reader = payload.Reader(); + Assert.True(reader.TryMoveNext(checkError: false)); + Assert.Equal(RespPrefix.SimpleError, reader.Prefix); + Assert.True(reader.Is("ERR this is the error description"u8)); + Assert.Equal("ERR this is the error description", reader.ReadString()); + reader.DemandEnd(); + } + + [Theory, Resp(":1234\r\n")] + public void Number(RespPayload payload) + { + var reader = payload.Reader(); + reader.MoveNext(RespPrefix.Integer); + Assert.True(reader.Is("1234"u8)); + Assert.Equal("1234", reader.ReadString()); + Assert.Equal(1234, reader.ReadInt32()); + Assert.Equal(1234D, reader.ReadDouble()); + Assert.Equal(1234M, reader.ReadDecimal()); +#if NET8_0_OR_GREATER + Assert.Equal(1234, reader.ParseChars()); + Assert.Equal(1234D, reader.ParseChars()); + Assert.Equal(1234M, reader.ParseChars()); + Assert.Equal(1234, reader.ParseBytes()); + Assert.Equal(1234D, reader.ParseBytes()); + Assert.Equal(1234M, reader.ParseBytes()); +#endif + reader.DemandEnd(); + } + + [Theory, Resp("_\r\n")] + public void Null(RespPayload payload) + { + var reader = payload.Reader(); + reader.MoveNext(RespPrefix.Null); + Assert.True(reader.Is(""u8)); + Assert.Null(reader.ReadString()); + reader.DemandEnd(); + } + + [Theory, Resp("$-1\r\n")] + public void NullString(RespPayload payload) + { + var reader = payload.Reader(); + reader.MoveNext(RespPrefix.BulkString); + Assert.True(reader.IsNull); + Assert.Null(reader.ReadString()); + Assert.Equal(0, reader.ScalarLength()); + Assert.True(reader.Is(""u8)); + Assert.True(reader.ScalarIsEmpty()); + + var iterator = reader.ScalarChunks(); + Assert.False(iterator.MoveNext()); + iterator.MovePast(out reader); + reader.DemandEnd(); + } + + [Theory, Resp(",1.23\r\n")] + public void Double(RespPayload payload) + { + var reader = payload.Reader(); + reader.MoveNext(RespPrefix.Double); + Assert.True(reader.Is("1.23"u8)); + Assert.Equal("1.23", reader.ReadString()); + Assert.Equal(1.23D, 
reader.ReadDouble()); + Assert.Equal(1.23M, reader.ReadDecimal()); + reader.DemandEnd(); + } + + [Theory, Resp(":10\r\n")] + public void Integer_Simple(RespPayload payload) + { + var reader = payload.Reader(); + reader.MoveNext(RespPrefix.Integer); + Assert.True(reader.Is("10"u8)); + Assert.Equal("10", reader.ReadString()); + Assert.Equal(10, reader.ReadInt32()); + Assert.Equal(10D, reader.ReadDouble()); + Assert.Equal(10M, reader.ReadDecimal()); + reader.DemandEnd(); + } + + [Theory, Resp(",10\r\n")] + public void Double_Simple(RespPayload payload) + { + var reader = payload.Reader(); + reader.MoveNext(RespPrefix.Double); + Assert.True(reader.Is("10"u8)); + Assert.Equal("10", reader.ReadString()); + Assert.Equal(10, reader.ReadInt32()); + Assert.Equal(10D, reader.ReadDouble()); + Assert.Equal(10M, reader.ReadDecimal()); + reader.DemandEnd(); + } + + [Theory, Resp(",inf\r\n")] + public void Double_Infinity(RespPayload payload) + { + var reader = payload.Reader(); + reader.MoveNext(RespPrefix.Double); + Assert.True(reader.Is("inf"u8)); + Assert.Equal("inf", reader.ReadString()); + var val = reader.ReadDouble(); + Assert.True(double.IsInfinity(val)); + Assert.True(double.IsPositiveInfinity(val)); + reader.DemandEnd(); + } + + [Theory, Resp(",+inf\r\n")] + public void Double_PosInfinity(RespPayload payload) + { + var reader = payload.Reader(); + reader.MoveNext(RespPrefix.Double); + Assert.True(reader.Is("+inf"u8)); + Assert.Equal("+inf", reader.ReadString()); + var val = reader.ReadDouble(); + Assert.True(double.IsInfinity(val)); + Assert.True(double.IsPositiveInfinity(val)); + reader.DemandEnd(); + } + + [Theory, Resp(",-inf\r\n")] + public void Double_NegInfinity(RespPayload payload) + { + var reader = payload.Reader(); + reader.MoveNext(RespPrefix.Double); + Assert.True(reader.Is("-inf"u8)); + Assert.Equal("-inf", reader.ReadString()); + var val = reader.ReadDouble(); + Assert.True(double.IsInfinity(val)); + Assert.True(double.IsNegativeInfinity(val)); + 
reader.DemandEnd(); + } + + [Theory, Resp(",nan\r\n")] + public void Double_NaN(RespPayload payload) + { + var reader = payload.Reader(); + reader.MoveNext(RespPrefix.Double); + Assert.True(reader.Is("nan"u8)); + Assert.Equal("nan", reader.ReadString()); + var val = reader.ReadDouble(); + Assert.True(double.IsNaN(val)); + reader.DemandEnd(); + } + + [Theory, Resp("#t\r\n")] + public void Boolean_T(RespPayload payload) + { + var reader = payload.Reader(); + reader.MoveNext(RespPrefix.Boolean); + Assert.True(reader.ReadBoolean()); + reader.DemandEnd(); + } + + [Theory, Resp("#f\r\n")] + public void Boolean_F(RespPayload payload) + { + var reader = payload.Reader(); + reader.MoveNext(RespPrefix.Boolean); + Assert.False(reader.ReadBoolean()); + reader.DemandEnd(); + } + + [Theory, Resp(":1\r\n")] + public void Boolean_1(RespPayload payload) + { + var reader = payload.Reader(); + reader.MoveNext(RespPrefix.Integer); + Assert.True(reader.ReadBoolean()); + reader.DemandEnd(); + } + + [Theory, Resp(":0\r\n")] + public void Boolean_0(RespPayload payload) + { + var reader = payload.Reader(); + reader.MoveNext(RespPrefix.Integer); + Assert.False(reader.ReadBoolean()); + reader.DemandEnd(); + } + + [Theory, Resp("!21\r\nSYNTAX invalid syntax\r\n", "!?\r\n;6\r\nSYNTAX\r\n;15\r\n invalid syntax\r\n;0\r\n")] + public void BlobError_ImplicitErrors(RespPayload payload) + { + var ex = Assert.Throws(() => + { + var reader = payload.Reader(); + reader.MoveNext(); + }); + Assert.Equal("SYNTAX invalid syntax", ex.Message); + } + + [Theory, Resp("!21\r\nSYNTAX invalid syntax\r\n", "!?\r\n;6\r\nSYNTAX\r\n;15\r\n invalid syntax\r\n;0\r\n")] + public void BlobError_Careful(RespPayload payload) + { + var reader = payload.Reader(); + Assert.True(reader.TryMoveNext(checkError: false)); + Assert.Equal(RespPrefix.BulkError, reader.Prefix); + Assert.True(reader.Is("SYNTAX invalid syntax"u8)); + Assert.Equal("SYNTAX invalid syntax", reader.ReadString()); + reader.DemandEnd(); + } + + [Theory, 
Resp("=15\r\ntxt:Some string\r\n", "=?\r\n;4\r\ntxt:\r\n;11\r\nSome string\r\n;0\r\n")] + public void VerbatimString(RespPayload payload) + { + var reader = payload.Reader(); + reader.MoveNext(RespPrefix.VerbatimString); + Assert.Equal("Some string", reader.ReadString()); + Assert.Equal("Some string", reader.ReadString(out var prefix)); + Assert.Equal("txt", prefix); + + Assert.Equal("Some string", reader.ReadString(out var prefix2)); + Assert.Same(prefix, prefix2); // check prefix recognized and reuse literal + reader.DemandEnd(); + } + + [Theory, Resp("(3492890328409238509324850943850943825024385\r\n")] + public void BigIntegers(RespPayload payload) + { + var reader = payload.Reader(); + reader.MoveNext(RespPrefix.BigInteger); + Assert.Equal("3492890328409238509324850943850943825024385", reader.ReadString()); +#if NET8_0_OR_GREATER + var actual = reader.ParseChars(chars => BigInteger.Parse(chars, CultureInfo.InvariantCulture)); + + var expected = BigInteger.Parse("3492890328409238509324850943850943825024385"); + Assert.Equal(expected, actual); +#endif + } + + [Theory, Resp("*3\r\n:1\r\n:2\r\n:3\r\n", "*?\r\n:1\r\n:2\r\n:3\r\n.\r\n")] + public void Array(RespPayload payload) + { + var reader = payload.Reader(); + reader.MoveNext(RespPrefix.Array); + Assert.Equal(3, reader.AggregateLength()); + var iterator = reader.AggregateChildren(); + Assert.True(iterator.MoveNext(RespPrefix.Integer)); + Assert.Equal(1, iterator.Value.ReadInt32()); + iterator.Value.DemandEnd(); + + Assert.True(iterator.MoveNext(RespPrefix.Integer)); + Assert.Equal(2, iterator.Value.ReadInt32()); + iterator.Value.DemandEnd(); + + Assert.True(iterator.MoveNext(RespPrefix.Integer)); + Assert.Equal(3, iterator.Value.ReadInt32()); + iterator.Value.DemandEnd(); + + Assert.False(iterator.MoveNext(RespPrefix.Integer)); + iterator.MovePast(out reader); + reader.DemandEnd(); + + reader = payload.Reader(); + reader.MoveNext(RespPrefix.Array); + int[] arr = new int[reader.AggregateLength()]; + int i = 0; 
+#pragma warning disable SERDBG // warning about .Current vs .Value + foreach (var sub in reader.AggregateChildren()) +#pragma warning restore SERDBG + { + sub.Demand(RespPrefix.Integer); + arr[i++] = sub.ReadInt32(); + sub.DemandEnd(); + } + iterator.MovePast(out reader); + reader.DemandEnd(); + + Assert.Equal([1, 2, 3], arr); + } + + [Theory, Resp("*-1\r\n")] + public void NullArray(RespPayload payload) + { + var reader = payload.Reader(); + reader.MoveNext(RespPrefix.Array); + Assert.True(reader.IsNull); + Assert.Equal(0, reader.AggregateLength()); + var iterator = reader.AggregateChildren(); + Assert.False(iterator.MoveNext()); + iterator.MovePast(out reader); + reader.DemandEnd(); + } + + [Theory, Resp("*2\r\n*3\r\n:1\r\n$5\r\nhello\r\n:2\r\n#f\r\n", "*?\r\n*?\r\n:1\r\n$5\r\nhello\r\n:2\r\n.\r\n#f\r\n.\r\n")] + public void NestedArray(RespPayload payload) + { + var reader = payload.Reader(); + reader.MoveNext(RespPrefix.Array); + + Assert.Equal(2, reader.AggregateLength()); + + var iterator = reader.AggregateChildren(); + Assert.True(iterator.MoveNext(RespPrefix.Array)); + + Assert.Equal(3, iterator.Value.AggregateLength()); + var subIterator = iterator.Value.AggregateChildren(); + Assert.True(subIterator.MoveNext(RespPrefix.Integer)); + Assert.Equal(1, subIterator.Value.ReadInt64()); + subIterator.Value.DemandEnd(); + + Assert.True(subIterator.MoveNext(RespPrefix.BulkString)); + Assert.True(subIterator.Value.Is("hello"u8)); + subIterator.Value.DemandEnd(); + + Assert.True(subIterator.MoveNext(RespPrefix.Integer)); + Assert.Equal(2, subIterator.Value.ReadInt64()); + subIterator.Value.DemandEnd(); + + Assert.False(subIterator.MoveNext()); + + Assert.True(iterator.MoveNext(RespPrefix.Boolean)); + Assert.False(iterator.Value.ReadBoolean()); + iterator.Value.DemandEnd(); + + Assert.False(iterator.MoveNext()); + iterator.MovePast(out reader); + + reader.DemandEnd(); + } + + [Theory, Resp("%2\r\n+first\r\n:1\r\n+second\r\n:2\r\n", 
"%?\r\n+first\r\n:1\r\n+second\r\n:2\r\n.\r\n")] + public void Map(RespPayload payload) + { + var reader = payload.Reader(); + reader.MoveNext(RespPrefix.Map); + + Assert.Equal(4, reader.AggregateLength()); + + var iterator = reader.AggregateChildren(); + + Assert.True(iterator.MoveNext(RespPrefix.SimpleString)); + Assert.True(iterator.Value.Is("first".AsSpan())); + iterator.Value.DemandEnd(); + + Assert.True(iterator.MoveNext(RespPrefix.Integer)); + Assert.Equal(1, iterator.Value.ReadInt32()); + iterator.Value.DemandEnd(); + + Assert.True(iterator.MoveNext(RespPrefix.SimpleString)); + Assert.True(iterator.Value.Is("second"u8)); + iterator.Value.DemandEnd(); + + Assert.True(iterator.MoveNext(RespPrefix.Integer)); + Assert.Equal(2, iterator.Value.ReadInt32()); + iterator.Value.DemandEnd(); + + Assert.False(iterator.MoveNext()); + + iterator.MovePast(out reader); + reader.DemandEnd(); + } + + [Theory, Resp("~5\r\n+orange\r\n+apple\r\n#t\r\n:100\r\n:999\r\n", "~?\r\n+orange\r\n+apple\r\n#t\r\n:100\r\n:999\r\n.\r\n")] + public void Set(RespPayload payload) + { + var reader = payload.Reader(); + reader.MoveNext(RespPrefix.Set); + + Assert.Equal(5, reader.AggregateLength()); + + var iterator = reader.AggregateChildren(); + + Assert.True(iterator.MoveNext(RespPrefix.SimpleString)); + Assert.True(iterator.Value.Is("orange".AsSpan())); + iterator.Value.DemandEnd(); + + Assert.True(iterator.MoveNext(RespPrefix.SimpleString)); + Assert.True(iterator.Value.Is("apple"u8)); + iterator.Value.DemandEnd(); + + Assert.True(iterator.MoveNext(RespPrefix.Boolean)); + Assert.True(iterator.Value.ReadBoolean()); + iterator.Value.DemandEnd(); + + Assert.True(iterator.MoveNext(RespPrefix.Integer)); + Assert.Equal(100, iterator.Value.ReadInt32()); + iterator.Value.DemandEnd(); + + Assert.True(iterator.MoveNext(RespPrefix.Integer)); + Assert.Equal(999, iterator.Value.ReadInt32()); + iterator.Value.DemandEnd(); + + Assert.False(iterator.MoveNext()); + + iterator.MovePast(out reader); + 
reader.DemandEnd(); + } + + private sealed class TestAttributeReader : RespAttributeReader<(int Count, int Ttl, decimal A, decimal B)> + { + public override void Read(ref RespReader reader, ref (int Count, int Ttl, decimal A, decimal B) value) + { + value.Count += ReadKeyValuePairs(ref reader, ref value); + } + private TestAttributeReader() { } + public static readonly TestAttributeReader Instance = new(); + public static (int Count, int Ttl, decimal A, decimal B) Zero = (0, 0, 0, 0); + public override bool ReadKeyValuePair(scoped ReadOnlySpan key, ref RespReader reader, ref (int Count, int Ttl, decimal A, decimal B) value) + { + if (key.SequenceEqual("ttl"u8) && reader.IsScalar) + { + value.Ttl = reader.ReadInt32(); + } + else if (key.SequenceEqual("key-popularity"u8) && reader.IsAggregate) + { + ReadKeyValuePairs(ref reader, ref value); // recurse to process a/b below + } + else if (key.SequenceEqual("a"u8) && reader.IsScalar) + { + value.A = reader.ReadDecimal(); + } + else if (key.SequenceEqual("b"u8) && reader.IsScalar) + { + value.B = reader.ReadDecimal(); + } + else + { + return false; // not recognized + } + return true; // recognized + } + } + + [Theory, Resp( + "|1\r\n+key-popularity\r\n%2\r\n$1\r\na\r\n,0.1923\r\n$1\r\nb\r\n,0.0012\r\n*2\r\n:2039123\r\n:9543892\r\n", + "|1\r\n+key-popularity\r\n%2\r\n$1\r\na\r\n,0.1923\r\n$1\r\nb\r\n,0.0012\r\n*?\r\n:2039123\r\n:9543892\r\n.\r\n")] + public void AttributeRoot(RespPayload payload) + { + // ignore the attribute data + var reader = payload.Reader(); + reader.MoveNext(RespPrefix.Array); + Assert.Equal(2, reader.AggregateLength()); + var iterator = reader.AggregateChildren(); + + Assert.True(iterator.MoveNext(RespPrefix.Integer)); + Assert.Equal(2039123, iterator.Value.ReadInt32()); + iterator.Value.DemandEnd(); + + Assert.True(iterator.MoveNext(RespPrefix.Integer)); + Assert.Equal(9543892, iterator.Value.ReadInt32()); + iterator.Value.DemandEnd(); + + Assert.False(iterator.MoveNext()); + 
iterator.MovePast(out reader); + reader.DemandEnd(); + + // process the attribute data + var state = TestAttributeReader.Zero; + reader = payload.Reader(); + reader.MoveNext(RespPrefix.Array, TestAttributeReader.Instance, ref state); + Assert.Equal(1, state.Count); + Assert.Equal(0.1923M, state.A); + Assert.Equal(0.0012M, state.B); + state = TestAttributeReader.Zero; + + Assert.Equal(2, reader.AggregateLength()); + iterator = reader.AggregateChildren(); + + Assert.True(iterator.MoveNext(RespPrefix.Integer, TestAttributeReader.Instance, ref state)); + Assert.Equal(2039123, iterator.Value.ReadInt32()); + Assert.Equal(0, state.Count); + iterator.Value.DemandEnd(); + + Assert.True(iterator.MoveNext(RespPrefix.Integer, TestAttributeReader.Instance, ref state)); + Assert.Equal(9543892, iterator.Value.ReadInt32()); + Assert.Equal(0, state.Count); + iterator.Value.DemandEnd(); + + Assert.False(iterator.MoveNext()); + iterator.MovePast(out reader); + reader.DemandEnd(); + } + + [Theory, Resp("*3\r\n:1\r\n:2\r\n|1\r\n+ttl\r\n:3600\r\n:3\r\n", "*?\r\n:1\r\n:2\r\n|1\r\n+ttl\r\n:3600\r\n:3\r\n.\r\n")] + public void AttributeInner(RespPayload payload) + { + // ignore the attribute data + var reader = payload.Reader(); + reader.MoveNext(RespPrefix.Array); + Assert.Equal(3, reader.AggregateLength()); + var iterator = reader.AggregateChildren(); + + Assert.True(iterator.MoveNext(RespPrefix.Integer)); + Assert.Equal(1, iterator.Value.ReadInt32()); + iterator.Value.DemandEnd(); + + Assert.True(iterator.MoveNext(RespPrefix.Integer)); + Assert.Equal(2, iterator.Value.ReadInt32()); + iterator.Value.DemandEnd(); + + Assert.True(iterator.MoveNext(RespPrefix.Integer)); + Assert.Equal(3, iterator.Value.ReadInt32()); + iterator.Value.DemandEnd(); + + Assert.False(iterator.MoveNext()); + iterator.MovePast(out reader); + reader.DemandEnd(); + + // process the attribute data + var state = TestAttributeReader.Zero; + reader = payload.Reader(); + reader.MoveNext(RespPrefix.Array, 
TestAttributeReader.Instance, ref state); + Assert.Equal(0, state.Count); + Assert.Equal(3, reader.AggregateLength()); + iterator = reader.AggregateChildren(); + + Assert.True(iterator.MoveNext(RespPrefix.Integer, TestAttributeReader.Instance, ref state)); + Assert.Equal(0, state.Count); + Assert.Equal(1, iterator.Value.ReadInt32()); + iterator.Value.DemandEnd(); + + Assert.True(iterator.MoveNext(RespPrefix.Integer, TestAttributeReader.Instance, ref state)); + Assert.Equal(0, state.Count); + Assert.Equal(2, iterator.Value.ReadInt32()); + iterator.Value.DemandEnd(); + + Assert.True(iterator.MoveNext(RespPrefix.Integer, TestAttributeReader.Instance, ref state)); + Assert.Equal(1, state.Count); + Assert.Equal(3600, state.Ttl); + state = TestAttributeReader.Zero; // reset + Assert.Equal(3, iterator.Value.ReadInt32()); + iterator.Value.DemandEnd(); + + Assert.False(iterator.MoveNextRaw(TestAttributeReader.Instance, ref state)); + Assert.Equal(0, state.Count); + iterator.MovePast(out reader); + reader.DemandEnd(); + } + + [Theory, Resp(">3\r\n+message\r\n+somechannel\r\n+this is the message\r\n", OutOfBand = true)] + public void Push(RespPayload payload) + { + // ignore the attribute data + var reader = payload.Reader(); + reader.MoveNext(RespPrefix.Push); + Assert.Equal(3, reader.AggregateLength()); + var iterator = reader.AggregateChildren(); + + Assert.True(iterator.MoveNext(RespPrefix.SimpleString)); + Assert.True(iterator.Value.Is("message"u8)); + iterator.Value.DemandEnd(); + + Assert.True(iterator.MoveNext(RespPrefix.SimpleString)); + Assert.True(iterator.Value.Is("somechannel"u8)); + iterator.Value.DemandEnd(); + + Assert.True(iterator.MoveNext(RespPrefix.SimpleString)); + Assert.True(iterator.Value.Is("this is the message"u8)); + iterator.Value.DemandEnd(); + + Assert.False(iterator.MoveNext()); + iterator.MovePast(out reader); + reader.DemandEnd(); + } + + [Theory, Resp(">3\r\n+message\r\n+somechannel\r\n+this is the message\r\n$9\r\nGet-Reply\r\n", Count = 2)] 
+ public void PushThenGetReply(RespPayload payload) + { + // ignore the attribute data + var reader = payload.Reader(); + reader.MoveNext(RespPrefix.Push); + Assert.Equal(3, reader.AggregateLength()); + var iterator = reader.AggregateChildren(); + + Assert.True(iterator.MoveNext(RespPrefix.SimpleString)); + Assert.True(iterator.Value.Is("message"u8)); + iterator.Value.DemandEnd(); + + Assert.True(iterator.MoveNext(RespPrefix.SimpleString)); + Assert.True(iterator.Value.Is("somechannel"u8)); + iterator.Value.DemandEnd(); + + Assert.True(iterator.MoveNext(RespPrefix.SimpleString)); + Assert.True(iterator.Value.Is("this is the message"u8)); + iterator.Value.DemandEnd(); + + Assert.False(iterator.MoveNext()); + iterator.MovePast(out reader); + + reader.MoveNext(RespPrefix.BulkString); + Assert.True(reader.Is("Get-Reply"u8)); + reader.DemandEnd(); + } + + [Theory, Resp("$9\r\nGet-Reply\r\n>3\r\n+message\r\n+somechannel\r\n+this is the message\r\n", Count = 2)] + public void GetReplyThenPush(RespPayload payload) + { + // ignore the attribute data + var reader = payload.Reader(); + + reader.MoveNext(RespPrefix.BulkString); + Assert.True(reader.Is("Get-Reply"u8)); + + reader.MoveNext(RespPrefix.Push); + Assert.Equal(3, reader.AggregateLength()); + var iterator = reader.AggregateChildren(); + + Assert.True(iterator.MoveNext(RespPrefix.SimpleString)); + Assert.True(iterator.Value.Is("message"u8)); + iterator.Value.DemandEnd(); + + Assert.True(iterator.MoveNext(RespPrefix.SimpleString)); + Assert.True(iterator.Value.Is("somechannel"u8)); + iterator.Value.DemandEnd(); + + Assert.True(iterator.MoveNext(RespPrefix.SimpleString)); + Assert.True(iterator.Value.Is("this is the message"u8)); + iterator.Value.DemandEnd(); + + Assert.False(iterator.MoveNext()); + iterator.MovePast(out reader); + + reader.DemandEnd(); + } + + [Theory, Resp("*0\r\n$4\r\npass\r\n", "*1\r\n+ok\r\n$4\r\npass\r\n", "*-1\r\n$4\r\npass\r\n", "*?\r\n.\r\n$4\r\npass\r\n", Count = 2)] + public void 
ArrayThenString(RespPayload payload) + { + var reader = payload.Reader(); + Assert.True(reader.TryMoveNext(RespPrefix.Array)); + reader.SkipChildren(); + + Assert.True(reader.TryMoveNext(RespPrefix.BulkString)); + Assert.True(reader.Is("pass"u8)); + + reader.DemandEnd(); + + // and the same using child iterator + reader = payload.Reader(); + Assert.True(reader.TryMoveNext(RespPrefix.Array)); + var iterator = reader.AggregateChildren(); + iterator.MovePast(out reader); + + Assert.True(reader.TryMoveNext(RespPrefix.BulkString)); + Assert.True(reader.Is("pass"u8)); + + reader.DemandEnd(); + } + + // Tests for ScalarLengthIs + [Theory, Resp("$-1\r\n")] // null bulk string + public void ScalarLengthIs_NullBulkString(RespPayload payload) + { + var reader = payload.Reader(); + reader.MoveNext(RespPrefix.BulkString); + Assert.True(reader.ScalarLengthIs(0)); + Assert.False(reader.ScalarLengthIs(1)); + Assert.False(reader.ScalarLengthIs(5)); + reader.DemandEnd(); + } + + // Note: Null prefix (_\r\n) is tested in the existing Null() test above + [Theory, Resp("$0\r\n\r\n", "$?\r\n;0\r\n")] // empty scalar (simple and streaming) + public void ScalarLengthIs_Empty(RespPayload payload) + { + var reader = payload.Reader(); + reader.MoveNext(RespPrefix.BulkString); + Assert.True(reader.ScalarLengthIs(0)); + Assert.False(reader.ScalarLengthIs(1)); + Assert.False(reader.ScalarLengthIs(5)); + reader.DemandEnd(); + } + + [Theory, Resp("$5\r\nhello\r\n")] // simple scalar + public void ScalarLengthIs_Simple(RespPayload payload) + { + var reader = payload.Reader(); + reader.MoveNext(RespPrefix.BulkString); + Assert.True(reader.ScalarLengthIs(5)); + Assert.False(reader.ScalarLengthIs(0)); + Assert.False(reader.ScalarLengthIs(4)); + Assert.False(reader.ScalarLengthIs(6)); + Assert.False(reader.ScalarLengthIs(10)); + reader.DemandEnd(); + } + + [Theory, Resp("$?\r\n;2\r\nhe\r\n;3\r\nllo\r\n;0\r\n")] // streaming scalar + public void ScalarLengthIs_Streaming(RespPayload payload) + { + var 
reader = payload.Reader(); + reader.MoveNext(RespPrefix.BulkString); + Assert.True(reader.ScalarLengthIs(5)); + Assert.False(reader.ScalarLengthIs(0)); + Assert.False(reader.ScalarLengthIs(2)); // short-circuit: stops early + Assert.False(reader.ScalarLengthIs(3)); // short-circuit: stops early + Assert.False(reader.ScalarLengthIs(6)); // short-circuit: stops early + Assert.False(reader.ScalarLengthIs(10)); // short-circuit: stops early + reader.DemandEnd(); + } + + [Fact] // streaming scalar - verify short-circuiting stops before reading malformed data + public void ScalarLengthIs_Streaming_ShortCircuits() + { + // Streaming scalar: 2 bytes "he", then 3 bytes "llo", then 1 byte "X", then MALFORMED + // To check if length == N, we need to read N+1 bytes to verify there isn't more + // So malformed data must come AFTER the N+1 threshold + var data = "$?\r\n;2\r\nhe\r\n;3\r\nllo\r\n;1\r\nX\r\nMALFORMED"u8.ToArray(); + var reader = new RespReader(new ReadOnlySequence(data)); + reader.MoveNext(RespPrefix.BulkString); + + // When checking length < 6, we read up to 6 bytes (he+llo+X), see 6 > expected, stop + Assert.False(reader.ScalarLengthIs(0)); // reads "he" (2), 2 > 0, stops before "llo" + Assert.False(reader.ScalarLengthIs(2)); // reads "he" (2), "llo" (5 total), 5 > 2, stops before "X" + Assert.False(reader.ScalarLengthIs(4)); // reads "he" (2), "llo" (5 total), 5 > 4, stops before "X" + Assert.False(reader.ScalarLengthIs(5)); // reads "he" (2), "llo" (5), "X" (6 total), 6 > 5, stops before MALFORMED + + // All of the above should succeed without hitting MALFORMED because we short-circuit + } + + [Fact] // streaming scalar - verify TryGetSpan fails and Buffer works correctly + public void StreamingScalar_BufferPartial() + { + // 32 bytes total: "abcdefgh" (8) + "ijklmnop" (8) + "qrstuvwx" (8) + "yz012345" (8) + "6789" (4) + var data = "$?\r\n;8\r\nabcdefgh\r\n;8\r\nijklmnop\r\n;8\r\nqrstuvwx\r\n;8\r\nyz012345\r\n;4\r\n6789\r\n;0\r\n"u8.ToArray(); + var reader = 
new RespReader(new ReadOnlySequence(data)); + reader.MoveNext(RespPrefix.BulkString); + + Assert.True(reader.IsScalar); + Assert.False(reader.TryGetSpan(out _)); // Should fail - data is non-contiguous + + // Buffer should fetch just the first 16 bytes + Span buffer = stackalloc byte[16]; + var buffered = reader.Buffer(buffer); + Assert.Equal(16, buffered.Length); + Assert.True(buffered.SequenceEqual("abcdefghijklmnop"u8)); + } + + [Theory, Resp("+hello\r\n")] // simple string + public void ScalarLengthIs_SimpleString(RespPayload payload) + { + var reader = payload.Reader(); + reader.MoveNext(RespPrefix.SimpleString); + Assert.True(reader.ScalarLengthIs(5)); + Assert.False(reader.ScalarLengthIs(0)); + Assert.False(reader.ScalarLengthIs(4)); + reader.DemandEnd(); + } + + // Tests for AggregateLengthIs + [Theory, Resp("*-1\r\n")] // null array + public void AggregateLengthIs_NullArray(RespPayload payload) + { + var reader = payload.Reader(); + reader.MoveNext(RespPrefix.Array); + Assert.True(reader.IsNull); + // Note: AggregateLength() would throw on null, but AggregateLengthIs should handle it + reader.DemandEnd(); + } + + [Theory, Resp("*0\r\n", "*?\r\n.\r\n")] // empty array (simple and streaming) + public void AggregateLengthIs_Empty(RespPayload payload) + { + var reader = payload.Reader(); + reader.MoveNext(RespPrefix.Array); + Assert.True(reader.AggregateLengthIs(0)); + Assert.False(reader.AggregateLengthIs(1)); + Assert.False(reader.AggregateLengthIs(3)); + reader.SkipChildren(); + reader.DemandEnd(); + } + + [Theory, Resp("*3\r\n:1\r\n:2\r\n:3\r\n")] // simple array + public void AggregateLengthIs_Simple(RespPayload payload) + { + var reader = payload.Reader(); + reader.MoveNext(RespPrefix.Array); + Assert.True(reader.AggregateLengthIs(3)); + Assert.False(reader.AggregateLengthIs(0)); + Assert.False(reader.AggregateLengthIs(2)); + Assert.False(reader.AggregateLengthIs(4)); + Assert.False(reader.AggregateLengthIs(10)); + reader.SkipChildren(); + 
reader.DemandEnd(); + } + + [Theory, Resp("*?\r\n:1\r\n:2\r\n:3\r\n.\r\n")] // streaming array + public void AggregateLengthIs_Streaming(RespPayload payload) + { + var reader = payload.Reader(); + reader.MoveNext(RespPrefix.Array); + Assert.True(reader.AggregateLengthIs(3)); + Assert.False(reader.AggregateLengthIs(0)); + Assert.False(reader.AggregateLengthIs(2)); // short-circuit: stops early + Assert.False(reader.AggregateLengthIs(4)); // short-circuit: stops early + Assert.False(reader.AggregateLengthIs(10)); // short-circuit: stops early + reader.SkipChildren(); + reader.DemandEnd(); + } + + [Fact] // streaming array - verify short-circuiting works even with extra data present + public void AggregateLengthIs_Streaming_ShortCircuits() + { + // Streaming array: 3 elements (:1, :2, :3), then extra elements + // Short-circuiting means we can return false without reading all elements + var data = "*?\r\n:1\r\n:2\r\n:3\r\n:999\r\n:888\r\n.\r\n"u8.ToArray(); + var reader = new RespReader(new ReadOnlySequence(data)); + reader.MoveNext(RespPrefix.Array); + + // These should all return false via short-circuiting + // (we know the answer before reading all elements) + Assert.False(reader.AggregateLengthIs(0)); // can tell after 1 element + Assert.False(reader.AggregateLengthIs(2)); // can tell after 3 elements + Assert.False(reader.AggregateLengthIs(4)); // can tell after 4 elements (count > expected) + Assert.False(reader.AggregateLengthIs(10)); // can tell after 4 elements (count > expected) + + // The actual length is 5 (:1, :2, :3, :999, :888) + Assert.True(reader.AggregateLengthIs(5)); + } + + [Fact] // streaming array - verify short-circuiting stops before reading malformed data + public void AggregateLengthIs_Streaming_MalformedAfterShortCircuit() + { + // Streaming array: 3 elements (:1, :2, :3), then :4, then MALFORMED + // To check if length == N, we need to read N+1 elements to verify there isn't more + // So malformed data must come AFTER the N+1 threshold + 
var data = "*?\r\n:1\r\n:2\r\n:3\r\n:4\r\nGARBAGE_NOT_A_VALID_ELEMENT"u8.ToArray(); + var reader = new RespReader(new ReadOnlySequence(data)); + reader.MoveNext(RespPrefix.Array); + + // When checking length < 4, we read up to 4 elements, see 4 > expected, stop + Assert.False(reader.AggregateLengthIs(0)); // reads :1 (1 element), 1 > 0, stops before :2 + Assert.False(reader.AggregateLengthIs(2)); // reads :1, :2, :3 (3 elements), 3 > 2, stops before :4 + Assert.False(reader.AggregateLengthIs(3)); // reads :1, :2, :3, :4 (4 elements), 4 > 3, stops before MALFORMED + + // All of the above should succeed without hitting MALFORMED because we short-circuit + } + + [Theory, Resp("%2\r\n+first\r\n:1\r\n+second\r\n:2\r\n", "%?\r\n+first\r\n:1\r\n+second\r\n:2\r\n.\r\n")] // map (simple and streaming) + public void AggregateLengthIs_Map(RespPayload payload) + { + var reader = payload.Reader(); + reader.MoveNext(RespPrefix.Map); + // Map length is doubled (2 pairs = 4 elements) + Assert.True(reader.AggregateLengthIs(4)); + Assert.False(reader.AggregateLengthIs(0)); + Assert.False(reader.AggregateLengthIs(2)); + Assert.False(reader.AggregateLengthIs(3)); + Assert.False(reader.AggregateLengthIs(5)); + reader.SkipChildren(); + reader.DemandEnd(); + } + + [Theory, Resp("~5\r\n+orange\r\n+apple\r\n#t\r\n:100\r\n:999\r\n", "~?\r\n+orange\r\n+apple\r\n#t\r\n:100\r\n:999\r\n.\r\n")] // set (simple and streaming) + public void AggregateLengthIs_Set(RespPayload payload) + { + var reader = payload.Reader(); + reader.MoveNext(RespPrefix.Set); + Assert.True(reader.AggregateLengthIs(5)); + Assert.False(reader.AggregateLengthIs(0)); + Assert.False(reader.AggregateLengthIs(4)); + Assert.False(reader.AggregateLengthIs(6)); + reader.SkipChildren(); + reader.DemandEnd(); + } + + private sealed class Segment : ReadOnlySequenceSegment + { + public override string ToString() => RespConstants.UTF8.GetString(Memory.Span) + .Replace("\r", "\\r").Replace("\n", "\\n"); + + public 
Segment(ReadOnlyMemory value, Segment? head) + { + Memory = value; + if (head is not null) + { + RunningIndex = head.RunningIndex + head.Memory.Length; + head.Next = this; + } + } + public bool IsEmpty => Memory.IsEmpty; + public int Length => Memory.Length; + } +} diff --git a/tests/RESPite.Tests/RespScannerTests.cs b/tests/RESPite.Tests/RespScannerTests.cs new file mode 100644 index 000000000..0028f0b3a --- /dev/null +++ b/tests/RESPite.Tests/RespScannerTests.cs @@ -0,0 +1,18 @@ +using RESPite.Messages; +using Xunit; + +namespace RESPite.Tests; + +public class RespScannerTests +{ + [Fact] + public void ScanNull() + { + RespScanState scanner = default; + Assert.True(scanner.TryRead("_\r\n"u8, out var consumed)); + + Assert.Equal(3, consumed); + Assert.Equal(3, scanner.TotalBytes); + Assert.Equal(RespPrefix.Null, scanner.Prefix); + } +} diff --git a/tests/RESPite.Tests/TestDuplexStream.cs b/tests/RESPite.Tests/TestDuplexStream.cs new file mode 100644 index 000000000..3456f0cb7 --- /dev/null +++ b/tests/RESPite.Tests/TestDuplexStream.cs @@ -0,0 +1,229 @@ +using System; +using System.Buffers; +using System.IO; +using System.IO.Pipelines; +using System.Text; +using System.Threading; +using System.Threading.Tasks; + +namespace RESPite.Tests; + +/// +/// A controllable duplex stream for testing Redis protocol interactions. +/// Captures outbound data (client-to-redis) and allows controlled inbound data (redis-to-client). +/// +public sealed class TestDuplexStream : Stream +{ + private static readonly PipeOptions s_pipeOptions = new(useSynchronizationContext: false); + + private readonly MemoryStream _outbound; + private readonly Pipe _inbound; + private readonly Stream _inboundStream; + + public TestDuplexStream() + { + _outbound = new MemoryStream(); + _inbound = new Pipe(s_pipeOptions); + _inboundStream = _inbound.Reader.AsStream(); + } + + /// + /// Gets the data that has been written to the stream (client-to-redis). 
+ /// + public ReadOnlySpan GetOutboundData() + { + if (_outbound.TryGetBuffer(out var buffer)) + { + return buffer.AsSpan(); + } + return _outbound.GetBuffer().AsSpan(0, (int)_outbound.Length); + } + + /// + /// Clears the outbound data buffer. + /// + public void FlushOutboundData() + { + _outbound.Position = 0; + _outbound.SetLength(0); + } + + /// + /// Adds data to the inbound buffer (redis-to-client) that will be available for reading. + /// + public async ValueTask AddInboundAsync(ReadOnlyMemory data, CancellationToken cancellationToken = default) + { + await _inbound.Writer.WriteAsync(data, cancellationToken).ConfigureAwait(false); + await _inbound.Writer.FlushAsync(cancellationToken).ConfigureAwait(false); + } + + /// + /// Adds data to the inbound buffer (redis-to-client) that will be available for reading. + /// Supports the "return pending.IsCompletedSynchronously ? default : AwaitAsync(pending)" pattern. + /// + public ValueTask AddInboundAsync(ReadOnlySpan data, CancellationToken cancellationToken = default) + { + // Use the Write extension method to write the span synchronously + _inbound.Writer.Write(data); + + // Flush and return based on completion status + var flushPending = _inbound.Writer.FlushAsync(cancellationToken); + return flushPending.IsCompletedSuccessfully ? default : AwaitFlushAsync(flushPending); + + static async ValueTask AwaitFlushAsync(ValueTask flushPending) + { + await flushPending.ConfigureAwait(false); + } + } + + /// + /// Adds UTF8-encoded string data to the inbound buffer (redis-to-client) that will be available for reading. + /// Uses stack allocation for small strings (≤256 bytes) and ArrayPool for larger strings. + /// Supports the "return pending.IsCompletedSynchronously ? default : AwaitAsync(pending)" pattern. 
+ /// + public ValueTask AddInboundAsync(string data, CancellationToken cancellationToken = default) + { + const int StackAllocThreshold = 256; + + // Get the max byte count for UTF8 encoding + var maxByteCount = Encoding.UTF8.GetMaxByteCount(data.Length); + + if (maxByteCount <= StackAllocThreshold) + { + // Use stack allocation for small strings + Span buffer = stackalloc byte[maxByteCount]; + var actualByteCount = Encoding.UTF8.GetBytes(data, buffer); + _inbound.Writer.Write(buffer.Slice(0, actualByteCount)); + } + else + { + // Use ArrayPool for larger strings + var buffer = ArrayPool.Shared.Rent(maxByteCount); + try + { + var actualByteCount = Encoding.UTF8.GetBytes(data, buffer); + _inbound.Writer.Write(buffer.AsSpan(0, actualByteCount)); + } + finally + { + ArrayPool.Shared.Return(buffer); // can't have been captured during write, because span + } + } + + // Flush and return based on completion status + var flushPending = _inbound.Writer.FlushAsync(cancellationToken); + return flushPending.IsCompletedSuccessfully ? default : AwaitFlushAsync(flushPending); + + static async ValueTask AwaitFlushAsync(ValueTask flushPending) + { + await flushPending.ConfigureAwait(false); + } + } + + /// + /// Completes the inbound stream, signaling no more data will be written. 
+ /// + public void CompleteInbound() + { + _inbound.Writer.Complete(); + } + + // Stream implementation - Read operations proxy to the inbound stream + public override bool CanRead => true; + public override bool CanSeek => false; + public override bool CanWrite => true; + public override long Length => throw new NotSupportedException(); + public override long Position + { + get => throw new NotSupportedException(); + set => throw new NotSupportedException(); + } + + public override int Read(byte[] buffer, int offset, int count) + { + return _inboundStream.Read(buffer, offset, count); + } + + public override Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) + { + return _inboundStream.ReadAsync(buffer, offset, count, cancellationToken); + } + +#if NET + public override int Read(Span buffer) + { + return _inboundStream.Read(buffer); + } + + public override ValueTask ReadAsync(Memory buffer, CancellationToken cancellationToken = default) + { + return _inboundStream.ReadAsync(buffer, cancellationToken); + } +#endif + + // Stream implementation - Write operations capture to the outbound stream + public override void Write(byte[] buffer, int offset, int count) + { + _outbound.Write(buffer, offset, count); + } + + public override Task WriteAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) + { + return _outbound.WriteAsync(buffer, offset, count, cancellationToken); + } + +#if NET + public override void Write(ReadOnlySpan buffer) + { + _outbound.Write(buffer); + } + + public override ValueTask WriteAsync(ReadOnlyMemory buffer, CancellationToken cancellationToken = default) + { + return _outbound.WriteAsync(buffer, cancellationToken); + } +#endif + + public override void Flush() + { + _outbound.Flush(); + } + + public override Task FlushAsync(CancellationToken cancellationToken) + { + return _outbound.FlushAsync(cancellationToken); + } + + public override long Seek(long offset, SeekOrigin origin) + { + 
throw new NotSupportedException(); + } + + public override void SetLength(long value) + { + throw new NotSupportedException(); + } + + protected override void Dispose(bool disposing) + { + if (disposing) + { + _inbound.Writer.Complete(); + _inbound.Reader.Complete(); + _inboundStream.Dispose(); + _outbound.Dispose(); + } + base.Dispose(disposing); + } + +#if NET + public override async ValueTask DisposeAsync() + { + _inbound.Writer.Complete(); + _inbound.Reader.Complete(); + await _inboundStream.DisposeAsync().ConfigureAwait(false); + await _outbound.DisposeAsync().ConfigureAwait(false); + await base.DisposeAsync().ConfigureAwait(false); + } +#endif +} diff --git a/tests/RedisConfigs/.docker/Envoy/Dockerfile b/tests/RedisConfigs/.docker/Envoy/Dockerfile new file mode 100644 index 000000000..5c20d350c --- /dev/null +++ b/tests/RedisConfigs/.docker/Envoy/Dockerfile @@ -0,0 +1,6 @@ +FROM envoyproxy/envoy:v1.31-latest + +COPY envoy.yaml /etc/envoy/envoy.yaml +RUN chmod go+r /etc/envoy/envoy.yaml + +EXPOSE 7015 diff --git a/tests/RedisConfigs/.docker/Envoy/envoy.yaml b/tests/RedisConfigs/.docker/Envoy/envoy.yaml new file mode 100644 index 000000000..fe57c8c1f --- /dev/null +++ b/tests/RedisConfigs/.docker/Envoy/envoy.yaml @@ -0,0 +1,35 @@ +admin: + address: { socket_address: { protocol: TCP, address: 0.0.0.0, port_value: 8001 } } +static_resources: + listeners: + - name: redis_listener + address: { socket_address: { protocol: TCP, address: 0.0.0.0, port_value: 7015 } } + filter_chains: + - filters: + - name: envoy.filters.network.redis_proxy + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.redis_proxy.v3.RedisProxy + stat_prefix: envoy_redis_stats + settings: + op_timeout: 3s + dns_cache_config: + name: dynamic_forward_proxy_cache_config + dns_lookup_family: V4_ONLY + prefix_routes: + catch_all_route: + cluster: redis_cluster + clusters: + - name: redis_cluster + connect_timeout: 3s + type: STRICT_DNS + dns_lookup_family: V4_ONLY + 
load_assignment: + cluster_name: redis_cluster + endpoints: + - lb_endpoints: + - endpoint: { address: { socket_address: { address: redis, port_value: 7000 } } } + - endpoint: { address: { socket_address: { address: redis, port_value: 7001 } } } + - endpoint: { address: { socket_address: { address: redis, port_value: 7002 } } } + - endpoint: { address: { socket_address: { address: redis, port_value: 7003 } } } + - endpoint: { address: { socket_address: { address: redis, port_value: 7004 } } } + - endpoint: { address: { socket_address: { address: redis, port_value: 7005 } } } \ No newline at end of file diff --git a/tests/RedisConfigs/.docker/Redis/Dockerfile b/tests/RedisConfigs/.docker/Redis/Dockerfile new file mode 100644 index 000000000..363edde51 --- /dev/null +++ b/tests/RedisConfigs/.docker/Redis/Dockerfile @@ -0,0 +1,23 @@ +FROM redislabs/client-libs-test:8.6.0 + +COPY --from=configs ./Basic /data/Basic/ +COPY --from=configs ./Failover /data/Failover/ +COPY --from=configs ./Cluster /data/Cluster/ +COPY --from=configs ./Sentinel /data/Sentinel/ +COPY --from=configs ./Certs /Certs/ + +RUN chown -R redis:redis /data +RUN chown -R redis:redis /Certs + +COPY docker-entrypoint.sh /usr/local/bin/ +RUN chmod +x /usr/local/bin/docker-entrypoint.sh + +RUN apt-get -y update && apt-get install supervisor -y + +RUN apt-get clean + +ADD supervisord.conf /etc/ + +ENTRYPOINT ["docker-entrypoint.sh"] + +EXPOSE 6379 6380 6381 6382 6383 6384 7000 7001 7002 7003 7004 7005 7010 7011 26379 26380 26381 diff --git a/tests/RedisConfigs/Docker/docker-entrypoint.sh b/tests/RedisConfigs/.docker/Redis/docker-entrypoint.sh similarity index 100% rename from tests/RedisConfigs/Docker/docker-entrypoint.sh rename to tests/RedisConfigs/.docker/Redis/docker-entrypoint.sh diff --git a/tests/RedisConfigs/Docker/supervisord.conf b/tests/RedisConfigs/.docker/Redis/supervisord.conf similarity index 90% rename from tests/RedisConfigs/Docker/supervisord.conf rename to 
tests/RedisConfigs/.docker/Redis/supervisord.conf index 2b21cff9e..e0bd20571 100644 --- a/tests/RedisConfigs/Docker/supervisord.conf +++ b/tests/RedisConfigs/.docker/Redis/supervisord.conf @@ -1,8 +1,8 @@ [supervisord] nodaemon=false -[program:master-6379] -command=/usr/local/bin/redis-server /data/Basic/master-6379.conf +[program:primary-6379] +command=/usr/local/bin/redis-server /data/Basic/primary-6379.conf directory=/data/Basic stdout_logfile=/var/log/supervisor/%(program_name)s.log stderr_logfile=/var/log/supervisor/%(program_name)s.log @@ -22,8 +22,15 @@ stdout_logfile=/var/log/supervisor/%(program_name)s.log stderr_logfile=/var/log/supervisor/%(program_name)s.log autorestart=true -[program:master-6382] -command=/usr/local/bin/redis-server /data/Failover/master-6382.conf +[program:tls-6384] +command=/usr/local/bin/redis-server /data/Basic/tls-ciphers-6384.conf +directory=/data/Basic +stdout_logfile=/var/log/supervisor/%(program_name)s.log +stderr_logfile=/var/log/supervisor/%(program_name)s.log +autorestart=true + +[program:primary-6382] +command=/usr/local/bin/redis-server /data/Failover/primary-6382.conf directory=/data/Failover stdout_logfile=/var/log/supervisor/%(program_name)s.log stderr_logfile=/var/log/supervisor/%(program_name)s.log diff --git a/tests/RedisConfigs/3.0.503/redis.windows-service.conf b/tests/RedisConfigs/3.0.503/redis.windows-service.conf index 16f41ee5a..b374dad58 100644 --- a/tests/RedisConfigs/3.0.503/redis.windows-service.conf +++ b/tests/RedisConfigs/3.0.503/redis.windows-service.conf @@ -516,7 +516,7 @@ slave-priority 100 # If the AOF is enabled on startup Redis will load the AOF, that is the file # with the better durability guarantees. # -# Please check http://redis.io/topics/persistence for more information. +# Please check https://redis.io/topics/persistence for more information. 
appendonly no @@ -738,7 +738,7 @@ lua-time-limit 5000 # cluster-require-full-coverage yes # In order to setup your cluster make sure to read the documentation -# available at http://redis.io web site. +# available at https://redis.io web site. ################################## SLOW LOG ################################### @@ -788,7 +788,7 @@ latency-monitor-threshold 0 ############################# Event notification ############################## # Redis can notify Pub/Sub clients about events happening in the key space. -# This feature is documented at http://redis.io/topics/notifications +# This feature is documented at https://redis.io/topics/notifications # # For instance if keyspace events notification is enabled, and a client # performs a DEL operation on key "foo" stored in the Database 0, two @@ -829,7 +829,7 @@ latency-monitor-threshold 0 # By default all notifications are disabled because most users don't need # this feature and the feature has some overhead. Note that if you don't # specify at least one of K or E, no events will be delivered. -notify-keyspace-events "" +notify-keyspace-events "AKE" ############################### ADVANCED CONFIG ############################### diff --git a/tests/RedisConfigs/3.0.503/redis.windows.conf b/tests/RedisConfigs/3.0.503/redis.windows.conf index 21915cce1..4a99b8fdb 100644 --- a/tests/RedisConfigs/3.0.503/redis.windows.conf +++ b/tests/RedisConfigs/3.0.503/redis.windows.conf @@ -516,7 +516,7 @@ slave-priority 100 # If the AOF is enabled on startup Redis will load the AOF, that is the file # with the better durability guarantees. # -# Please check http://redis.io/topics/persistence for more information. +# Please check https://redis.io/topics/persistence for more information. appendonly no @@ -738,7 +738,7 @@ lua-time-limit 5000 # cluster-require-full-coverage yes # In order to setup your cluster make sure to read the documentation -# available at http://redis.io web site. 
+# available at https://redis.io web site. ################################## SLOW LOG ################################### @@ -788,7 +788,7 @@ latency-monitor-threshold 0 ############################# Event notification ############################## # Redis can notify Pub/Sub clients about events happening in the key space. -# This feature is documented at http://redis.io/topics/notifications +# This feature is documented at https://redis.io/topics/notifications # # For instance if keyspace events notification is enabled, and a client # performs a DEL operation on key "foo" stored in the Database 0, two @@ -829,7 +829,7 @@ latency-monitor-threshold 0 # By default all notifications are disabled because most users don't need # this feature and the feature has some overhead. Note that if you don't # specify at least one of K or E, no events will be delivered. -notify-keyspace-events "" +notify-keyspace-events "AKE" ############################### ADVANCED CONFIG ############################### diff --git a/tests/RedisConfigs/Basic/master-6379.conf b/tests/RedisConfigs/Basic/primary-6379-3.0.conf similarity index 64% rename from tests/RedisConfigs/Basic/master-6379.conf rename to tests/RedisConfigs/Basic/primary-6379-3.0.conf index 4ea261c1f..889756fec 100644 --- a/tests/RedisConfigs/Basic/master-6379.conf +++ b/tests/RedisConfigs/Basic/primary-6379-3.0.conf @@ -5,5 +5,6 @@ databases 2000 maxmemory 6gb dir "../Temp" appendonly no -dbfilename "master-6379.rdb" -save "" \ No newline at end of file +dbfilename "primary-6379.rdb" +save "" +notify-keyspace-events AKE \ No newline at end of file diff --git a/tests/RedisConfigs/Basic/primary-6379.conf b/tests/RedisConfigs/Basic/primary-6379.conf new file mode 100644 index 000000000..2da592601 --- /dev/null +++ b/tests/RedisConfigs/Basic/primary-6379.conf @@ -0,0 +1,11 @@ +port 6379 +repl-diskless-sync yes +repl-diskless-sync-delay 0 +databases 2000 +maxmemory 6gb +dir "../Temp" +appendonly no +dbfilename "primary-6379.rdb" 
+save "" +enable-debug-command yes +notify-keyspace-events AKE \ No newline at end of file diff --git a/tests/RedisConfigs/Basic/replica-6380.conf b/tests/RedisConfigs/Basic/replica-6380.conf index 8d87e54c2..0c1650513 100644 --- a/tests/RedisConfigs/Basic/replica-6380.conf +++ b/tests/RedisConfigs/Basic/replica-6380.conf @@ -7,4 +7,5 @@ maxmemory 2gb appendonly no dir "../Temp" dbfilename "replica-6380.rdb" -save "" \ No newline at end of file +save "" +notify-keyspace-events AKE \ No newline at end of file diff --git a/tests/RedisConfigs/Basic/secure-6381.conf b/tests/RedisConfigs/Basic/secure-6381.conf index bd9359244..ad2e380ad 100644 --- a/tests/RedisConfigs/Basic/secure-6381.conf +++ b/tests/RedisConfigs/Basic/secure-6381.conf @@ -4,4 +4,5 @@ databases 2000 maxmemory 512mb dir "../Temp" dbfilename "secure-6381.rdb" -save "" \ No newline at end of file +save "" +notify-keyspace-events AKE \ No newline at end of file diff --git a/tests/RedisConfigs/Basic/tls-ciphers-6384.conf b/tests/RedisConfigs/Basic/tls-ciphers-6384.conf new file mode 100644 index 000000000..857d5c741 --- /dev/null +++ b/tests/RedisConfigs/Basic/tls-ciphers-6384.conf @@ -0,0 +1,12 @@ +port 0 +tls-port 6384 +timeout 0 +protected-mode no +tls-auth-clients no +tls-ciphers ECDHE-RSA-AES256-GCM-SHA384 +tls-ciphersuites TLS_AES_256_GCM_SHA384 +tls-protocols "TLSv1.2 TLSv1.3" +tls-cert-file /Certs/redis.crt +tls-key-file /Certs/redis.key +tls-ca-cert-file /Certs/ca.crt +notify-keyspace-events AKE diff --git a/tests/RedisConfigs/Certs/ca.crt b/tests/RedisConfigs/Certs/ca.crt new file mode 100755 index 000000000..4eee6fbfe --- /dev/null +++ b/tests/RedisConfigs/Certs/ca.crt @@ -0,0 +1,29 @@ +-----BEGIN CERTIFICATE----- +MIIE5jCCAs4CCQCCCQ8gWCbLVjANBgkqhkiG9w0BAQsFADA1MRMwEQYDVQQKDApS +ZWRpcyBUZXN0MR4wHAYDVQQDDBVDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMjIw +ODI0MTMwOTEwWhcNMzIwODIxMTMwOTEwWjA1MRMwEQYDVQQKDApSZWRpcyBUZXN0 +MR4wHAYDVQQDDBVDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEB 
+AQUAA4ICDwAwggIKAoICAQDDuKJ+rUI5/MmOdc7PmhKlJrceqwnDsgyVtvV5X6Xn +P8OG2jSXSqpmQn32WCDOL0EldTYS5UPIE0dS7XOYKFJaZnlSZtKBVZaam1+T2mMv +/rNjk3qmJpNiFpjJbktEchiwrsF6l91gsNfRdc1XXku9nvLhjEyhpNRZ7NKLT+Vx +F7h3wkEqLJFwzaAxIPPyvt6aQsip5dRfExFSwCLY4PTGzsvfNNauWASFvgh+zk80 +FFTeDm6AZRmMIgizUc+0JK46QposPZHZA4N9/wmNZ3gAGzIEXvIZ1A5Nn/xMmU/7 +3IRdFkE6pZmaCLA5CwE2M8Z8WyYtPTwLGU9c5yjTKrcX69Dy1hzjyk3H+DsqObuR +rpEcCx6x9SlrJQb0zLcumeqNsXSLdLlUwOgGX/d78J3jYEMSwatnU9wTP26nWhXH +b37sQZz+kh9ZM9rlfhzij4eq/4QtDRzLN0G+y6uveujW+s2LXlhY73K6DP7ujUUW +tCYy0X+iw8YfXHYgYyoby84gYETg0kpR1bjUKQL2PNNf0BOKUjQF9K9IPIiQX0v7 +0YFg/2Fs3fidTVPFCwiLGCQzmy6P9VZQ3EkblHcLtoNAaPieoXdX/s/wMXTqj/hU +b9jwmrqJ2sbEb6VBMrrIgCJqz52zQzE+64KgHCmrQR/ABTCUWhgnsDsUGmaHs8y6 +cwIDAQABMA0GCSqGSIb3DQEBCwUAA4ICAQCSg6CZEvBoRqrOCZ+m4z3pQzt/oRxZ +sUz9ZM94pumEOnbEV0AjObqq2LnNTfuyexguH+5HH4Dr2OTvDIL7xNMor7NSUXGA +yhiu+BxiS1tB1o6IyExwyjpbS61iqtUX09abSWP/+2UW1i8oEIuwUmkRlAxbxFWI +HN6+LFe1L32fbrbp90sRS96bljvxNBxGpYqcooLAHCbK2T6jHDAZF0cK5ByWZoJ4 +FcD3tRYWelj8k80ZeoG4PIsCZylsSMPWeglbFqDV4gSpWx7nb4Pgpzs9REp02Cp0 +4MWxAt2fmvPFn9xypeyo6gxZ+R2cmSKiu0sdVnp3u1RscH1aGnVJTpdygpuDYJQ7 +hxn1Wv91zRi+h4MfVywSO/3gMIvdiJIiV7avgNEWiLXYUn2bb4gHzEMOrp2Z7BUp +/SwNHmikaWQj0MY6sOW7pOaistbokyyZw8TjgrTnS4GocN91h16JbuSgAI+Nrwsa +VcFvDCd7qSmJgeMfGhhlOdNenDGXsA9UVyTTkGfw3mgcm62uvewmj1I70ogk8M3z +khwAMI2OeagmHtXtjtg2BULM53IwFHJKV41B4rwekcMkboCsOfbhZwz42aLpT0YG +d0btKJkNcL7n8QiGtFmvreizrdyC5r23GNVnNdn2dhuJBqN65xJQoXh0x1PTnK7/ +4IWfRo8kosNhmw== +-----END CERTIFICATE----- diff --git a/tests/RedisConfigs/Certs/redis.crt b/tests/RedisConfigs/Certs/redis.crt new file mode 100755 index 000000000..cb69138e7 --- /dev/null +++ b/tests/RedisConfigs/Certs/redis.crt @@ -0,0 +1,23 @@ +-----BEGIN CERTIFICATE----- +MIID3TCCAcUCCQDIqu2SpngxXjANBgkqhkiG9w0BAQsFADA1MRMwEQYDVQQKDApS +ZWRpcyBUZXN0MR4wHAYDVQQDDBVDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMjIw +ODI0MTMwOTEwWhcNMjMwODI0MTMwOTEwWjAsMRMwEQYDVQQKDApSZWRpcyBUZXN0 +MRUwEwYDVQQDDAxHZW5lcmljLWNlcnQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw 
+ggEKAoIBAQDCtcwbyBNSBgM4Ym72Sz7grrpYtx43JQtgiz4o7rYfCjoxkEcWic2J +3/UC2nqbtmb2vUOTyqxe5VUh6bXHB3OaZfLkyyGJM8dJN3p3rC8Ef4zr2CkCzpAK +jquXz0do9epXrUZCsYSdw1pOZDsRXx9ZgImtvB5Yj0UXfatAQvt9xk7UdxIrNDs4 +r0V34gZVvU4OhFnTEQVwLYqi0VOiknKRtW9BaD0niObjdc+aMmVYBo00G0UmFO4A +UuanO6PJz3FiX+ejY+aViBCD4lJUbuH719/EwWXYNxXbZasC5I0EE6zU0PEOcECm +cbWlSS23eid06HuaqRmcEwTNKPk0/CVjAgMBAAEwDQYJKoZIhvcNAQELBQADggIB +AJOtU8CutoZncckfG+6YL1raeqAcnCYvm0bL3RTs4OYMwHDPOIBCSG6kqyIKiTc2 +uU2G2XUZcWs2a3MxhoOgZhl0TDQBgSQMnMcr/d91IBNwUnRWA553KSpBhOn31SGr +fo8U4IOMz9I/wJ05AFt0bE4WDfm73tiwsIx/2SMn75/d5UgY+II7epx+MpIrWGpT +SwBbm7is9Go/Mwr1bdNy35lrUAL+Si80aHhVPWa+bIFqyqsWal+iZZND+NrqilJe +y27Syhikq0R+U8gPjSdIT2OYj7kwrUZI1exOzpUDa9gUjfy13+lLJWxPbgQEc7Uq +hyu6+CaY9q9YNT6eIIymdLtGTSs/rMYLACHylS/J4WNXr/YCmk2xhGqGDlPq3wjw +Q5WtmdHDaSQXo2+H9fQbw2N2loQ29Gcz4FEgF1CVhbuCZUstelDl6F38cvgRHPrY +gLro6ijlxtfvka6GOZZeVksJWaW9ikAz+aw3yqKQoFMnILjvwxpuCTphvgvlKIb4 +TFg5DU+a+RHW/S3qP3PCatw+f/FaFkRavD2P9oNz0XAcmLld0iWbDXHntDBF1q0N +c9bgdoP9pVS+NKb6Hq/zf2kUC7AseUiLAju5iMQVglunhNcbm/H6RnxfnYekUMkp +DdenAmOqjXa/n5IQkfwOxW97EJyI9SGo3Is+DKgUEmd4 +-----END CERTIFICATE----- diff --git a/tests/RedisConfigs/Certs/redis.key b/tests/RedisConfigs/Certs/redis.key new file mode 100755 index 000000000..56f301528 --- /dev/null +++ b/tests/RedisConfigs/Certs/redis.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAwrXMG8gTUgYDOGJu9ks+4K66WLceNyULYIs+KO62Hwo6MZBH +FonNid/1Atp6m7Zm9r1Dk8qsXuVVIem1xwdzmmXy5MshiTPHSTd6d6wvBH+M69gp +As6QCo6rl89HaPXqV61GQrGEncNaTmQ7EV8fWYCJrbweWI9FF32rQEL7fcZO1HcS +KzQ7OK9Fd+IGVb1ODoRZ0xEFcC2KotFTopJykbVvQWg9J4jm43XPmjJlWAaNNBtF +JhTuAFLmpzujyc9xYl/no2PmlYgQg+JSVG7h+9ffxMFl2DcV22WrAuSNBBOs1NDx +DnBApnG1pUktt3ondOh7mqkZnBMEzSj5NPwlYwIDAQABAoIBAQC8kO2r1hcH77S8 +rW+C7Rpm5DCp7CXCCAk9pXw8jfook3IKQAzogephZVhWPBpTpNGQkXjZr4VBnd3V +qw4VQ10soSEbfLHsuw18FdNwBHvAYnqqiTwmcL/Eyajaq64fs1EROkj6HAsv8loJ +4z3lM/cbacVsUOwenhmuh1ELOhNvGKQuCzSpCoVykP2cWCMnqHEl43Ilqm5tga23 
+PtMJS1jM6IazE6EzfelwuGGCEmKK5EKeDHB+3PU4sUSHfXv/l17adSJtDbiK/JiH +2Op3DzSGWZ2xhkYt35Oj7auxJ90f3BoG0/JZABdiaZu3DOgp9JNzqr1sH4rFPWfe +dWBk665JAoGBAPrRhcjkHmRVvQwViGrzE8RUqblW6Wd6xVcf7KbparKjcFbPkoo1 +3NJpKonkvyLj3WQdltXNXuvM+QcdT2TNFv+hywkCC7Wxb/MPZ2eF+nRBMg59209T +eAWjq9GflPn7uO/4jnfCLCR+DNiEvctJ1nHf0qBTVC+s+QyhO9ilZd1PAoGBAMa7 +imK7XH7kX0zpsafPLiORr7XvOzDo/NE/kpKHisdre8VL397KlVsQmQldx33zRa7g +ctCIGjQcsnitpa24vS2G4wru3fqGbKqf3tASoC9yNMRxIBDxlhsASe0TczRw4HKT +i2HMlb7rDZdXa9mY+eDszOUUnGtkmX/D372fcTmtAoGBAOOpFoQX+zYbVLMJQH/D +D2gfaMbgCo9wsnq4cXe3Wq+3Bhrl4h8tcLhT2Na9GHi015kt+mEqPkROEqPQiOX3 ++i4iT0Zn4vUSj4jRrIwc4g5vtt3Mgynnm4OS4jwtW23kfCLlO3ucdbDR8Rr+sb85 +0DogbPA1cq6rlItQNiAZUPKlAoGAKqEL/EXIf4epUaxHaYGtmf+kO1iHz+QKZzBF +1pywjjpmIFo4OWgnRZN34GR3aHMInYyT1Ft9k3QcbHqDMZKRMfTfOvcmMpknMip8 +9xEnv0W2P/UsNbY8xqn3MZ2cdsFHxAwWN/JUpNFy5uXfwptn7nGdOf6D1x2LN7bi +haBv/zkCgYAqSHcp5ETqJMMz/v3H3eLDu/Xc//SdyqldKEjmv+kd0BYQNqHB9rEB +B4rtRVeWUZ6TA5x1T8dK5OaDZ+W+vdnzmOGw27eFuD+203m76+3cJS37mroEcNPt +5npe1IydjS2qU8iA8lhDeIWr2dTnrQnBtgkKiJvYbP2XG5/LahxixA== +-----END RSA PRIVATE KEY----- diff --git a/tests/RedisConfigs/Cluster/cluster-7000.conf b/tests/RedisConfigs/Cluster/cluster-7000.conf index f250a3db3..ad11a23fd 100644 --- a/tests/RedisConfigs/Cluster/cluster-7000.conf +++ b/tests/RedisConfigs/Cluster/cluster-7000.conf @@ -6,4 +6,5 @@ cluster-node-timeout 5000 appendonly yes dbfilename "dump-7000.rdb" appendfilename "appendonly-7000.aof" -save "" \ No newline at end of file +save "" +notify-keyspace-events AKE \ No newline at end of file diff --git a/tests/RedisConfigs/Cluster/cluster-7001.conf b/tests/RedisConfigs/Cluster/cluster-7001.conf index 1ae0c6f83..589f9ea23 100644 --- a/tests/RedisConfigs/Cluster/cluster-7001.conf +++ b/tests/RedisConfigs/Cluster/cluster-7001.conf @@ -6,4 +6,5 @@ cluster-node-timeout 5000 appendonly yes dbfilename "dump-7001.rdb" appendfilename "appendonly-7001.aof" -save "" \ No newline at end of file +save "" +notify-keyspace-events AKE \ No newline at end 
of file diff --git a/tests/RedisConfigs/Cluster/cluster-7002.conf b/tests/RedisConfigs/Cluster/cluster-7002.conf index 897301f59..66a376865 100644 --- a/tests/RedisConfigs/Cluster/cluster-7002.conf +++ b/tests/RedisConfigs/Cluster/cluster-7002.conf @@ -6,4 +6,5 @@ cluster-node-timeout 5000 appendonly yes dbfilename "dump-7002.rdb" appendfilename "appendonly-7002.aof" -save "" \ No newline at end of file +save "" +notify-keyspace-events AKE \ No newline at end of file diff --git a/tests/RedisConfigs/Cluster/cluster-7003.conf b/tests/RedisConfigs/Cluster/cluster-7003.conf index 0b51677fd..1f4883023 100644 --- a/tests/RedisConfigs/Cluster/cluster-7003.conf +++ b/tests/RedisConfigs/Cluster/cluster-7003.conf @@ -6,4 +6,5 @@ cluster-node-timeout 5000 appendonly yes dbfilename "dump-7003.rdb" appendfilename "appendonly-7003.aof" -save "" \ No newline at end of file +save "" +notify-keyspace-events AKE \ No newline at end of file diff --git a/tests/RedisConfigs/Cluster/cluster-7004.conf b/tests/RedisConfigs/Cluster/cluster-7004.conf index 9a49d21f5..93d75f38a 100644 --- a/tests/RedisConfigs/Cluster/cluster-7004.conf +++ b/tests/RedisConfigs/Cluster/cluster-7004.conf @@ -6,4 +6,5 @@ cluster-node-timeout 5000 appendonly yes dbfilename "dump-7004.rdb" appendfilename "appendonly-7004.aof" -save "" \ No newline at end of file +save "" +notify-keyspace-events AKE \ No newline at end of file diff --git a/tests/RedisConfigs/Cluster/cluster-7005.conf b/tests/RedisConfigs/Cluster/cluster-7005.conf index b333a4b44..c9b5d55e2 100644 --- a/tests/RedisConfigs/Cluster/cluster-7005.conf +++ b/tests/RedisConfigs/Cluster/cluster-7005.conf @@ -6,4 +6,5 @@ cluster-node-timeout 5000 appendonly yes dbfilename "dump-7005.rdb" appendfilename "appendonly-7005.aof" -save "" \ No newline at end of file +save "" +notify-keyspace-events AKE \ No newline at end of file diff --git a/tests/RedisConfigs/Dockerfile b/tests/RedisConfigs/Dockerfile deleted file mode 100644 index 969497b7d..000000000 --- 
a/tests/RedisConfigs/Dockerfile +++ /dev/null @@ -1,19 +0,0 @@ -FROM redis:5 - -COPY Basic /data/Basic/ -COPY Failover /data/Failover/ -COPY Cluster /data/Cluster/ -COPY Sentinel /data/Sentinel/ - -RUN chown -R redis:redis /data - -COPY Docker/docker-entrypoint.sh /usr/local/bin/ -RUN chmod +x /usr/local/bin/docker-entrypoint.sh - -RUN apt-get -y update && apt-get install -y git gcc make supervisor && apt-get clean - -ADD Docker/supervisord.conf /etc/ - -ENTRYPOINT ["docker-entrypoint.sh"] - -EXPOSE 6379 6380 6381 6382 6383 7000 7001 7002 7003 7004 7005 7010 7011 26379 26380 26381 diff --git a/tests/RedisConfigs/Failover/master-6382.conf b/tests/RedisConfigs/Failover/primary-6382.conf similarity index 64% rename from tests/RedisConfigs/Failover/master-6382.conf rename to tests/RedisConfigs/Failover/primary-6382.conf index e57c55190..6055c0347 100644 --- a/tests/RedisConfigs/Failover/master-6382.conf +++ b/tests/RedisConfigs/Failover/primary-6382.conf @@ -5,5 +5,6 @@ databases 2000 maxmemory 2gb dir "../Temp" appendonly no -dbfilename "master-6382.rdb" -save "" \ No newline at end of file +dbfilename "primary-6382.rdb" +save "" +notify-keyspace-events AKE \ No newline at end of file diff --git a/tests/RedisConfigs/Failover/replica-6383.conf b/tests/RedisConfigs/Failover/replica-6383.conf index 6f1a0fc7d..e07f5a69d 100644 --- a/tests/RedisConfigs/Failover/replica-6383.conf +++ b/tests/RedisConfigs/Failover/replica-6383.conf @@ -7,4 +7,5 @@ maxmemory 2gb appendonly no dir "../Temp" dbfilename "replica-6383.rdb" -save "" \ No newline at end of file +save "" +notify-keyspace-events AKE \ No newline at end of file diff --git a/tests/RedisConfigs/Sentinel/redis-7010.conf b/tests/RedisConfigs/Sentinel/redis-7010.conf index 0e27680b2..878160632 100644 --- a/tests/RedisConfigs/Sentinel/redis-7010.conf +++ b/tests/RedisConfigs/Sentinel/redis-7010.conf @@ -5,4 +5,5 @@ maxmemory 100mb appendonly no dir "../Temp" dbfilename "sentinel-target-7010.rdb" -save "" \ No newline at end 
of file +save "" +notify-keyspace-events AKE \ No newline at end of file diff --git a/tests/RedisConfigs/Sentinel/redis-7011.conf b/tests/RedisConfigs/Sentinel/redis-7011.conf index 6d02eb150..08b8dad1a 100644 --- a/tests/RedisConfigs/Sentinel/redis-7011.conf +++ b/tests/RedisConfigs/Sentinel/redis-7011.conf @@ -6,4 +6,5 @@ maxmemory 100mb appendonly no dir "../Temp" dbfilename "sentinel-target-7011.rdb" -save "" \ No newline at end of file +save "" +notify-keyspace-events AKE \ No newline at end of file diff --git a/tests/RedisConfigs/Sentinel/sentinel-26379.conf b/tests/RedisConfigs/Sentinel/sentinel-26379.conf index 6d10f6030..27cefe69a 100644 --- a/tests/RedisConfigs/Sentinel/sentinel-26379.conf +++ b/tests/RedisConfigs/Sentinel/sentinel-26379.conf @@ -1,6 +1,6 @@ port 26379 -sentinel monitor mymaster 127.0.0.1 7010 1 -sentinel down-after-milliseconds mymaster 1000 -sentinel failover-timeout mymaster 1000 -sentinel config-epoch mymaster 0 +sentinel monitor myprimary 127.0.0.1 7010 1 +sentinel down-after-milliseconds myprimary 1000 +sentinel failover-timeout myprimary 1000 +sentinel config-epoch myprimary 0 dir "../Temp" \ No newline at end of file diff --git a/tests/RedisConfigs/Sentinel/sentinel-26380.conf b/tests/RedisConfigs/Sentinel/sentinel-26380.conf index fa044227e..b01a4d080 100644 --- a/tests/RedisConfigs/Sentinel/sentinel-26380.conf +++ b/tests/RedisConfigs/Sentinel/sentinel-26380.conf @@ -1,6 +1,6 @@ port 26380 -sentinel monitor mymaster 127.0.0.1 7010 1 -sentinel down-after-milliseconds mymaster 1000 -sentinel failover-timeout mymaster 1000 -sentinel config-epoch mymaster 0 +sentinel monitor myprimary 127.0.0.1 7010 1 +sentinel down-after-milliseconds myprimary 1000 +sentinel failover-timeout myprimary 1000 +sentinel config-epoch myprimary 0 dir "../Temp" \ No newline at end of file diff --git a/tests/RedisConfigs/Sentinel/sentinel-26381.conf b/tests/RedisConfigs/Sentinel/sentinel-26381.conf index fa49c9e14..ee8022a5a 100644 --- 
a/tests/RedisConfigs/Sentinel/sentinel-26381.conf +++ b/tests/RedisConfigs/Sentinel/sentinel-26381.conf @@ -1,6 +1,6 @@ port 26381 -sentinel monitor mymaster 127.0.0.1 7010 1 -sentinel down-after-milliseconds mymaster 1000 -sentinel failover-timeout mymaster 1000 -sentinel config-epoch mymaster 0 +sentinel monitor myprimary 127.0.0.1 7010 1 +sentinel down-after-milliseconds myprimary 1000 +sentinel failover-timeout myprimary 1000 +sentinel config-epoch myprimary 0 dir "../Temp" diff --git a/tests/RedisConfigs/docker-compose.yml b/tests/RedisConfigs/docker-compose.yml index 79dcabdf3..84cb3cc75 100644 --- a/tests/RedisConfigs/docker-compose.yml +++ b/tests/RedisConfigs/docker-compose.yml @@ -1,19 +1,28 @@ -version: '2.5' +version: '2.7' services: - redisearch: - image: redislabs/redisearch:latest - ports: - - 6385:6379 redis: build: - context: . - image: stackexchange/redis-tests:latest + context: .docker/Redis + additional_contexts: + configs: . platform: linux ports: - - 6379-6383:6379-6383 - - 7000-7006:7000-7006 - - 7010-7011:7010-7011 - - 26379-26381:26379-26381 + - 6379-6384:6379-6384 # Misc + - 7000-7006:7000-7006 # Cluster + - 7010-7011:7010-7011 # Sentinel Controllers + - 26379-26381:26379-26381 # Sentinel Data sysctls : net.core.somaxconn: '511' + envoy: + build: + context: .docker/Envoy + platform: linux + environment: + loglevel: warning + depends_on: + redis: + condition: service_started + ports: + - 7015:7015 # Cluster + - 8001:8001 # Admin diff --git a/tests/RedisConfigs/start-all.sh b/tests/RedisConfigs/start-all.sh old mode 100644 new mode 100755 index 67a58f53f..792d75a5f --- a/tests/RedisConfigs/start-all.sh +++ b/tests/RedisConfigs/start-all.sh @@ -4,19 +4,21 @@ echo "Starting Redis servers for testing..." 
#Basic Servers echo "Starting Basic: 6379-6382" pushd Basic > /dev/null -echo "${INDENT}Master: 6379" -redis-server master-6379.conf &>/dev/null & +echo "${INDENT}Primary: 6379" +redis-server primary-6379.conf &>/dev/null & echo "${INDENT}Replica: 6380" redis-server replica-6380.conf &>/dev/null & echo "${INDENT}Secure: 6381" redis-server secure-6381.conf &>/dev/null & +echo "${INDENT}Tls: 6384" +redis-server tls-ciphers-6384.conf &>/dev/null & popd > /dev/null #Failover Servers echo Starting Failover: 6382-6383 pushd Failover > /dev/null -echo "${INDENT}Master: 6382" -redis-server master-6382.conf &>/dev/null & +echo "${INDENT}Primary: 6382" +redis-server primary-6382.conf &>/dev/null & echo "${INDENT}Replica: 6383" redis-server replica-6383.conf &>/dev/null & popd > /dev/null @@ -44,4 +46,13 @@ redis-server sentinel-26380.conf --sentinel &>/dev/null & redis-server sentinel-26381.conf --sentinel &>/dev/null & popd > /dev/null -echo Servers started. \ No newline at end of file +#Envoy Servers +# Installation: https://www.envoyproxy.io/docs/envoy/latest/start/install +# Use Envoy on Ubuntu Linux to install on WSL2 +echo Starting Envoy: 7015 +pushd Envoy > /dev/null +echo "${INDENT}Envoy: 7015" +envoy -c envoy.yaml &> /dev/null & +popd > /dev/null + +echo Servers started. 
diff --git a/tests/RedisConfigs/start-basic.cmd b/tests/RedisConfigs/start-basic.cmd index 558499eb5..16bec8780 100644 --- a/tests/RedisConfigs/start-basic.cmd +++ b/tests/RedisConfigs/start-basic.cmd @@ -1,10 +1,13 @@ @echo off echo Starting Basic: pushd %~dp0\Basic -echo Master: 6379 -@start "Redis (Master): 6379" /min ..\3.0.503\redis-server.exe master-6379.conf +echo Primary: 6379 +@start "Redis (Primary): 6379" /min ..\3.0.503\redis-server.exe primary-6379-3.0.conf echo Replica: 6380 @start "Redis (Replica): 6380" /min ..\3.0.503\redis-server.exe replica-6380.conf echo Secure: 6381 @start "Redis (Secure): 6381" /min ..\3.0.503\redis-server.exe secure-6381.conf +@REM TLS config doesn't work in 3.x - don't even start it +@REM echo TLS: 6384 +@REM @start "Redis (TLS): 6384" /min ..\3.0.503\redis-server.exe tls-ciphers-6384.conf popd \ No newline at end of file diff --git a/tests/RedisConfigs/start-basic.sh b/tests/RedisConfigs/start-basic.sh old mode 100644 new mode 100755 index cd76034c1..4c35ea5c1 --- a/tests/RedisConfigs/start-basic.sh +++ b/tests/RedisConfigs/start-basic.sh @@ -4,12 +4,14 @@ echo "Starting Redis servers for testing..." #Basic Servers echo "Starting Basic: 6379-6382" pushd Basic > /dev/null -echo "${INDENT}Master: 6379" -redis-server master-6379.conf &>/dev/null & +echo "${INDENT}Primary: 6379" +redis-server primary-6379.conf &>/dev/null & echo "${INDENT}Replica: 6380" redis-server replica-6380.conf &>/dev/null & echo "${INDENT}Secure: 6381" redis-server secure-6381.conf &>/dev/null & +echo "${INDENT}Tls: 6384" +redis-server tls-ciphers-6384.conf &>/dev/null & popd > /dev/null echo Servers started. 
\ No newline at end of file diff --git a/tests/RedisConfigs/start-cluster.cmd b/tests/RedisConfigs/start-cluster.cmd index 3ae4f4475..2db6be94a 100644 --- a/tests/RedisConfigs/start-cluster.cmd +++ b/tests/RedisConfigs/start-cluster.cmd @@ -7,4 +7,6 @@ pushd %~dp0\Cluster @start "Redis (Cluster): 7003" /min ..\3.0.503\redis-server.exe cluster-7003.conf @start "Redis (Cluster): 7004" /min ..\3.0.503\redis-server.exe cluster-7004.conf @start "Redis (Cluster): 7005" /min ..\3.0.503\redis-server.exe cluster-7005.conf -popd \ No newline at end of file +popd +REM envoy doesnt have an windows image, only a docker +REM need to explore if we can setup host networking \ No newline at end of file diff --git a/tests/RedisConfigs/start-failover.cmd b/tests/RedisConfigs/start-failover.cmd index 513d1b337..e696bdfa3 100644 --- a/tests/RedisConfigs/start-failover.cmd +++ b/tests/RedisConfigs/start-failover.cmd @@ -2,7 +2,7 @@ echo Starting Failover: pushd %~dp0\Failover echo Master: 6382 -@start "Redis (Failover Master): 6382" /min ..\3.0.503\redis-server.exe master-6382.conf +@start "Redis (Failover Master): 6382" /min ..\3.0.503\redis-server.exe primary-6382.conf echo Replica: 6383 @start "Redis (Failover Replica): 6383" /min ..\3.0.503\redis-server.exe replica-6383.conf popd \ No newline at end of file diff --git a/tests/StackExchange.Redis.Benchmarks/AsciiHashBenchmarks.cs b/tests/StackExchange.Redis.Benchmarks/AsciiHashBenchmarks.cs new file mode 100644 index 000000000..57677f705 --- /dev/null +++ b/tests/StackExchange.Redis.Benchmarks/AsciiHashBenchmarks.cs @@ -0,0 +1,149 @@ +using System; +using System.Buffers; +using System.Collections.Generic; +using System.Text; +using BenchmarkDotNet.Attributes; +using RESPite; + +namespace StackExchange.Redis.Benchmarks; + +// [Config(typeof(CustomConfig))] +[ShortRunJob, MemoryDiagnoser] +public class AsciiHashBenchmarks +{ + private const string SharedString = 
"some-typical-data-for-comparisons-that-needs-to-be-at-least-64-characters"; + private static readonly byte[] SharedUtf8; + private static readonly ReadOnlySequence SharedMultiSegment; + + static AsciiHashBenchmarks() + { + SharedUtf8 = Encoding.UTF8.GetBytes(SharedString); + + var first = new Segment(SharedUtf8.AsMemory(0, 1), null); + var second = new Segment(SharedUtf8.AsMemory(1), first); + SharedMultiSegment = new ReadOnlySequence(first, 0, second, second.Memory.Length); + } + + private sealed class Segment : ReadOnlySequenceSegment + { + public Segment(ReadOnlyMemory memory, Segment? previous) + { + Memory = memory; + if (previous is { }) + { + RunningIndex = previous.RunningIndex + previous.Memory.Length; + previous.Next = this; + } + } + } + + private string _sourceString = SharedString; + private ReadOnlyMemory _sourceBytes = SharedUtf8; + private ReadOnlySequence _sourceMultiSegmentBytes = SharedMultiSegment; + private ReadOnlySequence SingleSegmentBytes => new(_sourceBytes); + + [GlobalSetup] + public void Setup() + { + _sourceString = SharedString.Substring(0, Size); + _sourceBytes = SharedUtf8.AsMemory(0, Size); + _sourceMultiSegmentBytes = SharedMultiSegment.Slice(0, Size); + + var bytes = _sourceBytes.Span; + var expected = AsciiHash.HashCS(bytes); + + Assert(AsciiHash.HashCS(bytes), nameof(AsciiHash.HashCS) + ":byte"); + Assert(AsciiHash.HashCS(_sourceString.AsSpan()), nameof(AsciiHash.HashCS) + ":char"); + + /* + Assert(AsciiHash.HashCS(SingleSegmentBytes), nameof(AsciiHash.HashCS) + " (single segment)"); + Assert(AsciiHash.HashCS(_sourceMultiSegmentBytes), nameof(AsciiHash.HashCS) + " (multi segment)"); + */ + + void Assert(long actual, string name) + { + if (actual != expected) + { + throw new InvalidOperationException($"Hash mismatch for {name}, {expected} != {actual}"); + } + } + } + + [ParamsSource(nameof(Sizes))] + public int Size { get; set; } = 7; + + public IEnumerable Sizes => [0, 1, 2, 3, 4, 5, 6, 7, 8, 16, 64]; + + private const int 
OperationsPerInvoke = 1024; + + // [Benchmark(OperationsPerInvoke = OperationsPerInvoke, Baseline = true)] + public int StringGetHashCode() + { + int hash = 0; + var val = _sourceString; + for (int i = 0; i < OperationsPerInvoke; i++) + { + hash = val.GetHashCode(); + } + + return hash; + } + + [BenchmarkCategory("byte")] + [Benchmark(OperationsPerInvoke = OperationsPerInvoke)] + public long HashCS_B() + { + long hash = 0; + var val = _sourceBytes.Span; + for (int i = 0; i < OperationsPerInvoke; i++) + { + hash = AsciiHash.HashCS(val); + } + + return hash; + } + + [BenchmarkCategory("char")] + [Benchmark(OperationsPerInvoke = OperationsPerInvoke)] + public long HashCS_C() + { + long hash = 0; + var val = _sourceString.AsSpan(); + for (int i = 0; i < OperationsPerInvoke; i++) + { +#pragma warning disable CS0618 // Type or member is obsolete + hash = AsciiHash.HashCS(val); +#pragma warning restore CS0618 // Type or member is obsolete + } + + return hash; + } + + /* + // [Benchmark(OperationsPerInvoke = OperationsPerInvoke)] + public long Hash64_SingleSegment() + { + long hash = 0; + var val = SingleSegmentBytes; + for (int i = 0; i < OperationsPerInvoke; i++) + { + hash = AsciiHash.HashCS(val); + } + + return hash; + } + + // [Benchmark(OperationsPerInvoke = OperationsPerInvoke)] + public long Hash64_MultiSegment() + { + long hash = 0; + var val = _sourceMultiSegmentBytes; + for (int i = 0; i < OperationsPerInvoke; i++) + { + hash = AsciiHash.HashCS(val); + } + + return hash; + } + */ +} diff --git a/tests/StackExchange.Redis.Benchmarks/AsciiHashSwitch.cs b/tests/StackExchange.Redis.Benchmarks/AsciiHashSwitch.cs new file mode 100644 index 000000000..2409362ce --- /dev/null +++ b/tests/StackExchange.Redis.Benchmarks/AsciiHashSwitch.cs @@ -0,0 +1,517 @@ +using System; +using System.Text; +using BenchmarkDotNet.Attributes; +using RESPite; +// ReSharper disable InconsistentNaming +// ReSharper disable ArrangeTypeMemberModifiers +// ReSharper disable 
MemberCanBePrivate.Local +#pragma warning disable SA1300, SA1134, CS8981, SA1400 +namespace StackExchange.Redis.Benchmarks; + +[ShortRunJob, MemoryDiagnoser] +public class AsciiHashSwitch +{ + // conclusion: it doesn't matter; switch on the hash or length is fine, just: remember to do the Is check + // CS vs CI: CI misses are cheap, because of the hash fail; CI hits of values <= 8 characters are cheap if + // it turns out to be a CS match, because of the CS hash check which can cheaply test CS equality; CI inequality + // and CI equality over 8 characters has a bit more overhead, but still fine + public enum Field + { + key, + abc, + port, + test, + tracking_active, + sample_ratio, + selected_slots, + all_commands_all_slots_us, + all_commands_selected_slots_us, + sampled_command_selected_slots_us, + sampled_commands_selected_slots_us, + net_bytes_all_commands_all_slots, + net_bytes_all_commands_selected_slots, + net_bytes_sampled_commands_selected_slots, + collection_start_time_unix_ms, + collection_duration_ms, + collection_duration_us, + total_cpu_time_user_ms, + total_cpu_time_user_us, + total_cpu_time_sys_ms, + total_cpu_time_sys_us, + total_net_bytes, + by_cpu_time_us, + by_net_bytes, + + Unknown = -1, + } + + private byte[] _bytes = []; + [GlobalSetup] + public void Init() => _bytes = Encoding.UTF8.GetBytes(Value); + + public static string[] GetValues() => + [ + key.Text, + abc.Text, + port.Text, + test.Text, + tracking_active.Text, + sample_ratio.Text, + selected_slots.Text, + all_commands_all_slots_us.Text, + net_bytes_sampled_commands_selected_slots.Text, + total_cpu_time_sys_us.Text, + total_net_bytes.Text, + by_cpu_time_us.Text, + by_net_bytes.Text, + "miss", + "PORT", + "much longer miss", + ]; + + [ParamsSource(nameof(GetValues))] + public string Value { get; set; } = ""; + + [Benchmark] + public Field SwitchOnHash() + { + ReadOnlySpan span = _bytes; + var hash = AsciiHash.HashCS(span); + return hash switch + { + key.HashCS when key.IsCS(hash, span) => 
Field.key, + abc.HashCS when abc.IsCS(hash, span) => Field.abc, + port.HashCS when port.IsCS(hash, span) => Field.port, + test.HashCS when test.IsCS(hash, span) => Field.test, + tracking_active.HashCS when tracking_active.IsCS(hash, span) => Field.tracking_active, + sample_ratio.HashCS when sample_ratio.IsCS(hash, span) => Field.sample_ratio, + selected_slots.HashCS when selected_slots.IsCS(hash, span) => Field.selected_slots, + all_commands_all_slots_us.HashCS when all_commands_all_slots_us.IsCS(hash, span) => Field.all_commands_all_slots_us, + all_commands_selected_slots_us.HashCS when all_commands_selected_slots_us.IsCS(hash, span) => Field.all_commands_selected_slots_us, + sampled_command_selected_slots_us.HashCS when sampled_command_selected_slots_us.IsCS(hash, span) => Field.sampled_command_selected_slots_us, + sampled_commands_selected_slots_us.HashCS when sampled_commands_selected_slots_us.IsCS(hash, span) => Field.sampled_commands_selected_slots_us, + net_bytes_all_commands_all_slots.HashCS when net_bytes_all_commands_all_slots.IsCS(hash, span) => Field.net_bytes_all_commands_all_slots, + net_bytes_all_commands_selected_slots.HashCS when net_bytes_all_commands_selected_slots.IsCS(hash, span) => Field.net_bytes_all_commands_selected_slots, + net_bytes_sampled_commands_selected_slots.HashCS when net_bytes_sampled_commands_selected_slots.IsCS(hash, span) => Field.net_bytes_sampled_commands_selected_slots, + collection_start_time_unix_ms.HashCS when collection_start_time_unix_ms.IsCS(hash, span) => Field.collection_start_time_unix_ms, + collection_duration_ms.HashCS when collection_duration_ms.IsCS(hash, span) => Field.collection_duration_ms, + collection_duration_us.HashCS when collection_duration_us.IsCS(hash, span) => Field.collection_duration_us, + total_cpu_time_user_ms.HashCS when total_cpu_time_user_ms.IsCS(hash, span) => Field.total_cpu_time_user_ms, + total_cpu_time_user_us.HashCS when total_cpu_time_user_us.IsCS(hash, span) => 
Field.total_cpu_time_user_us, + total_cpu_time_sys_ms.HashCS when total_cpu_time_sys_ms.IsCS(hash, span) => Field.total_cpu_time_sys_ms, + total_cpu_time_sys_us.HashCS when total_cpu_time_sys_us.IsCS(hash, span) => Field.total_cpu_time_sys_us, + total_net_bytes.HashCS when total_net_bytes.IsCS(hash, span) => Field.total_net_bytes, + by_cpu_time_us.HashCS when by_cpu_time_us.IsCS(hash, span) => Field.by_cpu_time_us, + by_net_bytes.HashCS when by_net_bytes.IsCS(hash, span) => Field.by_net_bytes, + _ => Field.Unknown, + }; + } + + [Benchmark] + public Field SequenceEqual() + { + ReadOnlySpan span = _bytes; + if (span.SequenceEqual(key.U8)) return Field.key; + if (span.SequenceEqual(abc.U8)) return Field.abc; + if (span.SequenceEqual(port.U8)) return Field.port; + if (span.SequenceEqual(test.U8)) return Field.test; + if (span.SequenceEqual(tracking_active.U8)) return Field.tracking_active; + if (span.SequenceEqual(sample_ratio.U8)) return Field.sample_ratio; + if (span.SequenceEqual(selected_slots.U8)) return Field.selected_slots; + if (span.SequenceEqual(all_commands_all_slots_us.U8)) return Field.all_commands_all_slots_us; + if (span.SequenceEqual(all_commands_selected_slots_us.U8)) return Field.all_commands_selected_slots_us; + if (span.SequenceEqual(sampled_command_selected_slots_us.U8)) return Field.sampled_command_selected_slots_us; + if (span.SequenceEqual(sampled_commands_selected_slots_us.U8)) return Field.sampled_commands_selected_slots_us; + if (span.SequenceEqual(net_bytes_all_commands_all_slots.U8)) return Field.net_bytes_all_commands_all_slots; + if (span.SequenceEqual(net_bytes_all_commands_selected_slots.U8)) return Field.net_bytes_all_commands_selected_slots; + if (span.SequenceEqual(net_bytes_sampled_commands_selected_slots.U8)) return Field.net_bytes_sampled_commands_selected_slots; + if (span.SequenceEqual(collection_start_time_unix_ms.U8)) return Field.collection_start_time_unix_ms; + if (span.SequenceEqual(collection_duration_ms.U8)) return 
Field.collection_duration_ms; + if (span.SequenceEqual(collection_duration_us.U8)) return Field.collection_duration_us; + if (span.SequenceEqual(total_cpu_time_user_ms.U8)) return Field.total_cpu_time_user_ms; + if (span.SequenceEqual(total_cpu_time_user_us.U8)) return Field.total_cpu_time_user_us; + if (span.SequenceEqual(total_cpu_time_sys_ms.U8)) return Field.total_cpu_time_sys_ms; + if (span.SequenceEqual(total_cpu_time_sys_us.U8)) return Field.total_cpu_time_sys_us; + if (span.SequenceEqual(total_net_bytes.U8)) return Field.total_net_bytes; + if (span.SequenceEqual(by_cpu_time_us.U8)) return Field.by_cpu_time_us; + if (span.SequenceEqual(by_net_bytes.U8)) return Field.by_net_bytes; + + return Field.Unknown; + } + + [Benchmark] + public Field SwitchOnLength() + { + ReadOnlySpan span = _bytes; + var hash = AsciiHash.HashCS(span); + return span.Length switch + { + key.Length when key.IsCS(hash, span) => Field.key, + abc.Length when abc.IsCS(hash, span) => Field.abc, + port.Length when port.IsCS(hash, span) => Field.port, + test.Length when test.IsCS(hash, span) => Field.test, + tracking_active.Length when tracking_active.IsCS(hash, span) => Field.tracking_active, + sample_ratio.Length when sample_ratio.IsCS(hash, span) => Field.sample_ratio, + selected_slots.Length when selected_slots.IsCS(hash, span) => Field.selected_slots, + all_commands_all_slots_us.Length when all_commands_all_slots_us.IsCS(hash, span) => Field.all_commands_all_slots_us, + all_commands_selected_slots_us.Length when all_commands_selected_slots_us.IsCS(hash, span) => Field.all_commands_selected_slots_us, + sampled_command_selected_slots_us.Length when sampled_command_selected_slots_us.IsCS(hash, span) => Field.sampled_command_selected_slots_us, + sampled_commands_selected_slots_us.Length when sampled_commands_selected_slots_us.IsCS(hash, span) => Field.sampled_commands_selected_slots_us, + net_bytes_all_commands_all_slots.Length when net_bytes_all_commands_all_slots.IsCS(hash, span) => 
Field.net_bytes_all_commands_all_slots, + net_bytes_all_commands_selected_slots.Length when net_bytes_all_commands_selected_slots.IsCS(hash, span) => Field.net_bytes_all_commands_selected_slots, + net_bytes_sampled_commands_selected_slots.Length when net_bytes_sampled_commands_selected_slots.IsCS(hash, span) => Field.net_bytes_sampled_commands_selected_slots, + collection_start_time_unix_ms.Length when collection_start_time_unix_ms.IsCS(hash, span) => Field.collection_start_time_unix_ms, + collection_duration_ms.Length when collection_duration_ms.IsCS(hash, span) => Field.collection_duration_ms, + collection_duration_us.Length when collection_duration_us.IsCS(hash, span) => Field.collection_duration_us, + total_cpu_time_user_ms.Length when total_cpu_time_user_ms.IsCS(hash, span) => Field.total_cpu_time_user_ms, + total_cpu_time_user_us.Length when total_cpu_time_user_us.IsCS(hash, span) => Field.total_cpu_time_user_us, + total_cpu_time_sys_ms.Length when total_cpu_time_sys_ms.IsCS(hash, span) => Field.total_cpu_time_sys_ms, + total_cpu_time_sys_us.Length when total_cpu_time_sys_us.IsCS(hash, span) => Field.total_cpu_time_sys_us, + total_net_bytes.Length when total_net_bytes.IsCS(hash, span) => Field.total_net_bytes, + by_cpu_time_us.Length when by_cpu_time_us.IsCS(hash, span) => Field.by_cpu_time_us, + by_net_bytes.Length when by_net_bytes.IsCS(hash, span) => Field.by_net_bytes, + _ => Field.Unknown, + }; + } + + [Benchmark] + public Field SwitchOnHash_CI() + { + ReadOnlySpan span = _bytes; + var hash = AsciiHash.HashUC(span); + return hash switch + { + key.HashCI when key.IsCI(hash, span) => Field.key, + abc.HashCI when abc.IsCI(hash, span) => Field.abc, + port.HashCI when port.IsCI(hash, span) => Field.port, + test.HashCI when test.IsCI(hash, span) => Field.test, + tracking_active.HashCI when tracking_active.IsCI(hash, span) => Field.tracking_active, + sample_ratio.HashCI when sample_ratio.IsCI(hash, span) => Field.sample_ratio, + selected_slots.HashCI when 
selected_slots.IsCI(hash, span) => Field.selected_slots, + all_commands_all_slots_us.HashCI when all_commands_all_slots_us.IsCI(hash, span) => Field.all_commands_all_slots_us, + all_commands_selected_slots_us.HashCI when all_commands_selected_slots_us.IsCI(hash, span) => Field.all_commands_selected_slots_us, + sampled_command_selected_slots_us.HashCI when sampled_command_selected_slots_us.IsCI(hash, span) => Field.sampled_command_selected_slots_us, + sampled_commands_selected_slots_us.HashCI when sampled_commands_selected_slots_us.IsCI(hash, span) => Field.sampled_commands_selected_slots_us, + net_bytes_all_commands_all_slots.HashCI when net_bytes_all_commands_all_slots.IsCI(hash, span) => Field.net_bytes_all_commands_all_slots, + net_bytes_all_commands_selected_slots.HashCI when net_bytes_all_commands_selected_slots.IsCI(hash, span) => Field.net_bytes_all_commands_selected_slots, + net_bytes_sampled_commands_selected_slots.HashCI when net_bytes_sampled_commands_selected_slots.IsCI(hash, span) => Field.net_bytes_sampled_commands_selected_slots, + collection_start_time_unix_ms.HashCI when collection_start_time_unix_ms.IsCI(hash, span) => Field.collection_start_time_unix_ms, + collection_duration_ms.HashCI when collection_duration_ms.IsCI(hash, span) => Field.collection_duration_ms, + collection_duration_us.HashCI when collection_duration_us.IsCI(hash, span) => Field.collection_duration_us, + total_cpu_time_user_ms.HashCI when total_cpu_time_user_ms.IsCI(hash, span) => Field.total_cpu_time_user_ms, + total_cpu_time_user_us.HashCI when total_cpu_time_user_us.IsCI(hash, span) => Field.total_cpu_time_user_us, + total_cpu_time_sys_ms.HashCI when total_cpu_time_sys_ms.IsCI(hash, span) => Field.total_cpu_time_sys_ms, + total_cpu_time_sys_us.HashCI when total_cpu_time_sys_us.IsCI(hash, span) => Field.total_cpu_time_sys_us, + total_net_bytes.HashCI when total_net_bytes.IsCI(hash, span) => Field.total_net_bytes, + by_cpu_time_us.HashCI when by_cpu_time_us.IsCI(hash, span) => 
Field.by_cpu_time_us, + by_net_bytes.HashCI when by_net_bytes.IsCI(hash, span) => Field.by_net_bytes, + _ => Field.Unknown, + }; + } + + [Benchmark] + public Field SwitchOnLength_CI() + { + ReadOnlySpan span = _bytes; + var hash = AsciiHash.HashUC(span); + return span.Length switch + { + key.Length when key.IsCI(hash, span) => Field.key, + abc.Length when abc.IsCI(hash, span) => Field.abc, + port.Length when port.IsCI(hash, span) => Field.port, + test.Length when test.IsCI(hash, span) => Field.test, + tracking_active.Length when tracking_active.IsCI(hash, span) => Field.tracking_active, + sample_ratio.Length when sample_ratio.IsCI(hash, span) => Field.sample_ratio, + selected_slots.Length when selected_slots.IsCI(hash, span) => Field.selected_slots, + all_commands_all_slots_us.Length when all_commands_all_slots_us.IsCI(hash, span) => Field.all_commands_all_slots_us, + all_commands_selected_slots_us.Length when all_commands_selected_slots_us.IsCI(hash, span) => Field.all_commands_selected_slots_us, + sampled_command_selected_slots_us.Length when sampled_command_selected_slots_us.IsCI(hash, span) => Field.sampled_command_selected_slots_us, + sampled_commands_selected_slots_us.Length when sampled_commands_selected_slots_us.IsCI(hash, span) => Field.sampled_commands_selected_slots_us, + net_bytes_all_commands_all_slots.Length when net_bytes_all_commands_all_slots.IsCI(hash, span) => Field.net_bytes_all_commands_all_slots, + net_bytes_all_commands_selected_slots.Length when net_bytes_all_commands_selected_slots.IsCI(hash, span) => Field.net_bytes_all_commands_selected_slots, + net_bytes_sampled_commands_selected_slots.Length when net_bytes_sampled_commands_selected_slots.IsCI(hash, span) => Field.net_bytes_sampled_commands_selected_slots, + collection_start_time_unix_ms.Length when collection_start_time_unix_ms.IsCI(hash, span) => Field.collection_start_time_unix_ms, + collection_duration_ms.Length when collection_duration_ms.IsCI(hash, span) => 
Field.collection_duration_ms, + collection_duration_us.Length when collection_duration_us.IsCI(hash, span) => Field.collection_duration_us, + total_cpu_time_user_ms.Length when total_cpu_time_user_ms.IsCI(hash, span) => Field.total_cpu_time_user_ms, + total_cpu_time_user_us.Length when total_cpu_time_user_us.IsCI(hash, span) => Field.total_cpu_time_user_us, + total_cpu_time_sys_ms.Length when total_cpu_time_sys_ms.IsCI(hash, span) => Field.total_cpu_time_sys_ms, + total_cpu_time_sys_us.Length when total_cpu_time_sys_us.IsCI(hash, span) => Field.total_cpu_time_sys_us, + total_net_bytes.Length when total_net_bytes.IsCI(hash, span) => Field.total_net_bytes, + by_cpu_time_us.Length when by_cpu_time_us.IsCI(hash, span) => Field.by_cpu_time_us, + by_net_bytes.Length when by_net_bytes.IsCI(hash, span) => Field.by_net_bytes, + _ => Field.Unknown, + }; + } + + /* + we're using raw output from the code-gen, because BDN kinda hates the tooling, because + of the complex build pipe; this is left for reference only + + [AsciiHash] internal static partial class key { } + [AsciiHash] internal static partial class abc { } + [AsciiHash] internal static partial class port { } + [AsciiHash] internal static partial class test { } + [AsciiHash] internal static partial class tracking_active { } + [AsciiHash] internal static partial class sample_ratio { } + [AsciiHash] internal static partial class selected_slots { } + [AsciiHash] internal static partial class all_commands_all_slots_us { } + [AsciiHash] internal static partial class all_commands_selected_slots_us { } + [AsciiHash] internal static partial class sampled_command_selected_slots_us { } + [AsciiHash] internal static partial class sampled_commands_selected_slots_us { } + [AsciiHash] internal static partial class net_bytes_all_commands_all_slots { } + [AsciiHash] internal static partial class net_bytes_all_commands_selected_slots { } + [AsciiHash] internal static partial class net_bytes_sampled_commands_selected_slots { } + 
[AsciiHash] internal static partial class collection_start_time_unix_ms { } + [AsciiHash] internal static partial class collection_duration_ms { } + [AsciiHash] internal static partial class collection_duration_us { } + [AsciiHash] internal static partial class total_cpu_time_user_ms { } + [AsciiHash] internal static partial class total_cpu_time_user_us { } + [AsciiHash] internal static partial class total_cpu_time_sys_ms { } + [AsciiHash] internal static partial class total_cpu_time_sys_us { } + [AsciiHash] internal static partial class total_net_bytes { } + [AsciiHash] internal static partial class by_cpu_time_us { } + [AsciiHash] internal static partial class by_net_bytes { } + */ + + static class key + { + public const int Length = 3; + public const long HashCS = 7955819; + public const long HashCI = 5850443; + public static ReadOnlySpan U8 => "key"u8; + public const string Text = "key"; + public static bool IsCS(long hash, ReadOnlySpan value) => hash == HashCS & value.Length == Length; + public static bool IsCI(long hash, ReadOnlySpan value) => (hash == HashCI & value.Length == Length) && (global::RESPite.AsciiHash.HashCS(value) == HashCS || global::RESPite.AsciiHash.EqualsCI(value, U8)); + } + static class abc + { + public const int Length = 3; + public const long HashCS = 6513249; + public const long HashCI = 4407873; + public static ReadOnlySpan U8 => "abc"u8; + public const string Text = "abc"; + public static bool IsCS(long hash, ReadOnlySpan value) => hash == HashCS & value.Length == Length; + public static bool IsCI(long hash, ReadOnlySpan value) => (hash == HashCI & value.Length == Length) && (global::RESPite.AsciiHash.HashCS(value) == HashCS || global::RESPite.AsciiHash.EqualsCI(value, U8)); + } + static class port + { + public const int Length = 4; + public const long HashCS = 1953656688; + public const long HashCI = 1414680400; + public static ReadOnlySpan U8 => "port"u8; + public const string Text = "port"; + public static bool IsCS(long hash, 
ReadOnlySpan value) => hash == HashCS & value.Length == Length; + public static bool IsCI(long hash, ReadOnlySpan value) => (hash == HashCI & value.Length == Length) && (global::RESPite.AsciiHash.HashCS(value) == HashCS || global::RESPite.AsciiHash.EqualsCI(value, U8)); + } + static class test + { + public const int Length = 4; + public const long HashCS = 1953719668; + public const long HashCI = 1414743380; + public static ReadOnlySpan U8 => "test"u8; + public const string Text = "test"; + public static bool IsCS(long hash, ReadOnlySpan value) => hash == HashCS & value.Length == Length; + public static bool IsCI(long hash, ReadOnlySpan value) => (hash == HashCI & value.Length == Length) && (global::RESPite.AsciiHash.HashCS(value) == HashCS || global::RESPite.AsciiHash.EqualsCI(value, U8)); + } + static class tracking_active + { + public const int Length = 15; + public const long HashCS = 7453010343294497396; + public const long HashCI = 5138124812476043860; + public static ReadOnlySpan U8 => "tracking-active"u8; + public const string Text = "tracking-active"; + public static bool IsCS(long hash, ReadOnlySpan value) => hash == HashCS && value.SequenceEqual(U8); + public static bool IsCI(long hash, ReadOnlySpan value) => (hash == HashCI & value.Length == Length) && global::RESPite.AsciiHash.EqualsCI(value, U8); + } + static class sample_ratio + { + public const int Length = 12; + public const long HashCS = 8227343610692854131; + public const long HashCI = 5912458079874400595; + public static ReadOnlySpan U8 => "sample-ratio"u8; + public const string Text = "sample-ratio"; + public static bool IsCS(long hash, ReadOnlySpan value) => hash == HashCS && value.SequenceEqual(U8); + public static bool IsCI(long hash, ReadOnlySpan value) => (hash == HashCI & value.Length == Length) && global::RESPite.AsciiHash.EqualsCI(value, U8); + } + static class selected_slots + { + public const int Length = 14; + public const long HashCS = 7234316346692756851; + public const long HashCI 
= 4919430815874303315; + public static ReadOnlySpan U8 => "selected-slots"u8; + public const string Text = "selected-slots"; + public static bool IsCS(long hash, ReadOnlySpan value) => hash == HashCS && value.SequenceEqual(U8); + public static bool IsCI(long hash, ReadOnlySpan value) => (hash == HashCI & value.Length == Length) && global::RESPite.AsciiHash.EqualsCI(value, U8); + } + static class all_commands_all_slots_us + { + public const int Length = 25; + public const long HashCS = 7885080994350132321; + public const long HashCI = 5570195463531678785; + public static ReadOnlySpan U8 => "all-commands-all-slots-us"u8; + public const string Text = "all-commands-all-slots-us"; + public static bool IsCS(long hash, ReadOnlySpan value) => hash == HashCS && value.SequenceEqual(U8); + public static bool IsCI(long hash, ReadOnlySpan value) => (hash == HashCI & value.Length == Length) && global::RESPite.AsciiHash.EqualsCI(value, U8); + } + static class all_commands_selected_slots_us + { + public const int Length = 30; + public const long HashCS = 7885080994350132321; + public const long HashCI = 5570195463531678785; + public static ReadOnlySpan U8 => "all-commands-selected-slots-us"u8; + public const string Text = "all-commands-selected-slots-us"; + public static bool IsCS(long hash, ReadOnlySpan value) => hash == HashCS && value.SequenceEqual(U8); + public static bool IsCI(long hash, ReadOnlySpan value) => (hash == HashCI & value.Length == Length) && global::RESPite.AsciiHash.EqualsCI(value, U8); + } + static class sampled_command_selected_slots_us + { + public const int Length = 33; + public const long HashCS = 3270850745794912627; + public const long HashCI = 955965214976459091; + public static ReadOnlySpan U8 => "sampled-command-selected-slots-us"u8; + public const string Text = "sampled-command-selected-slots-us"; + public static bool IsCS(long hash, ReadOnlySpan value) => hash == HashCS && value.SequenceEqual(U8); + public static bool IsCI(long hash, ReadOnlySpan 
value) => (hash == HashCI & value.Length == Length) && global::RESPite.AsciiHash.EqualsCI(value, U8); + } + static class sampled_commands_selected_slots_us + { + public const int Length = 34; + public const long HashCS = 3270850745794912627; + public const long HashCI = 955965214976459091; + public static ReadOnlySpan U8 => "sampled-commands-selected-slots-us"u8; + public const string Text = "sampled-commands-selected-slots-us"; + public static bool IsCS(long hash, ReadOnlySpan value) => hash == HashCS && value.SequenceEqual(U8); + public static bool IsCI(long hash, ReadOnlySpan value) => (hash == HashCI & value.Length == Length) && global::RESPite.AsciiHash.EqualsCI(value, U8); + } + static class net_bytes_all_commands_all_slots + { + public const int Length = 32; + public const long HashCS = 7310601557705516398; + public const long HashCI = 4995716026887062862; + public static ReadOnlySpan U8 => "net-bytes-all-commands-all-slots"u8; + public const string Text = "net-bytes-all-commands-all-slots"; + public static bool IsCS(long hash, ReadOnlySpan value) => hash == HashCS && value.SequenceEqual(U8); + public static bool IsCI(long hash, ReadOnlySpan value) => (hash == HashCI & value.Length == Length) && global::RESPite.AsciiHash.EqualsCI(value, U8); + } + static class net_bytes_all_commands_selected_slots + { + public const int Length = 37; + public const long HashCS = 7310601557705516398; + public const long HashCI = 4995716026887062862; + public static ReadOnlySpan U8 => "net-bytes-all-commands-selected-slots"u8; + public const string Text = "net-bytes-all-commands-selected-slots"; + public static bool IsCS(long hash, ReadOnlySpan value) => hash == HashCS && value.SequenceEqual(U8); + public static bool IsCI(long hash, ReadOnlySpan value) => (hash == HashCI & value.Length == Length) && global::RESPite.AsciiHash.EqualsCI(value, U8); + } + static class net_bytes_sampled_commands_selected_slots + { + public const int Length = 41; + public const long HashCS = 
7310601557705516398; + public const long HashCI = 4995716026887062862; + public static ReadOnlySpan U8 => "net-bytes-sampled-commands-selected-slots"u8; + public const string Text = "net-bytes-sampled-commands-selected-slots"; + public static bool IsCS(long hash, ReadOnlySpan value) => hash == HashCS && value.SequenceEqual(U8); + public static bool IsCI(long hash, ReadOnlySpan value) => (hash == HashCI & value.Length == Length) && global::RESPite.AsciiHash.EqualsCI(value, U8); + } + static class collection_start_time_unix_ms + { + public const int Length = 29; + public const long HashCS = 7598807758542761827; + public const long HashCI = 5283922227724308291; + public static ReadOnlySpan U8 => "collection-start-time-unix-ms"u8; + public const string Text = "collection-start-time-unix-ms"; + public static bool IsCS(long hash, ReadOnlySpan value) => hash == HashCS && value.SequenceEqual(U8); + public static bool IsCI(long hash, ReadOnlySpan value) => (hash == HashCI & value.Length == Length) && global::RESPite.AsciiHash.EqualsCI(value, U8); + } + static class collection_duration_ms + { + public const int Length = 22; + public const long HashCS = 7598807758542761827; + public const long HashCI = 5283922227724308291; + public static ReadOnlySpan U8 => "collection-duration-ms"u8; + public const string Text = "collection-duration-ms"; + public static bool IsCS(long hash, ReadOnlySpan value) => hash == HashCS && value.SequenceEqual(U8); + public static bool IsCI(long hash, ReadOnlySpan value) => (hash == HashCI & value.Length == Length) && global::RESPite.AsciiHash.EqualsCI(value, U8); + } + static class collection_duration_us + { + public const int Length = 22; + public const long HashCS = 7598807758542761827; + public const long HashCI = 5283922227724308291; + public static ReadOnlySpan U8 => "collection-duration-us"u8; + public const string Text = "collection-duration-us"; + public static bool IsCS(long hash, ReadOnlySpan value) => hash == HashCS && 
value.SequenceEqual(U8); + public static bool IsCI(long hash, ReadOnlySpan value) => (hash == HashCI & value.Length == Length) && global::RESPite.AsciiHash.EqualsCI(value, U8); + } + static class total_cpu_time_user_ms + { + public const int Length = 22; + public const long HashCS = 8098366498457022324; + public const long HashCI = 5783480967638568788; + public static ReadOnlySpan U8 => "total-cpu-time-user-ms"u8; + public const string Text = "total-cpu-time-user-ms"; + public static bool IsCS(long hash, ReadOnlySpan value) => hash == HashCS && value.SequenceEqual(U8); + public static bool IsCI(long hash, ReadOnlySpan value) => (hash == HashCI & value.Length == Length) && global::RESPite.AsciiHash.EqualsCI(value, U8); + } + static class total_cpu_time_user_us + { + public const int Length = 22; + public const long HashCS = 8098366498457022324; + public const long HashCI = 5783480967638568788; + public static ReadOnlySpan U8 => "total-cpu-time-user-us"u8; + public const string Text = "total-cpu-time-user-us"; + public static bool IsCS(long hash, ReadOnlySpan value) => hash == HashCS && value.SequenceEqual(U8); + public static bool IsCI(long hash, ReadOnlySpan value) => (hash == HashCI & value.Length == Length) && global::RESPite.AsciiHash.EqualsCI(value, U8); + } + static class total_cpu_time_sys_ms + { + public const int Length = 21; + public const long HashCS = 8098366498457022324; + public const long HashCI = 5783480967638568788; + public static ReadOnlySpan U8 => "total-cpu-time-sys-ms"u8; + public const string Text = "total-cpu-time-sys-ms"; + public static bool IsCS(long hash, ReadOnlySpan value) => hash == HashCS && value.SequenceEqual(U8); + public static bool IsCI(long hash, ReadOnlySpan value) => (hash == HashCI & value.Length == Length) && global::RESPite.AsciiHash.EqualsCI(value, U8); + } + static class total_cpu_time_sys_us + { + public const int Length = 21; + public const long HashCS = 8098366498457022324; + public const long HashCI = 
5783480967638568788; + public static ReadOnlySpan U8 => "total-cpu-time-sys-us"u8; + public const string Text = "total-cpu-time-sys-us"; + public static bool IsCS(long hash, ReadOnlySpan value) => hash == HashCS && value.SequenceEqual(U8); + public static bool IsCI(long hash, ReadOnlySpan value) => (hash == HashCI & value.Length == Length) && global::RESPite.AsciiHash.EqualsCI(value, U8); + } + static class total_net_bytes + { + public const int Length = 15; + public const long HashCS = 7308829188783632244; + public const long HashCI = 4993943657965178708; + public static ReadOnlySpan U8 => "total-net-bytes"u8; + public const string Text = "total-net-bytes"; + public static bool IsCS(long hash, ReadOnlySpan value) => hash == HashCS && value.SequenceEqual(U8); + public static bool IsCI(long hash, ReadOnlySpan value) => (hash == HashCI & value.Length == Length) && global::RESPite.AsciiHash.EqualsCI(value, U8); + } + static class by_cpu_time_us + { + public const int Length = 14; + public const long HashCS = 8371476407912331618; + public const long HashCI = 6056590877093878082; + public static ReadOnlySpan U8 => "by-cpu-time-us"u8; + public const string Text = "by-cpu-time-us"; + public static bool IsCS(long hash, ReadOnlySpan value) => hash == HashCS && value.SequenceEqual(U8); + public static bool IsCI(long hash, ReadOnlySpan value) => (hash == HashCI & value.Length == Length) && global::RESPite.AsciiHash.EqualsCI(value, U8); + } + static class by_net_bytes + { + public const int Length = 12; + public const long HashCS = 7074438568657910114; + public const long HashCI = 4759553037839456578; + public static ReadOnlySpan U8 => "by-net-bytes"u8; + public const string Text = "by-net-bytes"; + public static bool IsCS(long hash, ReadOnlySpan value) => hash == HashCS && value.SequenceEqual(U8); + public static bool IsCI(long hash, ReadOnlySpan value) => (hash == HashCI & value.Length == Length) && global::RESPite.AsciiHash.EqualsCI(value, U8); + } +} diff --git 
a/tests/StackExchange.Redis.Benchmarks/CustomConfig.cs b/tests/StackExchange.Redis.Benchmarks/CustomConfig.cs new file mode 100644 index 000000000..7013d4386 --- /dev/null +++ b/tests/StackExchange.Redis.Benchmarks/CustomConfig.cs @@ -0,0 +1,28 @@ +using System.Runtime.InteropServices; +using BenchmarkDotNet.Columns; +using BenchmarkDotNet.Configs; +using BenchmarkDotNet.Diagnosers; +using BenchmarkDotNet.Environments; +using BenchmarkDotNet.Jobs; +using BenchmarkDotNet.Validators; + +namespace StackExchange.Redis.Benchmarks +{ + internal class CustomConfig : ManualConfig + { + protected virtual Job Configure(Job j) => j; + + public CustomConfig() + { + AddDiagnoser(MemoryDiagnoser.Default); + AddColumn(StatisticColumn.OperationsPerSecond); + AddValidator(JitOptimizationsValidator.FailOnError); + + if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows)) + { + AddJob(Configure(Job.Default.WithRuntime(ClrRuntime.Net481))); + } + AddJob(Configure(Job.Default.WithRuntime(CoreRuntime.Core10_0))); + } + } +} diff --git a/tests/StackExchange.Redis.Benchmarks/EnumParseBenchmarks.cs b/tests/StackExchange.Redis.Benchmarks/EnumParseBenchmarks.cs new file mode 100644 index 000000000..de6ae174e --- /dev/null +++ b/tests/StackExchange.Redis.Benchmarks/EnumParseBenchmarks.cs @@ -0,0 +1,690 @@ +using System; +using System.Text; +using BenchmarkDotNet.Attributes; +using BenchmarkDotNet.Configs; +using RESPite; + +namespace StackExchange.Redis.Benchmarks; + +[ShortRunJob, MemoryDiagnoser, GroupBenchmarksBy(BenchmarkLogicalGroupRule.ByCategory)] +public partial class EnumParseBenchmarks +{ + private const int OperationsPerInvoke = 1000; + + public string[] Values() => + [ + nameof(RedisCommand.GET), + nameof(RedisCommand.EXPIREAT), + nameof(RedisCommand.ZREMRANGEBYSCORE), + "~~~~", + "get", + "expireat", + "zremrangebyscore", + "GeoRadiusByMember", + ]; + + private byte[] _bytes = []; + private string _value = ""; + + [ParamsSource(nameof(Values))] + public string Value + { + get => 
_value; + set + { + value ??= ""; + _bytes = Encoding.UTF8.GetBytes(value); + _value = value; + } + } + + [BenchmarkCategory("Case sensitive")] + [Benchmark(OperationsPerInvoke = OperationsPerInvoke, Baseline = true)] + public RedisCommand EnumParse_CS() + { + var value = Value; + RedisCommand r = default; + for (int i = 0; i < OperationsPerInvoke; i++) + { + Enum.TryParse(value, false, out r); + } + + return r; + } + + [BenchmarkCategory("Case insensitive")] + [Benchmark(OperationsPerInvoke = OperationsPerInvoke, Baseline = true)] + public RedisCommand EnumParse_CI() + { + var value = Value; + RedisCommand r = default; + for (int i = 0; i < OperationsPerInvoke; i++) + { + Enum.TryParse(value, true, out r); + } + + return r; + } + + [BenchmarkCategory("Case sensitive")] + [Benchmark(OperationsPerInvoke = OperationsPerInvoke)] + public RedisCommand Ascii_C_CS() + { + ReadOnlySpan value = Value; + RedisCommand r = default; + for (int i = 0; i < OperationsPerInvoke; i++) + { + TryParse_CS(value, out r); + } + + return r; + } + + [BenchmarkCategory("Case insensitive")] + [Benchmark(OperationsPerInvoke = OperationsPerInvoke)] + public RedisCommand Ascii_C_CI() + { + ReadOnlySpan value = Value; + RedisCommand r = default; + for (int i = 0; i < OperationsPerInvoke; i++) + { + TryParse_CI(value, out r); + } + + return r; + } + + [BenchmarkCategory("Case sensitive")] + [Benchmark(OperationsPerInvoke = OperationsPerInvoke)] + public RedisCommand Ascii_B_CS() + { + ReadOnlySpan value = _bytes; + RedisCommand r = default; + for (int i = 0; i < OperationsPerInvoke; i++) + { + TryParse_CS(value, out r); + } + + return r; + } + + [BenchmarkCategory("Case insensitive")] + [Benchmark(OperationsPerInvoke = OperationsPerInvoke)] + public RedisCommand Ascii_B_CI() + { + ReadOnlySpan value = _bytes; + RedisCommand r = default; + for (int i = 0; i < OperationsPerInvoke; i++) + { + TryParse_CI(value, out r); + } + + return r; + } + + [BenchmarkCategory("Case sensitive")] + 
[Benchmark(OperationsPerInvoke = OperationsPerInvoke)] + public RedisCommand Switch_CS() + { + var value = Value; + RedisCommand r = default; + for (int i = 0; i < OperationsPerInvoke; i++) + { + TryParseSwitch(value, out r); + } + + return r; + } + + private static bool TryParseSwitch(string s, out RedisCommand r) + { + r = s switch + { + "NONE" => RedisCommand.NONE, + "APPEND" => RedisCommand.APPEND, + "ASKING" => RedisCommand.ASKING, + "AUTH" => RedisCommand.AUTH, + "BGREWRITEAOF" => RedisCommand.BGREWRITEAOF, + "BGSAVE" => RedisCommand.BGSAVE, + "BITCOUNT" => RedisCommand.BITCOUNT, + "BITOP" => RedisCommand.BITOP, + "BITPOS" => RedisCommand.BITPOS, + "BLPOP" => RedisCommand.BLPOP, + "BRPOP" => RedisCommand.BRPOP, + "BRPOPLPUSH" => RedisCommand.BRPOPLPUSH, + "CLIENT" => RedisCommand.CLIENT, + "CLUSTER" => RedisCommand.CLUSTER, + "CONFIG" => RedisCommand.CONFIG, + "COPY" => RedisCommand.COPY, + "COMMAND" => RedisCommand.COMMAND, + "DBSIZE" => RedisCommand.DBSIZE, + "DEBUG" => RedisCommand.DEBUG, + "DECR" => RedisCommand.DECR, + "DECRBY" => RedisCommand.DECRBY, + "DEL" => RedisCommand.DEL, + "DELEX" => RedisCommand.DELEX, + "DIGEST" => RedisCommand.DIGEST, + "DISCARD" => RedisCommand.DISCARD, + "DUMP" => RedisCommand.DUMP, + "ECHO" => RedisCommand.ECHO, + "EVAL" => RedisCommand.EVAL, + "EVALSHA" => RedisCommand.EVALSHA, + "EVAL_RO" => RedisCommand.EVAL_RO, + "EVALSHA_RO" => RedisCommand.EVALSHA_RO, + "EXEC" => RedisCommand.EXEC, + "EXISTS" => RedisCommand.EXISTS, + "EXPIRE" => RedisCommand.EXPIRE, + "EXPIREAT" => RedisCommand.EXPIREAT, + "EXPIRETIME" => RedisCommand.EXPIRETIME, + "FLUSHALL" => RedisCommand.FLUSHALL, + "FLUSHDB" => RedisCommand.FLUSHDB, + "GEOADD" => RedisCommand.GEOADD, + "GEODIST" => RedisCommand.GEODIST, + "GEOHASH" => RedisCommand.GEOHASH, + "GEOPOS" => RedisCommand.GEOPOS, + "GEORADIUS" => RedisCommand.GEORADIUS, + "GEORADIUSBYMEMBER" => RedisCommand.GEORADIUSBYMEMBER, + "GEOSEARCH" => RedisCommand.GEOSEARCH, + "GEOSEARCHSTORE" => 
RedisCommand.GEOSEARCHSTORE, + "GET" => RedisCommand.GET, + "GETBIT" => RedisCommand.GETBIT, + "GETDEL" => RedisCommand.GETDEL, + "GETEX" => RedisCommand.GETEX, + "GETRANGE" => RedisCommand.GETRANGE, + "GETSET" => RedisCommand.GETSET, + "HDEL" => RedisCommand.HDEL, + "HELLO" => RedisCommand.HELLO, + "HEXISTS" => RedisCommand.HEXISTS, + "HEXPIRE" => RedisCommand.HEXPIRE, + "HEXPIREAT" => RedisCommand.HEXPIREAT, + "HEXPIRETIME" => RedisCommand.HEXPIRETIME, + "HGET" => RedisCommand.HGET, + "HGETEX" => RedisCommand.HGETEX, + "HGETDEL" => RedisCommand.HGETDEL, + "HGETALL" => RedisCommand.HGETALL, + "HINCRBY" => RedisCommand.HINCRBY, + "HINCRBYFLOAT" => RedisCommand.HINCRBYFLOAT, + "HKEYS" => RedisCommand.HKEYS, + "HLEN" => RedisCommand.HLEN, + "HMGET" => RedisCommand.HMGET, + "HMSET" => RedisCommand.HMSET, + "HOTKEYS" => RedisCommand.HOTKEYS, + "HPERSIST" => RedisCommand.HPERSIST, + "HPEXPIRE" => RedisCommand.HPEXPIRE, + "HPEXPIREAT" => RedisCommand.HPEXPIREAT, + "HPEXPIRETIME" => RedisCommand.HPEXPIRETIME, + "HPTTL" => RedisCommand.HPTTL, + "HRANDFIELD" => RedisCommand.HRANDFIELD, + "HSCAN" => RedisCommand.HSCAN, + "HSET" => RedisCommand.HSET, + "HSETEX" => RedisCommand.HSETEX, + "HSETNX" => RedisCommand.HSETNX, + "HSTRLEN" => RedisCommand.HSTRLEN, + "HVALS" => RedisCommand.HVALS, + "INCR" => RedisCommand.INCR, + "INCRBY" => RedisCommand.INCRBY, + "INCRBYFLOAT" => RedisCommand.INCRBYFLOAT, + "INFO" => RedisCommand.INFO, + "KEYS" => RedisCommand.KEYS, + "LASTSAVE" => RedisCommand.LASTSAVE, + "LATENCY" => RedisCommand.LATENCY, + "LCS" => RedisCommand.LCS, + "LINDEX" => RedisCommand.LINDEX, + "LINSERT" => RedisCommand.LINSERT, + "LLEN" => RedisCommand.LLEN, + "LMOVE" => RedisCommand.LMOVE, + "LMPOP" => RedisCommand.LMPOP, + "LPOP" => RedisCommand.LPOP, + "LPOS" => RedisCommand.LPOS, + "LPUSH" => RedisCommand.LPUSH, + "LPUSHX" => RedisCommand.LPUSHX, + "LRANGE" => RedisCommand.LRANGE, + "LREM" => RedisCommand.LREM, + "LSET" => RedisCommand.LSET, + "LTRIM" => 
RedisCommand.LTRIM, + "MEMORY" => RedisCommand.MEMORY, + "MGET" => RedisCommand.MGET, + "MIGRATE" => RedisCommand.MIGRATE, + "MONITOR" => RedisCommand.MONITOR, + "MOVE" => RedisCommand.MOVE, + "MSET" => RedisCommand.MSET, + "MSETEX" => RedisCommand.MSETEX, + "MSETNX" => RedisCommand.MSETNX, + "MULTI" => RedisCommand.MULTI, + "OBJECT" => RedisCommand.OBJECT, + "PERSIST" => RedisCommand.PERSIST, + "PEXPIRE" => RedisCommand.PEXPIRE, + "PEXPIREAT" => RedisCommand.PEXPIREAT, + "PEXPIRETIME" => RedisCommand.PEXPIRETIME, + "PFADD" => RedisCommand.PFADD, + "PFCOUNT" => RedisCommand.PFCOUNT, + "PFMERGE" => RedisCommand.PFMERGE, + "PING" => RedisCommand.PING, + "PSETEX" => RedisCommand.PSETEX, + "PSUBSCRIBE" => RedisCommand.PSUBSCRIBE, + "PTTL" => RedisCommand.PTTL, + "PUBLISH" => RedisCommand.PUBLISH, + "PUBSUB" => RedisCommand.PUBSUB, + "PUNSUBSCRIBE" => RedisCommand.PUNSUBSCRIBE, + "QUIT" => RedisCommand.QUIT, + "RANDOMKEY" => RedisCommand.RANDOMKEY, + "READONLY" => RedisCommand.READONLY, + "READWRITE" => RedisCommand.READWRITE, + "RENAME" => RedisCommand.RENAME, + "RENAMENX" => RedisCommand.RENAMENX, + "REPLICAOF" => RedisCommand.REPLICAOF, + "RESTORE" => RedisCommand.RESTORE, + "ROLE" => RedisCommand.ROLE, + "RPOP" => RedisCommand.RPOP, + "RPOPLPUSH" => RedisCommand.RPOPLPUSH, + "RPUSH" => RedisCommand.RPUSH, + "RPUSHX" => RedisCommand.RPUSHX, + "SADD" => RedisCommand.SADD, + "SAVE" => RedisCommand.SAVE, + "SCAN" => RedisCommand.SCAN, + "SCARD" => RedisCommand.SCARD, + "SCRIPT" => RedisCommand.SCRIPT, + "SDIFF" => RedisCommand.SDIFF, + "SDIFFSTORE" => RedisCommand.SDIFFSTORE, + "SELECT" => RedisCommand.SELECT, + "SENTINEL" => RedisCommand.SENTINEL, + "SET" => RedisCommand.SET, + "SETBIT" => RedisCommand.SETBIT, + "SETEX" => RedisCommand.SETEX, + "SETNX" => RedisCommand.SETNX, + "SETRANGE" => RedisCommand.SETRANGE, + "SHUTDOWN" => RedisCommand.SHUTDOWN, + "SINTER" => RedisCommand.SINTER, + "SINTERCARD" => RedisCommand.SINTERCARD, + "SINTERSTORE" => 
RedisCommand.SINTERSTORE, + "SISMEMBER" => RedisCommand.SISMEMBER, + "SLAVEOF" => RedisCommand.SLAVEOF, + "SLOWLOG" => RedisCommand.SLOWLOG, + "SMEMBERS" => RedisCommand.SMEMBERS, + "SMISMEMBER" => RedisCommand.SMISMEMBER, + "SMOVE" => RedisCommand.SMOVE, + "SORT" => RedisCommand.SORT, + "SORT_RO" => RedisCommand.SORT_RO, + "SPOP" => RedisCommand.SPOP, + "SPUBLISH" => RedisCommand.SPUBLISH, + "SRANDMEMBER" => RedisCommand.SRANDMEMBER, + "SREM" => RedisCommand.SREM, + "STRLEN" => RedisCommand.STRLEN, + "SUBSCRIBE" => RedisCommand.SUBSCRIBE, + "SUNION" => RedisCommand.SUNION, + "SUNIONSTORE" => RedisCommand.SUNIONSTORE, + "SSCAN" => RedisCommand.SSCAN, + "SSUBSCRIBE" => RedisCommand.SSUBSCRIBE, + "SUNSUBSCRIBE" => RedisCommand.SUNSUBSCRIBE, + "SWAPDB" => RedisCommand.SWAPDB, + "SYNC" => RedisCommand.SYNC, + "TIME" => RedisCommand.TIME, + "TOUCH" => RedisCommand.TOUCH, + "TTL" => RedisCommand.TTL, + "TYPE" => RedisCommand.TYPE, + "UNLINK" => RedisCommand.UNLINK, + "UNSUBSCRIBE" => RedisCommand.UNSUBSCRIBE, + "UNWATCH" => RedisCommand.UNWATCH, + "VADD" => RedisCommand.VADD, + "VCARD" => RedisCommand.VCARD, + "VDIM" => RedisCommand.VDIM, + "VEMB" => RedisCommand.VEMB, + "VGETATTR" => RedisCommand.VGETATTR, + "VINFO" => RedisCommand.VINFO, + "VISMEMBER" => RedisCommand.VISMEMBER, + "VLINKS" => RedisCommand.VLINKS, + "VRANDMEMBER" => RedisCommand.VRANDMEMBER, + "VREM" => RedisCommand.VREM, + "VSETATTR" => RedisCommand.VSETATTR, + "VSIM" => RedisCommand.VSIM, + "WATCH" => RedisCommand.WATCH, + "XACK" => RedisCommand.XACK, + "XACKDEL" => RedisCommand.XACKDEL, + "XADD" => RedisCommand.XADD, + "XAUTOCLAIM" => RedisCommand.XAUTOCLAIM, + "XCLAIM" => RedisCommand.XCLAIM, + "XCFGSET" => RedisCommand.XCFGSET, + "XDEL" => RedisCommand.XDEL, + "XDELEX" => RedisCommand.XDELEX, + "XGROUP" => RedisCommand.XGROUP, + "XINFO" => RedisCommand.XINFO, + "XLEN" => RedisCommand.XLEN, + "XPENDING" => RedisCommand.XPENDING, + "XRANGE" => RedisCommand.XRANGE, + "XREAD" => RedisCommand.XREAD, + 
"XREADGROUP" => RedisCommand.XREADGROUP, + "XREVRANGE" => RedisCommand.XREVRANGE, + "XTRIM" => RedisCommand.XTRIM, + "ZADD" => RedisCommand.ZADD, + "ZCARD" => RedisCommand.ZCARD, + "ZCOUNT" => RedisCommand.ZCOUNT, + "ZDIFF" => RedisCommand.ZDIFF, + "ZDIFFSTORE" => RedisCommand.ZDIFFSTORE, + "ZINCRBY" => RedisCommand.ZINCRBY, + "ZINTER" => RedisCommand.ZINTER, + "ZINTERCARD" => RedisCommand.ZINTERCARD, + "ZINTERSTORE" => RedisCommand.ZINTERSTORE, + "ZLEXCOUNT" => RedisCommand.ZLEXCOUNT, + "ZMPOP" => RedisCommand.ZMPOP, + "ZMSCORE" => RedisCommand.ZMSCORE, + "ZPOPMAX" => RedisCommand.ZPOPMAX, + "ZPOPMIN" => RedisCommand.ZPOPMIN, + "ZRANDMEMBER" => RedisCommand.ZRANDMEMBER, + "ZRANGE" => RedisCommand.ZRANGE, + "ZRANGEBYLEX" => RedisCommand.ZRANGEBYLEX, + "ZRANGEBYSCORE" => RedisCommand.ZRANGEBYSCORE, + "ZRANGESTORE" => RedisCommand.ZRANGESTORE, + "ZRANK" => RedisCommand.ZRANK, + "ZREM" => RedisCommand.ZREM, + "ZREMRANGEBYLEX" => RedisCommand.ZREMRANGEBYLEX, + "ZREMRANGEBYRANK" => RedisCommand.ZREMRANGEBYRANK, + "ZREMRANGEBYSCORE" => RedisCommand.ZREMRANGEBYSCORE, + "ZREVRANGE" => RedisCommand.ZREVRANGE, + "ZREVRANGEBYLEX" => RedisCommand.ZREVRANGEBYLEX, + "ZREVRANGEBYSCORE" => RedisCommand.ZREVRANGEBYSCORE, + "ZREVRANK" => RedisCommand.ZREVRANK, + "ZSCAN" => RedisCommand.ZSCAN, + "ZSCORE" => RedisCommand.ZSCORE, + "ZUNION" => RedisCommand.ZUNION, + "ZUNIONSTORE" => RedisCommand.ZUNIONSTORE, + "UNKNOWN" => RedisCommand.UNKNOWN, + _ => (RedisCommand)(-1), + }; + if (r == (RedisCommand)(-1)) + { + r = default; + return false; + } + + return true; + } + + [AsciiHash] + internal static partial bool TryParse_CS(ReadOnlySpan value, out RedisCommand command); + + [AsciiHash] + internal static partial bool TryParse_CS(ReadOnlySpan value, out RedisCommand command); + + [AsciiHash(CaseSensitive = false)] + internal static partial bool TryParse_CI(ReadOnlySpan value, out RedisCommand command); + + [AsciiHash(CaseSensitive = false)] + internal static partial bool 
TryParse_CI(ReadOnlySpan value, out RedisCommand command); + + public enum RedisCommand + { + NONE, // must be first for "zero reasons" + + APPEND, + ASKING, + AUTH, + + BGREWRITEAOF, + BGSAVE, + BITCOUNT, + BITOP, + BITPOS, + BLPOP, + BRPOP, + BRPOPLPUSH, + + CLIENT, + CLUSTER, + CONFIG, + COPY, + COMMAND, + + DBSIZE, + DEBUG, + DECR, + DECRBY, + DEL, + DELEX, + DIGEST, + DISCARD, + DUMP, + + ECHO, + EVAL, + EVALSHA, + EVAL_RO, + EVALSHA_RO, + EXEC, + EXISTS, + EXPIRE, + EXPIREAT, + EXPIRETIME, + + FLUSHALL, + FLUSHDB, + + GEOADD, + GEODIST, + GEOHASH, + GEOPOS, + GEORADIUS, + GEORADIUSBYMEMBER, + GEOSEARCH, + GEOSEARCHSTORE, + + GET, + GETBIT, + GETDEL, + GETEX, + GETRANGE, + GETSET, + + HDEL, + HELLO, + HEXISTS, + HEXPIRE, + HEXPIREAT, + HEXPIRETIME, + HGET, + HGETEX, + HGETDEL, + HGETALL, + HINCRBY, + HINCRBYFLOAT, + HKEYS, + HLEN, + HMGET, + HMSET, + HOTKEYS, + HPERSIST, + HPEXPIRE, + HPEXPIREAT, + HPEXPIRETIME, + HPTTL, + HRANDFIELD, + HSCAN, + HSET, + HSETEX, + HSETNX, + HSTRLEN, + HVALS, + + INCR, + INCRBY, + INCRBYFLOAT, + INFO, + + KEYS, + + LASTSAVE, + LATENCY, + LCS, + LINDEX, + LINSERT, + LLEN, + LMOVE, + LMPOP, + LPOP, + LPOS, + LPUSH, + LPUSHX, + LRANGE, + LREM, + LSET, + LTRIM, + + MEMORY, + MGET, + MIGRATE, + MONITOR, + MOVE, + MSET, + MSETEX, + MSETNX, + MULTI, + + OBJECT, + + PERSIST, + PEXPIRE, + PEXPIREAT, + PEXPIRETIME, + PFADD, + PFCOUNT, + PFMERGE, + PING, + PSETEX, + PSUBSCRIBE, + PTTL, + PUBLISH, + PUBSUB, + PUNSUBSCRIBE, + + QUIT, + + RANDOMKEY, + READONLY, + READWRITE, + RENAME, + RENAMENX, + REPLICAOF, + RESTORE, + ROLE, + RPOP, + RPOPLPUSH, + RPUSH, + RPUSHX, + + SADD, + SAVE, + SCAN, + SCARD, + SCRIPT, + SDIFF, + SDIFFSTORE, + SELECT, + SENTINEL, + SET, + SETBIT, + SETEX, + SETNX, + SETRANGE, + SHUTDOWN, + SINTER, + SINTERCARD, + SINTERSTORE, + SISMEMBER, + SLAVEOF, + SLOWLOG, + SMEMBERS, + SMISMEMBER, + SMOVE, + SORT, + SORT_RO, + SPOP, + SPUBLISH, + SRANDMEMBER, + SREM, + STRLEN, + SUBSCRIBE, + SUNION, + SUNIONSTORE, + SSCAN, + 
SSUBSCRIBE, + SUNSUBSCRIBE, + SWAPDB, + SYNC, + + TIME, + TOUCH, + TTL, + TYPE, + + UNLINK, + UNSUBSCRIBE, + UNWATCH, + + VADD, + VCARD, + VDIM, + VEMB, + VGETATTR, + VINFO, + VISMEMBER, + VLINKS, + VRANDMEMBER, + VREM, + VSETATTR, + VSIM, + + WATCH, + + XACK, + XACKDEL, + XADD, + XAUTOCLAIM, + XCLAIM, + XCFGSET, + XDEL, + XDELEX, + XGROUP, + XINFO, + XLEN, + XPENDING, + XRANGE, + XREAD, + XREADGROUP, + XREVRANGE, + XTRIM, + + ZADD, + ZCARD, + ZCOUNT, + ZDIFF, + ZDIFFSTORE, + ZINCRBY, + ZINTER, + ZINTERCARD, + ZINTERSTORE, + ZLEXCOUNT, + ZMPOP, + ZMSCORE, + ZPOPMAX, + ZPOPMIN, + ZRANDMEMBER, + ZRANGE, + ZRANGEBYLEX, + ZRANGEBYSCORE, + ZRANGESTORE, + ZRANK, + ZREM, + ZREMRANGEBYLEX, + ZREMRANGEBYRANK, + ZREMRANGEBYSCORE, + ZREVRANGE, + ZREVRANGEBYLEX, + ZREVRANGEBYSCORE, + ZREVRANK, + ZSCAN, + ZSCORE, + ZUNION, + ZUNIONSTORE, + + UNKNOWN, + } +} diff --git a/tests/StackExchange.Redis.Benchmarks/FormatBenchmarks.cs b/tests/StackExchange.Redis.Benchmarks/FormatBenchmarks.cs new file mode 100644 index 000000000..714e1724a --- /dev/null +++ b/tests/StackExchange.Redis.Benchmarks/FormatBenchmarks.cs @@ -0,0 +1,55 @@ +/* +using System; +using System.Net; +using BenchmarkDotNet.Attributes; + +namespace StackExchange.Redis.Benchmarks +{ + [Config(typeof(CustomConfig))] + public class FormatBenchmarks + { + [GlobalSetup] + public void Setup() { } + + [Benchmark] + [Arguments("64")] + [Arguments("-1")] + [Arguments("0")] + [Arguments("123442")] + public long ParseInt64(string s) => Format.ParseInt64(s); + + [Benchmark] + [Arguments("64")] + [Arguments("-1")] + [Arguments("0")] + [Arguments("123442")] + public long ParseInt32(string s) => Format.ParseInt32(s); + + [Benchmark] + [Arguments("64")] + [Arguments("-1")] + [Arguments("0")] + [Arguments("123442")] + [Arguments("-inf")] + [Arguments("nan")] + public double ParseDouble(string s) => Format.TryParseDouble(s, out var val) ? 
val : double.NaN; + + private byte[] buffer = new byte[128]; + + [Benchmark] + [Arguments(64D)] + [Arguments(-1D)] + [Arguments(0D)] + [Arguments(123442D)] + [Arguments(double.NegativeInfinity)] + [Arguments(double.NaN)] + public int FormatDouble(double value) => Format.FormatDouble(value, buffer.AsSpan()); + + [Benchmark] + [Arguments("host.com", -1)] + [Arguments("host.com", 0)] + [Arguments("host.com", 65345)] + public EndPoint ParseEndPoint(string host, int port) => Format.ParseEndPoint(host, port); + } +} +*/ diff --git a/tests/StackExchange.Redis.Benchmarks/Program.cs b/tests/StackExchange.Redis.Benchmarks/Program.cs new file mode 100644 index 000000000..3999a61b4 --- /dev/null +++ b/tests/StackExchange.Redis.Benchmarks/Program.cs @@ -0,0 +1,26 @@ +using System; +using System.Reflection; +using BenchmarkDotNet.Running; + +namespace StackExchange.Redis.Benchmarks +{ + internal static class Program + { + private static void Main(string[] args) + { +#if DEBUG + var obj = new AsciiHashBenchmarks(); + foreach (var size in obj.Sizes) + { + Console.WriteLine($"Size: {size}"); + obj.Size = size; + obj.Setup(); + obj.HashCS_C(); + obj.HashCS_B(); + } +#else + BenchmarkSwitcher.FromAssembly(typeof(Program).GetTypeInfo().Assembly).Run(args); +#endif + } + } +} diff --git a/tests/StackExchange.Redis.Benchmarks/SlowConfig.cs b/tests/StackExchange.Redis.Benchmarks/SlowConfig.cs new file mode 100644 index 000000000..fc1ab6f71 --- /dev/null +++ b/tests/StackExchange.Redis.Benchmarks/SlowConfig.cs @@ -0,0 +1,12 @@ +using BenchmarkDotNet.Jobs; + +namespace StackExchange.Redis.Benchmarks +{ + internal sealed class SlowConfig : CustomConfig + { + protected override Job Configure(Job j) + => j.WithLaunchCount(1) + .WithWarmupCount(1) + .WithIterationCount(5); + } +} diff --git a/tests/StackExchange.Redis.Benchmarks/StackExchange.Redis.Benchmarks.csproj b/tests/StackExchange.Redis.Benchmarks/StackExchange.Redis.Benchmarks.csproj new file mode 100644 index 000000000..47359ac85 --- 
/dev/null +++ b/tests/StackExchange.Redis.Benchmarks/StackExchange.Redis.Benchmarks.csproj @@ -0,0 +1,17 @@ + + + StackExchange.Redis MicroBenchmark Suite + net481;net8.0 + Release + Exe + true + enable + + + + + + + + + diff --git a/tests/StackExchange.Redis.Benchmarks/run.cmd b/tests/StackExchange.Redis.Benchmarks/run.cmd new file mode 100644 index 000000000..2b8844c56 --- /dev/null +++ b/tests/StackExchange.Redis.Benchmarks/run.cmd @@ -0,0 +1 @@ +dotnet run --framework net8.0 -c Release %* \ No newline at end of file diff --git a/tests/StackExchange.Redis.Benchmarks/run.sh b/tests/StackExchange.Redis.Benchmarks/run.sh new file mode 100755 index 000000000..1824c7161 --- /dev/null +++ b/tests/StackExchange.Redis.Benchmarks/run.sh @@ -0,0 +1,2 @@ +#!/bin/bash +dotnet run --framework net8.0 -c Release "$@" \ No newline at end of file diff --git a/tests/StackExchange.Redis.Tests/AbortOnConnectFailTests.cs b/tests/StackExchange.Redis.Tests/AbortOnConnectFailTests.cs new file mode 100644 index 000000000..25033fa1a --- /dev/null +++ b/tests/StackExchange.Redis.Tests/AbortOnConnectFailTests.cs @@ -0,0 +1,98 @@ +using System; +using System.Threading.Tasks; +using StackExchange.Redis.Tests.Helpers; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class AbortOnConnectFailTests(ITestOutputHelper output) : TestBase(output) +{ + [Fact] + public async Task NeverEverConnectedNoBacklogThrowsConnectionNotAvailableSync() + { + await using var conn = GetFailFastConn(); + var db = conn.GetDatabase(); + var key = Me(); + + // No connection is active/available to service this operation: GET 6.0.18AbortOnConnectFailTests-NeverEverConnectedNoBacklogThrowsConnectionNotAvailableSync; UnableToConnect on doesnot.exist.d4d1424806204b68b047954b1db3411d:6379/Interactive, Initializing/NotStarted, last: NONE, origin: BeginConnectAsync, outstanding: 0, last-read: 0s ago, last-write: 0s ago, keep-alive: 100s, state: Connecting, mgr: 4 of 10 available, last-heartbeat: never, global: 
0s ago, v: 2.6.120.51136, mc: 1/1/0, mgr: 5 of 10 available, clientName: CRAVERTOP7(SE.Redis-v2.6.120.51136), IOCP: (Busy=0,Free=1000,Min=16,Max=1000), WORKER: (Busy=3,Free=32764,Min=16,Max=32767), POOL: (Threads=25,QueuedItems=0,CompletedItems=1066,Timers=46), v: 2.6.120.51136 + var ex = Assert.Throws(() => db.StringGet(key)); + Log("Exception: " + ex.Message); + Assert.Contains("No connection is active/available to service this operation", ex.Message); + } + + [Fact] + public async Task NeverEverConnectedNoBacklogThrowsConnectionNotAvailableAsync() + { + await using var conn = GetFailFastConn(); + var db = conn.GetDatabase(); + var key = Me(); + + // No connection is active/available to service this operation: GET 6.0.18AbortOnConnectFailTests-NeverEverConnectedNoBacklogThrowsConnectionNotAvailableSync; UnableToConnect on doesnot.exist.d4d1424806204b68b047954b1db3411d:6379/Interactive, Initializing/NotStarted, last: NONE, origin: BeginConnectAsync, outstanding: 0, last-read: 0s ago, last-write: 0s ago, keep-alive: 100s, state: Connecting, mgr: 4 of 10 available, last-heartbeat: never, global: 0s ago, v: 2.6.120.51136, mc: 1/1/0, mgr: 5 of 10 available, clientName: CRAVERTOP7(SE.Redis-v2.6.120.51136), IOCP: (Busy=0,Free=1000,Min=16,Max=1000), WORKER: (Busy=3,Free=32764,Min=16,Max=32767), POOL: (Threads=25,QueuedItems=0,CompletedItems=1066,Timers=46), v: 2.6.120.51136 + var ex = await Assert.ThrowsAsync(() => db.StringGetAsync(key)); + Log("Exception: " + ex.Message); + Assert.Contains("No connection is active/available to service this operation", ex.Message); + } + + [Fact] + public async Task DisconnectAndReconnectThrowsConnectionExceptionSync() + { + await using var conn = GetWorkingBacklogConn(); + + var db = conn.GetDatabase(); + var key = Me(); + await db.PingAsync(); // Doesn't throw - we're connected + + // Disconnect and don't allow re-connection + conn.AllowConnect = false; + var server = conn.GetServerSnapshot()[0]; + 
server.SimulateConnectionFailure(SimulatedFailureType.All); + + // Exception: The message timed out in the backlog attempting to send because no connection became available (400ms) - Last Connection Exception: SocketFailure (InputReaderCompleted, last-recv: 7) on 127.0.0.1:6379/Interactive, Idle/ReadAsync, last: PING, origin: SimulateConnectionFailure, outstanding: 0, last-read: 0s ago, last-write: 0s ago, keep-alive: 100s, state: ConnectedEstablished, mgr: 10 of 10 available, in: 0, in-pipe: 0, out-pipe: 0, last-heartbeat: never, last-mbeat: 0s ago, global: 0s ago, v: 2.6.120.51136, command=PING, timeout: 100, inst: 13, qu: 1, qs: 0, aw: False, bw: Inactive, last-in: 0, cur-in: 0, sync-ops: 2, async-ops: 0, serverEndpoint: 127.0.0.1:6379, conn-sec: n/a, aoc: 0, mc: 1/1/0, mgr: 10 of 10 available, clientName: CRAVERTOP7(SE.Redis-v2.6.120.51136), IOCP: (Busy=0,Free=1000,Min=16,Max=1000), WORKER: (Busy=2,Free=32765,Min=16,Max=32767), POOL: (Threads=33,QueuedItems=0,CompletedItems=6237,Timers=39), v: 2.6.120.51136 (Please take a look at this article for some common client-side issues that can cause timeouts: https://stackexchange.github.io/StackExchange.Redis/Timeouts) + var ex = Assert.ThrowsAny(() => db.Ping()); + Log("Exception: " + ex.Message); + Assert.True(ex is RedisConnectionException or RedisTimeoutException); + Assert.StartsWith("The message timed out in the backlog attempting to send because no connection became available (1000ms) - Last Connection Exception: ", ex.Message); + Assert.NotNull(ex.InnerException); + var iex = Assert.IsType(ex.InnerException); + Assert.Contains(iex.Message, ex.Message); + } + + [Fact] + public async Task DisconnectAndNoReconnectThrowsConnectionExceptionAsync() + { + await using var conn = GetWorkingBacklogConn(); + + var db = conn.GetDatabase(); + var key = Me(); + await db.PingAsync(); // Doesn't throw - we're connected + + // Disconnect and don't allow re-connection + conn.AllowConnect = false; + var server = 
conn.GetServerSnapshot()[0]; + server.SimulateConnectionFailure(SimulatedFailureType.All); + + // Exception: The message timed out in the backlog attempting to send because no connection became available (400ms) - Last Connection Exception: SocketFailure (InputReaderCompleted, last-recv: 7) on 127.0.0.1:6379/Interactive, Idle/ReadAsync, last: PING, origin: SimulateConnectionFailure, outstanding: 0, last-read: 0s ago, last-write: 0s ago, keep-alive: 100s, state: ConnectedEstablished, mgr: 8 of 10 available, in: 0, in-pipe: 0, out-pipe: 0, last-heartbeat: never, last-mbeat: 0s ago, global: 0s ago, v: 2.6.120.51136, command=PING, timeout: 100, inst: 0, qu: 0, qs: 0, aw: False, bw: CheckingForTimeout, last-in: 0, cur-in: 0, sync-ops: 1, async-ops: 1, serverEndpoint: 127.0.0.1:6379, conn-sec: n/a, aoc: 0, mc: 1/1/0, mgr: 8 of 10 available, clientName: CRAVERTOP7(SE.Redis-v2.6.120.51136), IOCP: (Busy=0,Free=1000,Min=16,Max=1000), WORKER: (Busy=6,Free=32761,Min=16,Max=32767), POOL: (Threads=33,QueuedItems=0,CompletedItems=5547,Timers=60), v: 2.6.120.51136 (Please take a look at this article for some common client-side issues that can cause timeouts: https://stackexchange.github.io/StackExchange.Redis/Timeouts) + var ex = await Assert.ThrowsAsync(() => db.PingAsync()); + Log("Exception: " + ex.Message); + Assert.StartsWith("The message timed out in the backlog attempting to send because no connection became available (1000ms) - Last Connection Exception: ", ex.Message); + Assert.NotNull(ex.InnerException); + var iex = Assert.IsType(ex.InnerException); + Assert.Contains(iex.Message, ex.Message); + } + + private ConnectionMultiplexer GetFailFastConn() => + ConnectionMultiplexer.Connect(GetOptions(BacklogPolicy.FailFast, duration: 400, connectTimeout: 500).Apply(o => o.EndPoints.Add($"doesnot.exist.{Guid.NewGuid():N}:6379")), Writer); + + private ConnectionMultiplexer GetWorkingBacklogConn() => + ConnectionMultiplexer.Connect(GetOptions(BacklogPolicy.Default).Apply(o => 
o.EndPoints.Add(GetConfiguration())), Writer); + + private static ConfigurationOptions GetOptions(BacklogPolicy policy, int duration = 1000, int connectTimeout = 2000) => new ConfigurationOptions() + { + AbortOnConnectFail = false, + BacklogPolicy = policy, + ConnectTimeout = connectTimeout, + SyncTimeout = duration, + KeepAlive = duration, + AllowAdmin = true, + }.WithoutSubscriptions(); +} diff --git a/tests/StackExchange.Redis.Tests/AdhocTests.cs b/tests/StackExchange.Redis.Tests/AdhocTests.cs index 924669ee5..42a5ebb23 100644 --- a/tests/StackExchange.Redis.Tests/AdhocTests.cs +++ b/tests/StackExchange.Redis.Tests/AdhocTests.cs @@ -1,34 +1,28 @@ -using Xunit; -using Xunit.Abstractions; +using System.Threading.Tasks; +using Xunit; -namespace StackExchange.Redis.Tests +namespace StackExchange.Redis.Tests; + +public class AdhocTests(ITestOutputHelper output, SharedConnectionFixture fixture) : TestBase(output, fixture) { - [Collection(SharedConnectionFixture.Key)] - public class AdhocTests : TestBase + [Fact] + public async Task TestAdhocCommandsAPI() { - public AdhocTests(ITestOutputHelper output, SharedConnectionFixture fixture) : base (output, fixture) { } - - [Fact] - public void TestAdhocCommandsAPI() - { - using (var conn = Create()) - { - var db = conn.GetDatabase(); + await using var conn = Create(); + var db = conn.GetDatabase(); - // needs explicit RedisKey type for key-based - // sharding to work; will still work with strings, - // but no key-based sharding support - RedisKey key = Me(); + // needs explicit RedisKey type for key-based + // sharding to work; will still work with strings, + // but no key-based sharding support + RedisKey key = Me(); - // note: if command renames are configured in - // the API, they will still work automatically - db.Execute("del", key); - db.Execute("set", key, "12"); - db.Execute("incrby", key, 4); - int i = (int)db.Execute("get", key); + // note: if command renames are configured in + // the API, they will still work 
automatically + db.Execute("del", key); + db.Execute("set", key, "12"); + db.Execute("incrby", key, 4); + int i = (int)db.Execute("get", key); - Assert.Equal(16, i); - } - } + Assert.Equal(16, i); } } diff --git a/tests/StackExchange.Redis.Tests/AggressiveTests.cs b/tests/StackExchange.Redis.Tests/AggressiveTests.cs new file mode 100644 index 000000000..f0ba91f16 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/AggressiveTests.cs @@ -0,0 +1,317 @@ +using System.Threading; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +[Collection(NonParallelCollection.Name)] +public class AggressiveTests(ITestOutputHelper output) : TestBase(output) +{ + [Fact] + public async Task ParallelTransactionsWithConditions() + { + Skip.UnlessLongRunning(); + const int Muxers = 4, Workers = 20, PerThread = 250; + + var muxers = new IConnectionMultiplexer[Muxers]; + try + { + for (int i = 0; i < Muxers; i++) + muxers[i] = Create(); + + RedisKey hits = Me(), trigger = Me() + "3"; + int expectedSuccess = 0; + + await muxers[0].GetDatabase().KeyDeleteAsync([hits, trigger]).ForAwait(); + + Task[] tasks = new Task[Workers]; + for (int i = 0; i < tasks.Length; i++) + { + var scopedDb = muxers[i % Muxers].GetDatabase(); + tasks[i] = Task.Run(async () => + { + for (int j = 0; j < PerThread; j++) + { + var oldVal = await scopedDb.StringGetAsync(trigger).ForAwait(); + var tran = scopedDb.CreateTransaction(); + tran.AddCondition(Condition.StringEqual(trigger, oldVal)); + var x = tran.StringIncrementAsync(trigger); + var y = tran.StringIncrementAsync(hits); + if (await tran.ExecuteAsync().ForAwait()) + { + Interlocked.Increment(ref expectedSuccess); + await x; + await y; + } + else + { + await Assert.ThrowsAsync(() => x).ForAwait(); + await Assert.ThrowsAsync(() => y).ForAwait(); + } + } + }); + } + for (int i = tasks.Length - 1; i >= 0; i--) + { + await tasks[i]; + } + var actual = (int)await muxers[0].GetDatabase().StringGetAsync(hits).ForAwait(); + 
Assert.Equal(expectedSuccess, actual); + Log($"success: {actual} out of {Workers * PerThread} attempts"); + } + finally + { + for (int i = 0; i < muxers.Length; i++) + { + try { muxers[i]?.Dispose(); } + catch { /* Don't care */ } + } + } + } + + private const int IterationCount = 5000, InnerCount = 20; + + [Fact] + public async Task RunCompetingBatchesOnSameMuxer() + { + Skip.UnlessLongRunning(); + await using var conn = Create(); + var db = conn.GetDatabase(); + + Thread x = new Thread(state => BatchRunPings((IDatabase)state!)) + { + Name = nameof(BatchRunPings), + }; + Thread y = new Thread(state => BatchRunIntegers((IDatabase)state!)) + { + Name = nameof(BatchRunIntegers), + }; + + x.Start(db); + y.Start(db); + x.Join(); + y.Join(); + + Log(conn.GetCounters().Interactive.ToString()); + } + + private void BatchRunIntegers(IDatabase db) + { + var key = Me(); + db.KeyDelete(key); + db.StringSet(key, 1); + Task[] tasks = new Task[InnerCount]; + for (int i = 0; i < IterationCount; i++) + { + var batch = db.CreateBatch(); + for (int j = 0; j < tasks.Length; j++) + { + tasks[j] = batch.StringIncrementAsync(key); + } + batch.Execute(); + db.Multiplexer.WaitAll(tasks); + } + + var count = (long)db.StringGet(key); + Log($"tally: {count}"); + } + + private static void BatchRunPings(IDatabase db) + { + Task[] tasks = new Task[InnerCount]; + for (int i = 0; i < IterationCount; i++) + { + var batch = db.CreateBatch(); + for (int j = 0; j < tasks.Length; j++) + { + tasks[j] = batch.PingAsync(); + } + batch.Execute(); + db.Multiplexer.WaitAll(tasks); + } + } + + [Fact] + public async Task RunCompetingBatchesOnSameMuxerAsync() + { + Skip.UnlessLongRunning(); + await using var conn = Create(); + var db = conn.GetDatabase(); + + var x = Task.Run(() => BatchRunPingsAsync(db)); + var y = Task.Run(() => BatchRunIntegersAsync(db)); + + await x; + await y; + + Log(conn.GetCounters().Interactive.ToString()); + } + + private async Task BatchRunIntegersAsync(IDatabase db) + { + var key = 
Me(); + await db.KeyDeleteAsync(key).ForAwait(); + await db.StringSetAsync(key, 1).ForAwait(); + Task[] tasks = new Task[InnerCount]; + for (int i = 0; i < IterationCount; i++) + { + var batch = db.CreateBatch(); + for (int j = 0; j < tasks.Length; j++) + { + tasks[j] = batch.StringIncrementAsync(key); + } + batch.Execute(); + for (int j = tasks.Length - 1; j >= 0; j--) + { + await tasks[j]; + } + } + + var count = (long)await db.StringGetAsync(key).ForAwait(); + Log($"tally: {count}"); + } + + private static async Task BatchRunPingsAsync(IDatabase db) + { + Task[] tasks = new Task[InnerCount]; + for (int i = 0; i < IterationCount; i++) + { + var batch = db.CreateBatch(); + for (int j = 0; j < tasks.Length; j++) + { + tasks[j] = batch.PingAsync(); + } + batch.Execute(); + for (int j = tasks.Length - 1; j >= 0; j--) + { + await tasks[j]; + } + } + } + + [Fact] + public async Task RunCompetingTransactionsOnSameMuxer() + { + Skip.UnlessLongRunning(); + await using var conn = Create(logTransactionData: false); + var db = conn.GetDatabase(); + + Thread x = new Thread(state => TranRunPings((IDatabase)state!)) + { + Name = nameof(BatchRunPings), + }; + Thread y = new Thread(state => TranRunIntegers((IDatabase)state!)) + { + Name = nameof(BatchRunIntegers), + }; + + x.Start(db); + y.Start(db); + x.Join(); + y.Join(); + + Log(conn.GetCounters().Interactive.ToString()); + } + + private void TranRunIntegers(IDatabase db) + { + var key = Me(); + db.KeyDelete(key); + db.StringSet(key, 1); + Task[] tasks = new Task[InnerCount]; + for (int i = 0; i < IterationCount; i++) + { + var batch = db.CreateTransaction(); + batch.AddCondition(Condition.KeyExists(key)); + for (int j = 0; j < tasks.Length; j++) + { + tasks[j] = batch.StringIncrementAsync(key); + } + batch.Execute(); + db.Multiplexer.WaitAll(tasks); + } + + var count = (long)db.StringGet(key); + Log($"tally: {count}"); + } + + private void TranRunPings(IDatabase db) + { + var key = Me(); + db.KeyDelete(key); + Task[] tasks = 
new Task[InnerCount]; + for (int i = 0; i < IterationCount; i++) + { + var batch = db.CreateTransaction(); + batch.AddCondition(Condition.KeyNotExists(key)); + for (int j = 0; j < tasks.Length; j++) + { + tasks[j] = batch.PingAsync(); + } + batch.Execute(); + db.Multiplexer.WaitAll(tasks); + } + } + + [Fact] + public async Task RunCompetingTransactionsOnSameMuxerAsync() + { + Skip.UnlessLongRunning(); + await using var conn = Create(logTransactionData: false); + var db = conn.GetDatabase(); + + var x = Task.Run(() => TranRunPingsAsync(db)); + var y = Task.Run(() => TranRunIntegersAsync(db)); + + await x; + await y; + + Log(conn.GetCounters().Interactive.ToString()); + } + + private async Task TranRunIntegersAsync(IDatabase db) + { + var key = Me(); + await db.KeyDeleteAsync(key).ForAwait(); + await db.StringSetAsync(key, 1).ForAwait(); + Task[] tasks = new Task[InnerCount]; + for (int i = 0; i < IterationCount; i++) + { + var batch = db.CreateTransaction(); + batch.AddCondition(Condition.KeyExists(key)); + for (int j = 0; j < tasks.Length; j++) + { + tasks[j] = batch.StringIncrementAsync(key); + } + await batch.ExecuteAsync().ForAwait(); + for (int j = tasks.Length - 1; j >= 0; j--) + { + await tasks[j]; + } + } + + var count = (long)await db.StringGetAsync(key).ForAwait(); + Log($"tally: {count}"); + } + + private async Task TranRunPingsAsync(IDatabase db) + { + var key = Me(); + db.KeyDelete(key); + Task[] tasks = new Task[InnerCount]; + for (int i = 0; i < IterationCount; i++) + { + var batch = db.CreateTransaction(); + batch.AddCondition(Condition.KeyNotExists(key)); + for (int j = 0; j < tasks.Length; j++) + { + tasks[j] = batch.PingAsync(); + } + await batch.ExecuteAsync().ForAwait(); + for (int j = tasks.Length - 1; j >= 0; j--) + { + await tasks[j]; + } + } + } +} diff --git a/tests/StackExchange.Redis.Tests/AggresssiveTests.cs b/tests/StackExchange.Redis.Tests/AggresssiveTests.cs deleted file mode 100644 index 438431a75..000000000 --- 
a/tests/StackExchange.Redis.Tests/AggresssiveTests.cs +++ /dev/null @@ -1,324 +0,0 @@ -using System.Threading; -using System.Threading.Tasks; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - [Collection(NonParallelCollection.Name)] - public class AggresssiveTests : TestBase - { - public AggresssiveTests(ITestOutputHelper output) : base(output) { } - - [FactLongRunning] - public async Task ParallelTransactionsWithConditions() - { - const int Muxers = 4, Workers = 20, PerThread = 250; - - var muxers = new IConnectionMultiplexer[Muxers]; - try - { - for (int i = 0; i < Muxers; i++) - muxers[i] = Create(); - - RedisKey hits = Me(), trigger = Me() + "3"; - int expectedSuccess = 0; - - await muxers[0].GetDatabase().KeyDeleteAsync(new[] { hits, trigger }).ForAwait(); - - Task[] tasks = new Task[Workers]; - for (int i = 0; i < tasks.Length; i++) - { - var scopedDb = muxers[i % Muxers].GetDatabase(); - tasks[i] = Task.Run(async () => - { - for (int j = 0; j < PerThread; j++) - { - var oldVal = await scopedDb.StringGetAsync(trigger).ForAwait(); - var tran = scopedDb.CreateTransaction(); - tran.AddCondition(Condition.StringEqual(trigger, oldVal)); - var x = tran.StringIncrementAsync(trigger); - var y = tran.StringIncrementAsync(hits); - if (await tran.ExecuteAsync().ForAwait()) - { - Interlocked.Increment(ref expectedSuccess); - await x; - await y; - } - else - { - await Assert.ThrowsAsync(() => x).ForAwait(); - await Assert.ThrowsAsync(() => y).ForAwait(); - } - } - }); - } - for (int i = tasks.Length - 1; i >= 0; i--) - { - await tasks[i]; - } - var actual = (int)await muxers[0].GetDatabase().StringGetAsync(hits).ForAwait(); - Assert.Equal(expectedSuccess, actual); - Writer.WriteLine($"success: {actual} out of {Workers * PerThread} attempts"); - } - finally - { - for (int i = 0; i < muxers.Length; i++) - { - try { muxers[i]?.Dispose(); } - catch { /* Don't care */ } - } - } - } - - private const int IterationCount = 5000, InnerCount = 20; 
- - [FactLongRunning] - public void RunCompetingBatchesOnSameMuxer() - { - using (var muxer = Create()) - { - var db = muxer.GetDatabase(); - - Thread x = new Thread(state => BatchRunPings((IDatabase)state)) - { - Name = nameof(BatchRunPings) - }; - Thread y = new Thread(state => BatchRunIntegers((IDatabase)state)) - { - Name = nameof(BatchRunIntegers) - }; - - x.Start(db); - y.Start(db); - x.Join(); - y.Join(); - - Writer.WriteLine(muxer.GetCounters().Interactive); - } - } - - private void BatchRunIntegers(IDatabase db) - { - var key = Me(); - db.KeyDelete(key); - db.StringSet(key, 1); - Task[] tasks = new Task[InnerCount]; - for(int i = 0; i < IterationCount; i++) - { - var batch = db.CreateBatch(); - for (int j = 0; j < tasks.Length; j++) - { - tasks[j] = batch.StringIncrementAsync(key); - } - batch.Execute(); - db.Multiplexer.WaitAll(tasks); - } - - var count = (long)db.StringGet(key); - Writer.WriteLine($"tally: {count}"); - } - - private void BatchRunPings(IDatabase db) - { - Task[] tasks = new Task[InnerCount]; - for (int i = 0; i < IterationCount; i++) - { - var batch = db.CreateBatch(); - for (int j = 0; j < tasks.Length; j++) - { - tasks[j] = batch.PingAsync(); - } - batch.Execute(); - db.Multiplexer.WaitAll(tasks); - } - } - - [FactLongRunning] - public async Task RunCompetingBatchesOnSameMuxerAsync() - { - using (var muxer = Create()) - { - var db = muxer.GetDatabase(); - - var x = Task.Run(() => BatchRunPingsAsync(db)); - var y = Task.Run(() => BatchRunIntegersAsync(db)); - - await x; - await y; - - Writer.WriteLine(muxer.GetCounters().Interactive); - } - } - - private async Task BatchRunIntegersAsync(IDatabase db) - { - var key = Me(); - await db.KeyDeleteAsync(key).ForAwait(); - await db.StringSetAsync(key, 1).ForAwait(); - Task[] tasks = new Task[InnerCount]; - for (int i = 0; i < IterationCount; i++) - { - var batch = db.CreateBatch(); - for (int j = 0; j < tasks.Length; j++) - { - tasks[j] = batch.StringIncrementAsync(key); - } - batch.Execute(); 
- for(int j = tasks.Length - 1; j >= 0;j--) - { - await tasks[j]; - } - } - - var count = (long)await db.StringGetAsync(key).ForAwait(); - Writer.WriteLine($"tally: {count}"); - } - - private async Task BatchRunPingsAsync(IDatabase db) - { - Task[] tasks = new Task[InnerCount]; - for (int i = 0; i < IterationCount; i++) - { - var batch = db.CreateBatch(); - for (int j = 0; j < tasks.Length; j++) - { - tasks[j] = batch.PingAsync(); - } - batch.Execute(); - for (int j = tasks.Length - 1; j >= 0; j--) - { - await tasks[j]; - } - } - } - - [FactLongRunning] - public void RunCompetingTransactionsOnSameMuxer() - { - using (var muxer = Create(logTransactionData: false)) - { - var db = muxer.GetDatabase(); - - Thread x = new Thread(state => TranRunPings((IDatabase)state)) - { - Name = nameof(BatchRunPings) - }; - Thread y = new Thread(state => TranRunIntegers((IDatabase)state)) - { - Name = nameof(BatchRunIntegers) - }; - - x.Start(db); - y.Start(db); - x.Join(); - y.Join(); - - Writer.WriteLine(muxer.GetCounters().Interactive); - } - } - - private void TranRunIntegers(IDatabase db) - { - var key = Me(); - db.KeyDelete(key); - db.StringSet(key, 1); - Task[] tasks = new Task[InnerCount]; - for (int i = 0; i < IterationCount; i++) - { - var batch = db.CreateTransaction(); - batch.AddCondition(Condition.KeyExists(key)); - for (int j = 0; j < tasks.Length; j++) - { - tasks[j] = batch.StringIncrementAsync(key); - } - batch.Execute(); - db.Multiplexer.WaitAll(tasks); - } - - var count = (long)db.StringGet(key); - Writer.WriteLine($"tally: {count}"); - } - - private void TranRunPings(IDatabase db) - { - var key = Me(); - db.KeyDelete(key); - Task[] tasks = new Task[InnerCount]; - for (int i = 0; i < IterationCount; i++) - { - var batch = db.CreateTransaction(); - batch.AddCondition(Condition.KeyNotExists(key)); - for (int j = 0; j < tasks.Length; j++) - { - tasks[j] = batch.PingAsync(); - } - batch.Execute(); - db.Multiplexer.WaitAll(tasks); - } - } - - [FactLongRunning] - public 
async Task RunCompetingTransactionsOnSameMuxerAsync() - { - using (var muxer = Create(logTransactionData: false)) - { - var db = muxer.GetDatabase(); - - var x = Task.Run(() => TranRunPingsAsync(db)); - var y = Task.Run(() => TranRunIntegersAsync(db)); - - await x; - await y; - - Writer.WriteLine(muxer.GetCounters().Interactive); - } - } - - private async Task TranRunIntegersAsync(IDatabase db) - { - var key = Me(); - await db.KeyDeleteAsync(key).ForAwait(); - await db.StringSetAsync(key, 1).ForAwait(); - Task[] tasks = new Task[InnerCount]; - for (int i = 0; i < IterationCount; i++) - { - var batch = db.CreateTransaction(); - batch.AddCondition(Condition.KeyExists(key)); - for (int j = 0; j < tasks.Length; j++) - { - tasks[j] = batch.StringIncrementAsync(key); - } - await batch.ExecuteAsync().ForAwait(); - for (int j = tasks.Length - 1; j >= 0; j--) - { - await tasks[j]; - } - } - - var count = (long)await db.StringGetAsync(key).ForAwait(); - Writer.WriteLine($"tally: {count}"); - } - - private async Task TranRunPingsAsync(IDatabase db) - { - var key = Me(); - db.KeyDelete(key); - Task[] tasks = new Task[InnerCount]; - for (int i = 0; i < IterationCount; i++) - { - var batch = db.CreateTransaction(); - batch.AddCondition(Condition.KeyNotExists(key)); - for (int j = 0; j < tasks.Length; j++) - { - tasks[j] = batch.PingAsync(); - } - await batch.ExecuteAsync().ForAwait(); - for (int j = tasks.Length - 1; j >= 0; j--) - { - await tasks[j]; - } - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/App.config b/tests/StackExchange.Redis.Tests/App.config index c7c0b6d7a..295bdd49d 100644 --- a/tests/StackExchange.Redis.Tests/App.config +++ b/tests/StackExchange.Redis.Tests/App.config @@ -4,7 +4,7 @@ - + diff --git a/tests/StackExchange.Redis.Tests/AsciiHashUnitTests.cs b/tests/StackExchange.Redis.Tests/AsciiHashUnitTests.cs new file mode 100644 index 000000000..5e2b9571f --- /dev/null +++ b/tests/StackExchange.Redis.Tests/AsciiHashUnitTests.cs @@ -0,0 +1,460 @@ 
+using System; +using System.Runtime.InteropServices; +using System.Text; +using RESPite; +using Xunit; +using Xunit.Sdk; + +#pragma warning disable CS8981, SA1134, SA1300, SA1303, SA1502 // names are weird in this test! +// ReSharper disable InconsistentNaming - to better represent expected literals +// ReSharper disable IdentifierTypo +namespace StackExchange.Redis.Tests; + +public partial class AsciiHashUnitTests +{ + // note: if the hashing algorithm changes, we can update the last parameter freely; it doesn't matter + // what it *is* - what matters is that we can see that it has entropy between different values + [Theory] + [InlineData(1, a.Length, a.Text, a.HashCS, 97)] + [InlineData(2, ab.Length, ab.Text, ab.HashCS, 25185)] + [InlineData(3, abc.Length, abc.Text, abc.HashCS, 6513249)] + [InlineData(4, abcd.Length, abcd.Text, abcd.HashCS, 1684234849)] + [InlineData(5, abcde.Length, abcde.Text, abcde.HashCS, 435475931745)] + [InlineData(6, abcdef.Length, abcdef.Text, abcdef.HashCS, 112585661964897)] + [InlineData(7, abcdefg.Length, abcdefg.Text, abcdefg.HashCS, 29104508263162465)] + [InlineData(8, abcdefgh.Length, abcdefgh.Text, abcdefgh.HashCS, 7523094288207667809)] + + [InlineData(1, x.Length, x.Text, x.HashCS, 120)] + [InlineData(2, xx.Length, xx.Text, xx.HashCS, 30840)] + [InlineData(3, xxx.Length, xxx.Text, xxx.HashCS, 7895160)] + [InlineData(4, xxxx.Length, xxxx.Text, xxxx.HashCS, 2021161080)] + [InlineData(5, xxxxx.Length, xxxxx.Text, xxxxx.HashCS, 517417236600)] + [InlineData(6, xxxxxx.Length, xxxxxx.Text, xxxxxx.HashCS, 132458812569720)] + [InlineData(7, xxxxxxx.Length, xxxxxxx.Text, xxxxxxx.HashCS, 33909456017848440)] + [InlineData(8, xxxxxxxx.Length, xxxxxxxx.Text, xxxxxxxx.HashCS, 8680820740569200760)] + + [InlineData(20, abcdefghijklmnopqrst.Length, abcdefghijklmnopqrst.Text, abcdefghijklmnopqrst.HashCS, 7523094288207667809)] + + // show that foo_bar is interpreted as foo-bar + [InlineData(7, foo_bar.Length, foo_bar.Text, foo_bar.HashCS, 
32195221641981798, "foo-bar", nameof(foo_bar))] + [InlineData(7, foo_bar_hyphen.Length, foo_bar_hyphen.Text, foo_bar_hyphen.HashCS, 32195221641981798, "foo-bar", nameof(foo_bar_hyphen))] + [InlineData(7, foo_bar_underscore.Length, foo_bar_underscore.Text, foo_bar_underscore.HashCS, 32195222480842598, "foo_bar", nameof(foo_bar_underscore))] + public void Validate(int expectedLength, int actualLength, string actualValue, long actualHash, long expectedHash, string? expectedValue = null, string originForDisambiguation = "") + { + _ = originForDisambiguation; // to allow otherwise-identical test data to coexist + Assert.Equal(expectedLength, actualLength); + Assert.Equal(expectedHash, actualHash); + var bytes = Encoding.UTF8.GetBytes(actualValue); + Assert.Equal(expectedLength, bytes.Length); + Assert.Equal(expectedHash, AsciiHash.HashCS(bytes)); + Assert.Equal(expectedHash, AsciiHash.HashCS(actualValue.AsSpan())); + + if (expectedValue is not null) + { + Assert.Equal(expectedValue, actualValue); + } + } + + [Fact] + public void AsciiHashIs_Short() + { + ReadOnlySpan value = "abc"u8; + var hash = AsciiHash.HashCS(value); + Assert.Equal(abc.HashCS, hash); + Assert.True(abc.IsCS(value, hash)); + + value = "abz"u8; + hash = AsciiHash.HashCS(value); + Assert.NotEqual(abc.HashCS, hash); + Assert.False(abc.IsCS(value, hash)); + } + + [Fact] + public void AsciiHashIs_Long() + { + ReadOnlySpan value = "abcdefghijklmnopqrst"u8; + var hash = AsciiHash.HashCS(value); + Assert.Equal(abcdefghijklmnopqrst.HashCS, hash); + Assert.True(abcdefghijklmnopqrst.IsCS(value, hash)); + + value = "abcdefghijklmnopqrsz"u8; + hash = AsciiHash.HashCS(value); + Assert.Equal(abcdefghijklmnopqrst.HashCS, hash); // hash collision, fine + Assert.False(abcdefghijklmnopqrst.IsCS(value, hash)); + } + + // Test case-sensitive and case-insensitive equality for various lengths + [Theory] + [InlineData("a")] // length 1 + [InlineData("ab")] // length 2 + [InlineData("abc")] // length 3 + [InlineData("abcd")] 
// length 4 + [InlineData("abcde")] // length 5 + [InlineData("abcdef")] // length 6 + [InlineData("abcdefg")] // length 7 + [InlineData("abcdefgh")] // length 8 + [InlineData("abcdefghi")] // length 9 + [InlineData("abcdefghij")] // length 10 + [InlineData("abcdefghijklmnop")] // length 16 + [InlineData("abcdefghijklmnopqrst")] // length 20 + public void CaseSensitiveEquality(string text) + { + var lower = Encoding.UTF8.GetBytes(text); + var upper = Encoding.UTF8.GetBytes(text.ToUpperInvariant()); + + var hashLowerCS = AsciiHash.HashCS(lower); + var hashUpperCS = AsciiHash.HashCS(upper); + + // Case-sensitive: same case should match + Assert.True(AsciiHash.EqualsCS(lower, lower), "CS: lower == lower"); + Assert.True(AsciiHash.EqualsCS(upper, upper), "CS: upper == upper"); + + // Case-sensitive: different case should NOT match + Assert.False(AsciiHash.EqualsCS(lower, upper), "CS: lower != upper"); + Assert.False(AsciiHash.EqualsCS(upper, lower), "CS: upper != lower"); + + // Hashes should be different for different cases + Assert.NotEqual(hashLowerCS, hashUpperCS); + } + + [Theory] + [InlineData("a")] // length 1 + [InlineData("ab")] // length 2 + [InlineData("abc")] // length 3 + [InlineData("abcd")] // length 4 + [InlineData("abcde")] // length 5 + [InlineData("abcdef")] // length 6 + [InlineData("abcdefg")] // length 7 + [InlineData("abcdefgh")] // length 8 + [InlineData("abcdefghi")] // length 9 + [InlineData("abcdefghij")] // length 10 + [InlineData("abcdefghijklmnop")] // length 16 + [InlineData("abcdefghijklmnopqrst")] // length 20 + public void CaseInsensitiveEquality(string text) + { + var lower = Encoding.UTF8.GetBytes(text); + var upper = Encoding.UTF8.GetBytes(text.ToUpperInvariant()); + + var hashLowerUC = AsciiHash.HashUC(lower); + var hashUpperUC = AsciiHash.HashUC(upper); + + // Case-insensitive: same case should match + Assert.True(AsciiHash.EqualsCI(lower, lower), "CI: lower == lower"); + Assert.True(AsciiHash.EqualsCI(upper, upper), "CI: upper == 
upper"); + + // Case-insensitive: different case SHOULD match + Assert.True(AsciiHash.EqualsCI(lower, upper), "CI: lower == upper"); + Assert.True(AsciiHash.EqualsCI(upper, lower), "CI: upper == lower"); + + // CI hashes should be the same for different cases + Assert.Equal(hashLowerUC, hashUpperUC); + } + + [Theory] + [InlineData("a")] // length 1 + [InlineData("ab")] // length 2 + [InlineData("abc")] // length 3 + [InlineData("abcd")] // length 4 + [InlineData("abcde")] // length 5 + [InlineData("abcdef")] // length 6 + [InlineData("abcdefg")] // length 7 + [InlineData("abcdefgh")] // length 8 + [InlineData("abcdefghi")] // length 9 + [InlineData("abcdefghij")] // length 10 + [InlineData("abcdefghijklmnop")] // length 16 + [InlineData("abcdefghijklmnopqrst")] // length 20 + [InlineData("foo-bar")] // foo_bar_hyphen + [InlineData("foo_bar")] // foo_bar_underscore + public void GeneratedTypes_CaseSensitive(string text) + { + var lower = Encoding.UTF8.GetBytes(text); + var upper = Encoding.UTF8.GetBytes(text.ToUpperInvariant()); + + var hashLowerCS = AsciiHash.HashCS(lower); + var hashUpperCS = AsciiHash.HashCS(upper); + + // Use the generated types to verify CS behavior + switch (text) + { + case "a": + Assert.True(a.IsCS(lower, hashLowerCS)); + Assert.False(a.IsCS(lower, hashUpperCS)); + break; + case "ab": + Assert.True(ab.IsCS(lower, hashLowerCS)); + Assert.False(ab.IsCS(lower, hashUpperCS)); + break; + case "abc": + Assert.True(abc.IsCS(lower, hashLowerCS)); + Assert.False(abc.IsCS(lower, hashUpperCS)); + break; + case "abcd": + Assert.True(abcd.IsCS(lower, hashLowerCS)); + Assert.False(abcd.IsCS(lower, hashUpperCS)); + break; + case "abcde": + Assert.True(abcde.IsCS(lower, hashLowerCS)); + Assert.False(abcde.IsCS(lower, hashUpperCS)); + break; + case "abcdef": + Assert.True(abcdef.IsCS(lower, hashLowerCS)); + Assert.False(abcdef.IsCS(lower, hashUpperCS)); + break; + case "abcdefg": + Assert.True(abcdefg.IsCS(lower, hashLowerCS)); + 
Assert.False(abcdefg.IsCS(lower, hashUpperCS)); + break; + case "abcdefgh": + Assert.True(abcdefgh.IsCS(lower, hashLowerCS)); + Assert.False(abcdefgh.IsCS(lower, hashUpperCS)); + break; + case "abcdefghijklmnopqrst": + Assert.True(abcdefghijklmnopqrst.IsCS(lower, hashLowerCS)); + Assert.False(abcdefghijklmnopqrst.IsCS(lower, hashUpperCS)); + break; + case "foo-bar": + Assert.True(foo_bar_hyphen.IsCS(lower, hashLowerCS)); + Assert.False(foo_bar_hyphen.IsCS(lower, hashUpperCS)); + break; + case "foo_bar": + Assert.True(foo_bar_underscore.IsCS(lower, hashLowerCS)); + Assert.False(foo_bar_underscore.IsCS(lower, hashUpperCS)); + break; + } + } + + [Theory] + [InlineData("a")] // length 1 + [InlineData("ab")] // length 2 + [InlineData("abc")] // length 3 + [InlineData("abcd")] // length 4 + [InlineData("abcde")] // length 5 + [InlineData("abcdef")] // length 6 + [InlineData("abcdefg")] // length 7 + [InlineData("abcdefgh")] // length 8 + [InlineData("abcdefghi")] // length 9 + [InlineData("abcdefghij")] // length 10 + [InlineData("abcdefghijklmnop")] // length 16 + [InlineData("abcdefghijklmnopqrst")] // length 20 + [InlineData("foo-bar")] // foo_bar_hyphen + [InlineData("foo_bar")] // foo_bar_underscore + public void GeneratedTypes_CaseInsensitive(string text) + { + var lower = Encoding.UTF8.GetBytes(text); + var upper = Encoding.UTF8.GetBytes(text.ToUpperInvariant()); + + var hashLowerUC = AsciiHash.HashUC(lower); + var hashUpperUC = AsciiHash.HashUC(upper); + + // Use the generated types to verify CI behavior + switch (text) + { + case "a": + Assert.True(a.IsCI(lower, hashLowerUC)); + Assert.True(a.IsCI(upper, hashUpperUC)); + break; + case "ab": + Assert.True(ab.IsCI(lower, hashLowerUC)); + Assert.True(ab.IsCI(upper, hashUpperUC)); + break; + case "abc": + Assert.True(abc.IsCI(lower, hashLowerUC)); + Assert.True(abc.IsCI(upper, hashUpperUC)); + break; + case "abcd": + Assert.True(abcd.IsCI(lower, hashLowerUC)); + Assert.True(abcd.IsCI(upper, hashUpperUC)); + break; + 
case "abcde": + Assert.True(abcde.IsCI(lower, hashLowerUC)); + Assert.True(abcde.IsCI(upper, hashUpperUC)); + break; + case "abcdef": + Assert.True(abcdef.IsCI(lower, hashLowerUC)); + Assert.True(abcdef.IsCI(upper, hashUpperUC)); + break; + case "abcdefg": + Assert.True(abcdefg.IsCI(lower, hashLowerUC)); + Assert.True(abcdefg.IsCI(upper, hashUpperUC)); + break; + case "abcdefgh": + Assert.True(abcdefgh.IsCI(lower, hashLowerUC)); + Assert.True(abcdefgh.IsCI(upper, hashUpperUC)); + break; + case "abcdefghijklmnopqrst": + Assert.True(abcdefghijklmnopqrst.IsCI(lower, hashLowerUC)); + Assert.True(abcdefghijklmnopqrst.IsCI(upper, hashUpperUC)); + break; + case "foo-bar": + Assert.True(foo_bar_hyphen.IsCI(lower, hashLowerUC)); + Assert.True(foo_bar_hyphen.IsCI(upper, hashUpperUC)); + break; + case "foo_bar": + Assert.True(foo_bar_underscore.IsCI(lower, hashLowerUC)); + Assert.True(foo_bar_underscore.IsCI(upper, hashUpperUC)); + break; + } + } + + // Test each generated AsciiHash type individually for case sensitivity + [Fact] + public void GeneratedType_a_CaseSensitivity() + { + ReadOnlySpan lower = "a"u8; + ReadOnlySpan upper = "A"u8; + + Assert.True(a.IsCS(lower, AsciiHash.HashCS(lower))); + Assert.False(a.IsCS(upper, AsciiHash.HashCS(upper))); + Assert.True(a.IsCI(lower, AsciiHash.HashUC(lower))); + Assert.True(a.IsCI(upper, AsciiHash.HashUC(upper))); + } + + [Fact] + public void GeneratedType_ab_CaseSensitivity() + { + ReadOnlySpan lower = "ab"u8; + ReadOnlySpan upper = "AB"u8; + + Assert.True(ab.IsCS(lower, AsciiHash.HashCS(lower))); + Assert.False(ab.IsCS(upper, AsciiHash.HashCS(upper))); + Assert.True(ab.IsCI(lower, AsciiHash.HashUC(lower))); + Assert.True(ab.IsCI(upper, AsciiHash.HashUC(upper))); + } + + [Fact] + public void GeneratedType_abc_CaseSensitivity() + { + ReadOnlySpan lower = "abc"u8; + ReadOnlySpan upper = "ABC"u8; + + Assert.True(abc.IsCS(lower, AsciiHash.HashCS(lower))); + Assert.False(abc.IsCS(upper, AsciiHash.HashCS(upper))); + 
Assert.True(abc.IsCI(lower, AsciiHash.HashUC(lower))); + Assert.True(abc.IsCI(upper, AsciiHash.HashUC(upper))); + } + + [Fact] + public void GeneratedType_abcd_CaseSensitivity() + { + ReadOnlySpan lower = "abcd"u8; + ReadOnlySpan upper = "ABCD"u8; + + Assert.True(abcd.IsCS(lower, AsciiHash.HashCS(lower))); + Assert.False(abcd.IsCS(upper, AsciiHash.HashCS(upper))); + Assert.True(abcd.IsCI(lower, AsciiHash.HashUC(lower))); + Assert.True(abcd.IsCI(upper, AsciiHash.HashUC(upper))); + } + + [Fact] + public void GeneratedType_abcde_CaseSensitivity() + { + ReadOnlySpan lower = "abcde"u8; + ReadOnlySpan upper = "ABCDE"u8; + + Assert.True(abcde.IsCS(lower, AsciiHash.HashCS(lower))); + Assert.False(abcde.IsCS(upper, AsciiHash.HashCS(upper))); + Assert.True(abcde.IsCI(lower, AsciiHash.HashUC(lower))); + Assert.True(abcde.IsCI(upper, AsciiHash.HashUC(upper))); + } + + [Fact] + public void GeneratedType_abcdef_CaseSensitivity() + { + ReadOnlySpan lower = "abcdef"u8; + ReadOnlySpan upper = "ABCDEF"u8; + + Assert.True(abcdef.IsCS(lower, AsciiHash.HashCS(lower))); + Assert.False(abcdef.IsCS(upper, AsciiHash.HashCS(upper))); + Assert.True(abcdef.IsCI(lower, AsciiHash.HashUC(lower))); + Assert.True(abcdef.IsCI(upper, AsciiHash.HashUC(upper))); + } + + [Fact] + public void GeneratedType_abcdefg_CaseSensitivity() + { + ReadOnlySpan lower = "abcdefg"u8; + ReadOnlySpan upper = "ABCDEFG"u8; + + Assert.True(abcdefg.IsCS(lower, AsciiHash.HashCS(lower))); + Assert.False(abcdefg.IsCS(upper, AsciiHash.HashCS(upper))); + Assert.True(abcdefg.IsCI(lower, AsciiHash.HashUC(lower))); + Assert.True(abcdefg.IsCI(upper, AsciiHash.HashUC(upper))); + } + + [Fact] + public void GeneratedType_abcdefgh_CaseSensitivity() + { + ReadOnlySpan lower = "abcdefgh"u8; + ReadOnlySpan upper = "ABCDEFGH"u8; + + Assert.True(abcdefgh.IsCS(lower, AsciiHash.HashCS(lower))); + Assert.False(abcdefgh.IsCS(upper, AsciiHash.HashCS(upper))); + Assert.True(abcdefgh.IsCI(lower, AsciiHash.HashUC(lower))); + 
Assert.True(abcdefgh.IsCI(upper, AsciiHash.HashUC(upper))); + } + + [Fact] + public void GeneratedType_abcdefghijklmnopqrst_CaseSensitivity() + { + ReadOnlySpan lower = "abcdefghijklmnopqrst"u8; + ReadOnlySpan upper = "ABCDEFGHIJKLMNOPQRST"u8; + + Assert.True(abcdefghijklmnopqrst.IsCS(lower, AsciiHash.HashCS(lower))); + Assert.False(abcdefghijklmnopqrst.IsCS(upper, AsciiHash.HashCS(upper))); + Assert.True(abcdefghijklmnopqrst.IsCI(lower, AsciiHash.HashUC(lower))); + Assert.True(abcdefghijklmnopqrst.IsCI(upper, AsciiHash.HashUC(upper))); + } + + [Fact] + public void GeneratedType_foo_bar_CaseSensitivity() + { + // foo_bar is interpreted as foo-bar + ReadOnlySpan lower = "foo-bar"u8; + ReadOnlySpan upper = "FOO-BAR"u8; + + Assert.True(foo_bar.IsCS(lower, AsciiHash.HashCS(lower))); + Assert.False(foo_bar.IsCS(upper, AsciiHash.HashCS(upper))); + Assert.True(foo_bar.IsCI(lower, AsciiHash.HashUC(lower))); + Assert.True(foo_bar.IsCI(upper, AsciiHash.HashUC(upper))); + } + + [Fact] + public void GeneratedType_foo_bar_hyphen_CaseSensitivity() + { + // foo_bar_hyphen is explicitly "foo-bar" + ReadOnlySpan lower = "foo-bar"u8; + ReadOnlySpan upper = "FOO-BAR"u8; + + Assert.True(foo_bar_hyphen.IsCS(lower, AsciiHash.HashCS(lower))); + Assert.False(foo_bar_hyphen.IsCS(upper, AsciiHash.HashCS(upper))); + Assert.True(foo_bar_hyphen.IsCI(lower, AsciiHash.HashUC(lower))); + Assert.True(foo_bar_hyphen.IsCI(upper, AsciiHash.HashUC(upper))); + } + + [AsciiHash] private static partial class a { } + [AsciiHash] private static partial class ab { } + [AsciiHash] private static partial class abc { } + [AsciiHash] private static partial class abcd { } + [AsciiHash] private static partial class abcde { } + [AsciiHash] private static partial class abcdef { } + [AsciiHash] private static partial class abcdefg { } + [AsciiHash] private static partial class abcdefgh { } + + [AsciiHash] private static partial class abcdefghijklmnopqrst { } + + // show that foo_bar and foo-bar are different + 
[AsciiHash] private static partial class foo_bar { } + [AsciiHash("foo-bar")] private static partial class foo_bar_hyphen { } + [AsciiHash("foo_bar")] private static partial class foo_bar_underscore { } + + [AsciiHash] private static partial class 窓 { } + + [AsciiHash] private static partial class x { } + [AsciiHash] private static partial class xx { } + [AsciiHash] private static partial class xxx { } + [AsciiHash] private static partial class xxxx { } + [AsciiHash] private static partial class xxxxx { } + [AsciiHash] private static partial class xxxxxx { } + [AsciiHash] private static partial class xxxxxxx { } + [AsciiHash] private static partial class xxxxxxxx { } +} diff --git a/tests/StackExchange.Redis.Tests/AsyncTests.cs b/tests/StackExchange.Redis.Tests/AsyncTests.cs index 5367ef0b6..cba1b1145 100644 --- a/tests/StackExchange.Redis.Tests/AsyncTests.cs +++ b/tests/StackExchange.Redis.Tests/AsyncTests.cs @@ -3,83 +3,84 @@ using System.Linq; using System.Threading.Tasks; using Xunit; -using Xunit.Abstractions; -namespace StackExchange.Redis.Tests +namespace StackExchange.Redis.Tests; + +[Collection(NonParallelCollection.Name)] +public class AsyncTests(ITestOutputHelper output) : TestBase(output) { - [Collection(NonParallelCollection.Name)] - public class AsyncTests : TestBase + [Fact] + public async Task AsyncTasksReportFailureIfServerUnavailable() { - public AsyncTests(ITestOutputHelper output) : base(output) { } - - protected override string GetConfiguration() => TestConfig.Current.MasterServerAndPort; + SetExpectedAmbientFailureCount(-1); // this will get messy - [Fact] - public void AsyncTasksReportFailureIfServerUnavailable() - { - SetExpectedAmbientFailureCount(-1); // this will get messy + await using var conn = Create(allowAdmin: true, shared: false, backlogPolicy: BacklogPolicy.FailFast); + var server = conn.GetServer(TestConfig.Current.PrimaryServer, TestConfig.Current.PrimaryPort); - using (var conn = Create(allowAdmin: true)) - { - var server = 
conn.GetServer(TestConfig.Current.MasterServer, TestConfig.Current.MasterPort); + RedisKey key = Me(); + var db = conn.GetDatabase(); + db.KeyDelete(key); + var a = db.SetAddAsync(key, "a"); + var b = db.SetAddAsync(key, "b"); - RedisKey key = Me(); - var db = conn.GetDatabase(); - db.KeyDelete(key); - var a = db.SetAddAsync(key, "a"); - var b = db.SetAddAsync(key, "b"); + Assert.True(conn.Wait(a)); + Assert.True(conn.Wait(b)); - Assert.True(conn.Wait(a)); - Assert.True(conn.Wait(b)); + conn.AllowConnect = false; + server.SimulateConnectionFailure(SimulatedFailureType.All); + var c = db.SetAddAsync(key, "c"); - conn.AllowConnect = false; - server.SimulateConnectionFailure(); - var c = db.SetAddAsync(key, "c"); + Assert.True(c.IsFaulted, "faulted"); + Assert.NotNull(c.Exception); + var ex = c.Exception.InnerExceptions.Single(); + Assert.IsType(ex); + Assert.StartsWith("No connection is active/available to service this operation: SADD " + key.ToString(), ex.Message); + } - Assert.True(c.IsFaulted, "faulted"); - var ex = c.Exception.InnerExceptions.Single(); - Assert.IsType(ex); - Assert.StartsWith("No connection is active/available to service this operation: SADD " + key.ToString(), ex.Message); - } + [Fact] + public async Task AsyncTimeoutIsNoticed() + { + await using var conn = Create(syncTimeout: 1000, asyncTimeout: 1000); + await using var pauseConn = Create(); + var opt = ConfigurationOptions.Parse(conn.Configuration); + if (!Debugger.IsAttached) + { // we max the timeouts if a debugger is detected + Assert.Equal(1000, opt.AsyncTimeout); } - [Fact] - public async Task AsyncTimeoutIsNoticed() - { - using (var conn = Create(syncTimeout: 1000)) - { - var opt = ConfigurationOptions.Parse(conn.Configuration); - if (!Debugger.IsAttached) - { // we max the timeouts if a degugger is detected - Assert.Equal(1000, opt.AsyncTimeout); - } + RedisKey key = Me(); + var val = Guid.NewGuid().ToString(); + var db = conn.GetDatabase(); + db.StringSet(key, val); - RedisKey key = 
Me(); - var val = Guid.NewGuid().ToString(); - var db = conn.GetDatabase(); - db.StringSet(key, val); + Assert.Contains("; async timeouts: 0;", conn.GetStatus()); - Assert.Contains("; async timeouts: 0;", conn.GetStatus()); + // This is done on another connection, because it queues a SELECT due to being an unknown command that will not timeout + // at the head of the queue + await pauseConn.GetDatabase().ExecuteAsync("client", "pause", 4000).ForAwait(); // client pause returns immediately - await db.ExecuteAsync("client", "pause", 4000).ForAwait(); // client pause returns immediately + var ms = Stopwatch.StartNew(); + var ex = await Assert.ThrowsAsync(async () => + { + Log("Issuing StringGetAsync"); + await db.StringGetAsync(key).ForAwait(); // but *subsequent* operations are paused + ms.Stop(); + Log($"Unexpectedly succeeded after {ms.ElapsedMilliseconds}ms"); + }).ForAwait(); + ms.Stop(); + Log($"Timed out after {ms.ElapsedMilliseconds}ms"); - var ms = Stopwatch.StartNew(); - var ex = await Assert.ThrowsAsync(async () => - { - await db.StringGetAsync(key).ForAwait(); // but *subsequent* operations are paused - ms.Stop(); - Writer.WriteLine($"Unexpectedly succeeded after {ms.ElapsedMilliseconds}ms"); - }).ForAwait(); - ms.Stop(); - Writer.WriteLine($"Timed out after {ms.ElapsedMilliseconds}ms"); + Log("Exception message: " + ex.Message); + Assert.Contains("Timeout awaiting response", ex.Message); + // Ensure we are including the last payload size + Assert.Contains("last-in:", ex.Message); + Assert.DoesNotContain("last-in: 0", ex.Message); + Assert.NotNull(ex.Data["Redis-Last-Result-Bytes"]); - Assert.Contains("Timeout awaiting response", ex.Message); - Writer.WriteLine(ex.Message); + Assert.Contains("cur-in:", ex.Message); - string status = conn.GetStatus(); - Writer.WriteLine(status); - Assert.Contains("; async timeouts: 1;", status); - } - } + string status = conn.GetStatus(); + Log(status); + Assert.Contains("; async timeouts: 1;", status); } } diff --git 
a/tests/StackExchange.Redis.Tests/AzureMaintenanceEventTests.cs b/tests/StackExchange.Redis.Tests/AzureMaintenanceEventTests.cs new file mode 100644 index 000000000..b43731efc --- /dev/null +++ b/tests/StackExchange.Redis.Tests/AzureMaintenanceEventTests.cs @@ -0,0 +1,47 @@ +using System; +using System.Globalization; +using System.Net; +using StackExchange.Redis.Maintenance; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class AzureMaintenanceEventTests(ITestOutputHelper output) : TestBase(output) +{ + [Theory] + [InlineData("NotificationType|NodeMaintenanceStarting|StartTimeInUTC|2021-03-02T23:26:57|IsReplica|False|IPAddress||SSLPort|15001|NonSSLPort|13001", AzureNotificationType.NodeMaintenanceStarting, "2021-03-02T23:26:57", false, null, 15001, 13001)] + [InlineData("NotificationType|NodeMaintenanceFailover|StartTimeInUTC||IsReplica|False|IPAddress||SSLPort|15001|NonSSLPort|13001", AzureNotificationType.NodeMaintenanceFailoverComplete, null, false, null, 15001, 13001)] + [InlineData("NotificationType|NodeMaintenanceFailover|StartTimeInUTC||IsReplica|True|IPAddress||SSLPort|15001|NonSSLPort|13001", AzureNotificationType.NodeMaintenanceFailoverComplete, null, true, null, 15001, 13001)] + [InlineData("NotificationType|NodeMaintenanceStarting|StartTimeInUTC|2021-03-02T23:26:57|IsReplica|j|IPAddress||SSLPort|char|NonSSLPort|char", AzureNotificationType.NodeMaintenanceStarting, "2021-03-02T23:26:57", false, null, 0, 0)] + [InlineData("NotificationType|NodeMaintenanceStarting|somejunkkey|somejunkvalue|StartTimeInUTC|2021-03-02T23:26:57|IsReplica|False|IPAddress||SSLPort|15999|NonSSLPort|139991", AzureNotificationType.NodeMaintenanceStarting, "2021-03-02T23:26:57", false, null, 15999, 139991)] + [InlineData("NotificationType|NodeMaintenanceStarting|somejunkkey|somejunkvalue|StartTimeInUTC|2021-03-02T23:26:57|IsReplica|False|IPAddress|127.0.0.1|SSLPort|15999|NonSSLPort|139991", AzureNotificationType.NodeMaintenanceStarting, "2021-03-02T23:26:57", false, 
"127.0.0.1", 15999, 139991)] + [InlineData("NotificationType|NodeMaintenanceScaleComplete|somejunkkey|somejunkvalue|StartTimeInUTC|2021-03-02T23:26:57|IsReplica|False|IPAddress|127.0.0.1|SSLPort|15999|NonSSLPort|139991", AzureNotificationType.NodeMaintenanceScaleComplete, "2021-03-02T23:26:57", false, "127.0.0.1", 15999, 139991)] + [InlineData("NotificationTypeNodeMaintenanceStartingsomejunkkeysomejunkvalueStartTimeInUTC2021-03-02T23:26:57IsReplicaFalseIPAddress127.0.0.1SSLPort15999NonSSLPort139991", AzureNotificationType.Unknown, null, false, null, 0, 0)] + [InlineData("NotificationType|", AzureNotificationType.Unknown, null, false, null, 0, 0)] + [InlineData("NotificationType|NodeMaintenanceStarting1", AzureNotificationType.Unknown, null, false, null, 0, 0)] + [InlineData("1|2|3", AzureNotificationType.Unknown, null, false, null, 0, 0)] + [InlineData("StartTimeInUTC|", AzureNotificationType.Unknown, null, false, null, 0, 0)] + [InlineData("IsReplica|", AzureNotificationType.Unknown, null, false, null, 0, 0)] + [InlineData("SSLPort|", AzureNotificationType.Unknown, null, false, null, 0, 0)] + [InlineData("NonSSLPort |", AzureNotificationType.Unknown, null, false, null, 0, 0)] + [InlineData("StartTimeInUTC|thisisthestart", AzureNotificationType.Unknown, null, false, null, 0, 0)] + [InlineData(null, AzureNotificationType.Unknown, null, false, null, 0, 0)] + public void TestAzureMaintenanceEventStrings(string? message, AzureNotificationType expectedEventType, string? expectedStart, bool expectedIsReplica, string? expectedIP, int expectedSSLPort, int expectedNonSSLPort) + { + DateTime? expectedStartTimeUtc = null; + if (expectedStart != null && DateTime.TryParseExact(expectedStart, "s", CultureInfo.InvariantCulture, DateTimeStyles.AssumeUniversal, out DateTime startTimeUtc)) + { + expectedStartTimeUtc = DateTime.SpecifyKind(startTimeUtc, DateTimeKind.Utc); + } + _ = IPAddress.TryParse(expectedIP, out IPAddress? 
expectedIPAddress); + + var azureMaintenance = new AzureMaintenanceEvent(message); + + Assert.Equal(expectedEventType, azureMaintenance.NotificationType); + Assert.Equal(expectedStartTimeUtc, azureMaintenance.StartTimeUtc); + Assert.Equal(expectedIsReplica, azureMaintenance.IsReplica); + Assert.Equal(expectedIPAddress, azureMaintenance.IPAddress); + Assert.Equal(expectedSSLPort, azureMaintenance.SslPort); + Assert.Equal(expectedNonSSLPort, azureMaintenance.NonSslPort); + } +} diff --git a/tests/StackExchange.Redis.Tests/BacklogTests.cs b/tests/StackExchange.Redis.Tests/BacklogTests.cs new file mode 100644 index 000000000..e8ed1daf0 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/BacklogTests.cs @@ -0,0 +1,473 @@ +using System; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class BacklogTests(ITestOutputHelper output) : TestBase(output) +{ + protected override string GetConfiguration() => TestConfig.Current.PrimaryServerAndPort + "," + TestConfig.Current.ReplicaServerAndPort; + + [Fact] + public async Task FailFast() + { + void PrintSnapshot(ConnectionMultiplexer muxer) + { + Log("Snapshot summary:"); + foreach (var server in muxer.GetServerSnapshot()) + { + Log($" {server.EndPoint}: "); + Log($" Type: {server.ServerType}"); + Log($" IsConnected: {server.IsConnected}"); + Log($" IsConnecting: {server.IsConnecting}"); + Log($" IsSelectable(allowDisconnected: true): {server.IsSelectable(RedisCommand.PING, true)}"); + Log($" IsSelectable(allowDisconnected: false): {server.IsSelectable(RedisCommand.PING, false)}"); + Log($" UnselectableFlags: {server.GetUnselectableFlags()}"); + var bridge = server.GetBridge(RedisCommand.PING, create: false); + Log($" GetBridge: {bridge}"); + Log($" IsConnected: {bridge?.IsConnected}"); + Log($" ConnectionState: {bridge?.ConnectionState}"); + } + } + + try + { + // Ensuring the FailFast policy errors immediate with no connection available exceptions + var options = new 
ConfigurationOptions() + { + BacklogPolicy = BacklogPolicy.FailFast, + AbortOnConnectFail = false, + ConnectTimeout = 1000, + ConnectRetry = 2, + SyncTimeout = 10000, + KeepAlive = 10000, + AsyncTimeout = 5000, + AllowAdmin = true, + }; + options.EndPoints.Add(TestConfig.Current.PrimaryServerAndPort); + + await using var conn = await ConnectionMultiplexer.ConnectAsync(options, Writer); + + var db = conn.GetDatabase(); + Log("Test: Initial (connected) ping"); + await db.PingAsync(); + + var server = conn.GetServerSnapshot()[0]; + var stats = server.GetBridgeStatus(ConnectionType.Interactive); + Assert.Equal(0, stats.BacklogMessagesPending); // Everything's normal + + // Fail the connection + Log("Test: Simulating failure"); + conn.AllowConnect = false; + server.SimulateConnectionFailure(SimulatedFailureType.All); + Assert.False(conn.IsConnected); + + // Queue up some commands + Log("Test: Disconnected pings"); + await Assert.ThrowsAsync(() => db.PingAsync()); + + var disconnectedStats = server.GetBridgeStatus(ConnectionType.Interactive); + Assert.False(conn.IsConnected); + Assert.Equal(0, disconnectedStats.BacklogMessagesPending); + + Log("Test: Allowing reconnect"); + conn.AllowConnect = true; + Log("Test: Awaiting reconnect"); + await UntilConditionAsync(TimeSpan.FromSeconds(3), () => conn.IsConnected).ForAwait(); + + Log("Test: Reconnecting"); + Assert.True(conn.IsConnected); + Assert.True(server.IsConnected); + var reconnectedStats = server.GetBridgeStatus(ConnectionType.Interactive); + Assert.Equal(0, reconnectedStats.BacklogMessagesPending); + + _ = db.PingAsync(); + _ = db.PingAsync(); + var lastPing = db.PingAsync(); + + // For debug, print out the snapshot and server states + PrintSnapshot(conn); + + Assert.NotNull(conn.SelectServer(Message.Create(-1, CommandFlags.None, RedisCommand.PING))); + + // We should see none queued + Assert.Equal(0, stats.BacklogMessagesPending); + await lastPing; + } + finally + { + ClearAmbientFailures(); + } + } + + [Fact] + 
public async Task QueuesAndFlushesAfterReconnectingAsync() + { + try + { + var options = new ConfigurationOptions() + { + BacklogPolicy = BacklogPolicy.Default, + AbortOnConnectFail = false, + ConnectTimeout = 1000, + ConnectRetry = 2, + SyncTimeout = 10000, + KeepAlive = 10000, + AsyncTimeout = 5000, + AllowAdmin = true, + SocketManager = SocketManager.ThreadPool, + }; + options.EndPoints.Add(TestConfig.Current.PrimaryServerAndPort); + + await using var conn = await ConnectionMultiplexer.ConnectAsync(options, Writer); + conn.ErrorMessage += (s, e) => Log($"Error Message {e.EndPoint}: {e.Message}"); + conn.InternalError += (s, e) => Log($"Internal Error {e.EndPoint}: {e.Exception.Message}"); + conn.ConnectionFailed += (s, a) => Log("Disconnected: " + EndPointCollection.ToString(a.EndPoint)); + conn.ConnectionRestored += (s, a) => Log("Reconnected: " + EndPointCollection.ToString(a.EndPoint)); + + var db = conn.GetDatabase(); + Log("Test: Initial (connected) ping"); + await db.PingAsync(); + + var server = conn.GetServerSnapshot()[0]; + var stats = server.GetBridgeStatus(ConnectionType.Interactive); + Assert.Equal(0, stats.BacklogMessagesPending); // Everything's normal + + // Fail the connection + Log("Test: Simulating failure"); + conn.AllowConnect = false; + server.SimulateConnectionFailure(SimulatedFailureType.All); + Assert.False(conn.IsConnected); + + // Queue up some commands + Log("Test: Disconnected pings"); + var ignoredA = db.PingAsync(); + var ignoredB = db.PingAsync(); + var lastPing = db.PingAsync(); + + // TODO: Add specific server call + var disconnectedStats = server.GetBridgeStatus(ConnectionType.Interactive); + Assert.False(conn.IsConnected); + Assert.True(disconnectedStats.BacklogMessagesPending >= 3, $"Expected {nameof(disconnectedStats.BacklogMessagesPending)} > 3, got {disconnectedStats.BacklogMessagesPending}"); + + Log("Test: Allowing reconnect"); + conn.AllowConnect = true; + Log("Test: Awaiting reconnect"); + await 
UntilConditionAsync(TimeSpan.FromSeconds(3), () => conn.IsConnected).ForAwait(); + + Log("Test: Checking reconnected 1"); + Assert.True(conn.IsConnected); + + Log("Test: ignoredA Status: " + ignoredA.Status); + Log("Test: ignoredB Status: " + ignoredB.Status); + Log("Test: lastPing Status: " + lastPing.Status); + var afterConnectedStats = server.GetBridgeStatus(ConnectionType.Interactive); + Log($"Test: BacklogStatus: {afterConnectedStats.BacklogStatus}, BacklogMessagesPending: {afterConnectedStats.BacklogMessagesPending}, IsWriterActive: {afterConnectedStats.IsWriterActive}, MessagesSinceLastHeartbeat: {afterConnectedStats.MessagesSinceLastHeartbeat}, TotalBacklogMessagesQueued: {afterConnectedStats.TotalBacklogMessagesQueued}"); + + Log("Test: Awaiting lastPing 1"); + await lastPing; + + Log("Test: Checking reconnected 2"); + Assert.True(conn.IsConnected); + var reconnectedStats = server.GetBridgeStatus(ConnectionType.Interactive); + Assert.Equal(0, reconnectedStats.BacklogMessagesPending); + + Log("Test: Pinging again..."); + _ = db.PingAsync(); + _ = db.PingAsync(); + Log("Test: Last Ping issued"); + lastPing = db.PingAsync(); + + // We should see none queued + Log("Test: BacklogMessagesPending check"); + Assert.Equal(0, stats.BacklogMessagesPending); + Log("Test: Awaiting lastPing 2"); + await lastPing; + Log("Test: Done"); + } + finally + { + ClearAmbientFailures(); + } + } + + [Fact] + public async Task QueuesAndFlushesAfterReconnecting() + { + try + { + var options = new ConfigurationOptions() + { + BacklogPolicy = BacklogPolicy.Default, + AbortOnConnectFail = false, + ConnectTimeout = 1000, + ConnectRetry = 2, + SyncTimeout = 10000, + KeepAlive = 10000, + AsyncTimeout = 5000, + AllowAdmin = true, + SocketManager = SocketManager.ThreadPool, + }; + options.EndPoints.Add(TestConfig.Current.PrimaryServerAndPort); + + await using var conn = await ConnectionMultiplexer.ConnectAsync(options, Writer); + conn.ErrorMessage += (s, e) => Log($"Error Message 
{e.EndPoint}: {e.Message}"); + conn.InternalError += (s, e) => Log($"Internal Error {e.EndPoint}: {e.Exception.Message}"); + conn.ConnectionFailed += (s, a) => Log("Disconnected: " + EndPointCollection.ToString(a.EndPoint)); + conn.ConnectionRestored += (s, a) => Log("Reconnected: " + EndPointCollection.ToString(a.EndPoint)); + + var db = conn.GetDatabase(); + Log("Test: Initial (connected) ping"); + await db.PingAsync(); + + var server = conn.GetServerSnapshot()[0]; + var stats = server.GetBridgeStatus(ConnectionType.Interactive); + Assert.Equal(0, stats.BacklogMessagesPending); // Everything's normal + + // Fail the connection + Log("Test: Simulating failure"); + conn.AllowConnect = false; + server.SimulateConnectionFailure(SimulatedFailureType.All); + Assert.False(conn.IsConnected); + + // Queue up some commands + Log("Test: Disconnected pings"); + + Task[] pings = + [ + RunBlockingSynchronousWithExtraThreadAsync(() => DisconnectedPings(1)), + RunBlockingSynchronousWithExtraThreadAsync(() => DisconnectedPings(2)), + RunBlockingSynchronousWithExtraThreadAsync(() => DisconnectedPings(3)), + ]; + void DisconnectedPings(int id) + { + // No need to delay, we're going to try a disconnected connection immediately so it'll fail... 
+ Log($"Pinging (disconnected - {id})"); + var result = db.Ping(); + Log($"Pinging (disconnected - {id}) - result: " + result); + } + Log("Test: Disconnected pings issued"); + + Assert.False(conn.IsConnected); + // Give the tasks time to queue + await UntilConditionAsync(TimeSpan.FromSeconds(5), () => server.GetBridgeStatus(ConnectionType.Interactive).BacklogMessagesPending >= 3); + + var disconnectedStats = server.GetBridgeStatus(ConnectionType.Interactive); + Log($"Test Stats: (BacklogMessagesPending: {disconnectedStats.BacklogMessagesPending}, TotalBacklogMessagesQueued: {disconnectedStats.TotalBacklogMessagesQueued})"); + Assert.True(disconnectedStats.BacklogMessagesPending >= 3, $"Expected {nameof(disconnectedStats.BacklogMessagesPending)} > 3, got {disconnectedStats.BacklogMessagesPending}"); + + Log("Test: Allowing reconnect"); + conn.AllowConnect = true; + Log("Test: Awaiting reconnect"); + await UntilConditionAsync(TimeSpan.FromSeconds(3), () => conn.IsConnected).ForAwait(); + + Log("Test: Checking reconnected 1"); + Assert.True(conn.IsConnected); + + var afterConnectedStats = server.GetBridgeStatus(ConnectionType.Interactive); + Log($"Test: BacklogStatus: {afterConnectedStats.BacklogStatus}, BacklogMessagesPending: {afterConnectedStats.BacklogMessagesPending}, IsWriterActive: {afterConnectedStats.IsWriterActive}, MessagesSinceLastHeartbeat: {afterConnectedStats.MessagesSinceLastHeartbeat}, TotalBacklogMessagesQueued: {afterConnectedStats.TotalBacklogMessagesQueued}"); + + Log("Test: Awaiting 3 pings"); + await Task.WhenAll(pings); + + Log("Test: Checking reconnected 2"); + Assert.True(conn.IsConnected); + var reconnectedStats = server.GetBridgeStatus(ConnectionType.Interactive); + Assert.Equal(0, reconnectedStats.BacklogMessagesPending); + + Log("Test: Pinging again..."); + pings[0] = RunBlockingSynchronousWithExtraThreadAsync(() => DisconnectedPings(4)); + pings[1] = RunBlockingSynchronousWithExtraThreadAsync(() => DisconnectedPings(5)); + pings[2] = 
RunBlockingSynchronousWithExtraThreadAsync(() => DisconnectedPings(6)); + Log("Test: Last Ping queued"); + + // We should see none queued + Log("Test: BacklogMessagesPending check"); + Assert.Equal(0, stats.BacklogMessagesPending); + Log("Test: Awaiting 3 more pings"); + await Task.WhenAll(pings); + Log("Test: Done"); + } + finally + { + ClearAmbientFailures(); + } + } + + [Fact] + public async Task QueuesAndFlushesAfterReconnectingClusterAsync() + { + try + { + var options = ConfigurationOptions.Parse(TestConfig.Current.ClusterServersAndPorts); + options.BacklogPolicy = BacklogPolicy.Default; + options.AbortOnConnectFail = false; + options.ConnectTimeout = 1000; + options.ConnectRetry = 2; + options.SyncTimeout = 10000; + options.KeepAlive = 10000; + options.AsyncTimeout = 5000; + options.AllowAdmin = true; + options.SocketManager = SocketManager.ThreadPool; + + await using var conn = await ConnectionMultiplexer.ConnectAsync(options, Writer); + conn.ErrorMessage += (s, e) => Log($"Error Message {e.EndPoint}: {e.Message}"); + conn.InternalError += (s, e) => Log($"Internal Error {e.EndPoint}: {e.Exception.Message}"); + conn.ConnectionFailed += (s, a) => Log("Disconnected: " + EndPointCollection.ToString(a.EndPoint)); + conn.ConnectionRestored += (s, a) => Log("Reconnected: " + EndPointCollection.ToString(a.EndPoint)); + + var db = conn.GetDatabase(); + Log("Test: Initial (connected) ping"); + await db.PingAsync(); + + RedisKey meKey = Me(); + var getMsg = Message.Create(0, CommandFlags.None, RedisCommand.GET, meKey); + + ServerEndPoint? 
server = null; // Get the server specifically for this message's hash slot + await UntilConditionAsync(TimeSpan.FromSeconds(10), () => (server = conn.SelectServer(getMsg)) != null); + + Assert.NotNull(server); + var stats = server.GetBridgeStatus(ConnectionType.Interactive); + Assert.Equal(0, stats.BacklogMessagesPending); // Everything's normal + + static Task PingAsync(ServerEndPoint server, CommandFlags flags = CommandFlags.None) + { + var message = ResultProcessor.TimingProcessor.CreateMessage(-1, flags, RedisCommand.PING); + + server.Multiplexer.CheckMessage(message); + return server.Multiplexer.ExecuteAsyncImpl(message, ResultProcessor.ResponseTimer, null, server); + } + + // Fail the connection + Log("Test: Simulating failure"); + conn.AllowConnect = false; + server.SimulateConnectionFailure(SimulatedFailureType.All); + Assert.False(server.IsConnected); // Server isn't connected + Assert.True(conn.IsConnected); // ...but the multiplexer is + + // Queue up some commands + Log("Test: Disconnected pings"); + var ignoredA = PingAsync(server); + var ignoredB = PingAsync(server); + var lastPing = PingAsync(server); + + var disconnectedStats = server.GetBridgeStatus(ConnectionType.Interactive); + Assert.False(server.IsConnected); + Assert.True(conn.IsConnected); + Assert.True(disconnectedStats.BacklogMessagesPending >= 3, $"Expected {nameof(disconnectedStats.BacklogMessagesPending)} >= 3, got {disconnectedStats.BacklogMessagesPending}"); + + Log("Test: Allowing reconnect"); + conn.AllowConnect = true; + Log("Test: Awaiting reconnect"); + await UntilConditionAsync(TimeSpan.FromSeconds(3), () => server.IsConnected).ForAwait(); + + Log("Test: Checking reconnected 1"); + Assert.True(server.IsConnected); + Assert.True(conn.IsConnected); + + Log("Test: ignoredA Status: " + ignoredA.Status); + Log("Test: ignoredB Status: " + ignoredB.Status); + Log("Test: lastPing Status: " + lastPing.Status); + var afterConnectedStats = server.GetBridgeStatus(ConnectionType.Interactive);
+ Log($"Test: BacklogStatus: {afterConnectedStats.BacklogStatus}, BacklogMessagesPending: {afterConnectedStats.BacklogMessagesPending}, IsWriterActive: {afterConnectedStats.IsWriterActive}, MessagesSinceLastHeartbeat: {afterConnectedStats.MessagesSinceLastHeartbeat}, TotalBacklogMessagesQueued: {afterConnectedStats.TotalBacklogMessagesQueued}"); + + Log("Test: Awaiting lastPing 1"); + await lastPing; + + Log("Test: Checking reconnected 2"); + Assert.True(server.IsConnected); + Assert.True(conn.IsConnected); + var reconnectedStats = server.GetBridgeStatus(ConnectionType.Interactive); + Assert.Equal(0, reconnectedStats.BacklogMessagesPending); + + Log("Test: Pinging again..."); + _ = PingAsync(server); + _ = PingAsync(server); + Log("Test: Last Ping issued"); + lastPing = PingAsync(server); + + // We should see none queued + Log("Test: BacklogMessagesPending check"); + Assert.Equal(0, stats.BacklogMessagesPending); + Log("Test: Awaiting lastPing 2"); + await lastPing; + Log("Test: Done"); + } + finally + { + ClearAmbientFailures(); + } + } + + [Fact] + public async Task TotalOutstandingIncludesBacklogQueue() + { + try + { + var options = new ConfigurationOptions() + { + BacklogPolicy = BacklogPolicy.Default, + AbortOnConnectFail = false, + ConnectTimeout = 1000, + ConnectRetry = 2, + SyncTimeout = 10000, + KeepAlive = 10000, + AsyncTimeout = 5000, + AllowAdmin = true, + SocketManager = SocketManager.ThreadPool, + }; + options.EndPoints.Add(TestConfig.Current.PrimaryServerAndPort); + + using var conn = await ConnectionMultiplexer.ConnectAsync(options, Writer); + var db = conn.GetDatabase(); + Log("Test: Initial (connected) ping"); + await db.PingAsync(); + + var server = conn.GetServerSnapshot()[0]; + + // Verify TotalOutstanding is 0 when connected and idle + Log("Test: asserting connected counters"); + var connectedServerCounters = server.GetCounters(); + var connectedConnCounters = conn.GetCounters(); + Assert.Equal(0, 
connectedServerCounters.Interactive.TotalOutstanding); + Assert.Equal(0, connectedConnCounters.TotalOutstanding); + + Log("Test: Simulating failure"); + conn.AllowConnect = false; + server.SimulateConnectionFailure(SimulatedFailureType.All); + + // Queue up some commands + Log("Test: Disconnected pings"); + _ = db.PingAsync(); + _ = db.PingAsync(); + var lastPing = db.PingAsync(); + + Log("Test: asserting disconnected counters"); + var disconnectedServerCounters = server.GetCounters(); + var disconnectedConnCounters = conn.GetCounters(); + Assert.True(disconnectedServerCounters.Interactive.PendingUnsentItems >= 3, $"Expected PendingUnsentItems >= 3, got {disconnectedServerCounters.Interactive.PendingUnsentItems}"); + Assert.True(disconnectedConnCounters.TotalOutstanding >= 3, $"Expected TotalOutstanding >= 3, got {disconnectedConnCounters.TotalOutstanding}"); + + Log("Test: Awaiting reconnect"); + conn.AllowConnect = true; + await UntilConditionAsync(TimeSpan.FromSeconds(3), () => conn.IsConnected).ForAwait(); + + Log("Test: Awaiting lastPing"); + await lastPing; + + Log("Test: Checking reconnected"); + Assert.True(conn.IsConnected); + + Log("Test: asserting reconnected counters"); + var reconnectedServerCounters = server.GetCounters(); + var reconnectedConnCounters = conn.GetCounters(); + Assert.Equal(0, reconnectedServerCounters.Interactive.PendingUnsentItems); + Assert.Equal(0, reconnectedConnCounters.TotalOutstanding); + Log("Test: Done"); + } + finally + { + ClearAmbientFailures(); + } + } +} diff --git a/tests/StackExchange.Redis.Tests/BasicOpTests.cs b/tests/StackExchange.Redis.Tests/BasicOpTests.cs new file mode 100644 index 000000000..2b045823d --- /dev/null +++ b/tests/StackExchange.Redis.Tests/BasicOpTests.cs @@ -0,0 +1,510 @@ +using System; +using System.Diagnostics; +using System.Threading.Tasks; +using StackExchange.Redis.KeyspaceIsolation; +using Xunit; + +namespace StackExchange.Redis.Tests; + +[RunPerProtocol] +public class
BasicOpsTests(ITestOutputHelper output, SharedConnectionFixture fixture) + : BasicOpsTestsBase(output, fixture, null) +{ +} + +/* +[RunPerProtocol] +public class InProcBasicOpsTests(ITestOutputHelper output, InProcServerFixture fixture) + : BasicOpsTestsBase(output, null, fixture) +{ + protected override bool UseDedicatedInProcessServer => true; +} +*/ + +[RunPerProtocol] +public abstract class BasicOpsTestsBase(ITestOutputHelper output, SharedConnectionFixture? connection, InProcServerFixture? server) + : TestBase(output, connection, server) +{ + [Fact] + public async Task PingOnce() + { + await using var conn = ConnectFactory(); + var db = conn.GetDatabase(); + + var duration = await db.PingAsync().ForAwait(); + Log("Ping took: " + duration); + Assert.True(duration.TotalMilliseconds > 0); + } + + [Fact] + public async Task RapidDispose() + { + SkipIfWouldUseRealServer("This needs some CI love, it's not a scenario we care about too much but noisy atm."); + await using var primary = ConnectFactory(); + var db = primary.GetDatabase(); + RedisKey key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + + for (int i = 0; i < 10; i++) + { + await using var secondary = primary.CreateClient(); + secondary.GetDatabase().StringIncrement(key, flags: CommandFlags.FireAndForget); + } + // Give it a moment to get through the pipe...they were fire and forget + await UntilConditionAsync(TimeSpan.FromSeconds(5), () => 10 == (int)db.StringGet(key)); + Assert.Equal(10, (int)db.StringGet(key)); + } + + [Fact] + public async Task PingMany() + { + await using var conn = ConnectFactory(); + var db = conn.GetDatabase(); + var tasks = new Task[100]; + for (int i = 0; i < tasks.Length; i++) + { + tasks[i] = db.PingAsync(); + } + await Task.WhenAll(tasks).ForAwait(); + Assert.True(tasks[0].Result.TotalMilliseconds > 0); + Assert.True(tasks[tasks.Length - 1].Result.TotalMilliseconds > 0); + } + + [Fact] + public async Task GetWithNullKey() + { + await using var conn = 
ConnectFactory(); + var db = conn.GetDatabase(); + const string? key = null; + var ex = Assert.Throws<ArgumentException>(() => db.StringGet(key)); + Assert.Equal("A null key is not valid in this context", ex.Message); + } + + [Fact] + public async Task SetWithNullKey() + { + await using var conn = ConnectFactory(); + var db = conn.GetDatabase(); + const string? key = null, value = "abc"; + var ex = Assert.Throws<ArgumentException>(() => db.StringSet(key!, value)); + Assert.Equal("A null key is not valid in this context", ex.Message); + } + + [Fact] + public async Task SetWithNullValue() + { + await using var conn = ConnectFactory(); + var db = conn.GetDatabase(); + string key = Me(); + const string? value = null; + db.KeyDelete(key, CommandFlags.FireAndForget); + + db.StringSet(key, "abc", flags: CommandFlags.FireAndForget); + Assert.True(db.KeyExists(key)); + db.StringSet(key, value, flags: CommandFlags.FireAndForget); + + var actual = (string?)db.StringGet(key); + Assert.Null(actual); + Assert.False(db.KeyExists(key)); + } + + [Fact] + public async Task SetWithDefaultValue() + { + await using var conn = ConnectFactory(); + var db = conn.GetDatabase(); + string key = Me(); + var value = default(RedisValue); // this is kinda 0...
ish + db.KeyDelete(key, CommandFlags.FireAndForget); + + db.StringSet(key, "abc", flags: CommandFlags.FireAndForget); + Assert.True(db.KeyExists(key)); + db.StringSet(key, value, flags: CommandFlags.FireAndForget); + + var actual = (string?)db.StringGet(key); + Assert.Null(actual); + Assert.False(db.KeyExists(key)); + } + + [Fact] + public async Task SetWithZeroValue() + { + await using var conn = ConnectFactory(); + var db = conn.GetDatabase(); + string key = Me(); + const long value = 0; + db.KeyDelete(key, CommandFlags.FireAndForget); + + db.StringSet(key, "abc", flags: CommandFlags.FireAndForget); + Assert.True(db.KeyExists(key)); + db.StringSet(key, value, flags: CommandFlags.FireAndForget); + + var actual = (string?)db.StringGet(key); + Assert.Equal("0", actual); + Assert.True(db.KeyExists(key)); + } + + [Fact] + public async Task GetSetAsync() + { + await using var conn = ConnectFactory(); + var db = conn.GetDatabase(); + + RedisKey key = Me(); + var d0 = db.KeyDeleteAsync(key); + var d1 = db.KeyDeleteAsync(key); + var g1 = db.StringGetAsync(key); + var s1 = db.StringSetAsync(key, "123"); + var g2 = db.StringGetAsync(key); + var d2 = db.KeyDeleteAsync(key); + + await d0; + Assert.False(await d1); + Assert.Null((string?)(await g1)); + Assert.True((await g1).IsNull); + await s1; + Assert.Equal("123", await g2); + Assert.Equal(123, (int)(await g2)); + Assert.False((await g2).IsNull); + Assert.True(await d2); + } + + [Fact] + public async Task GetSetSync() + { + await using var conn = ConnectFactory(); + var db = conn.GetDatabase(); + + RedisKey key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + var d1 = db.KeyDelete(key); + var g1 = db.StringGet(key); + db.StringSet(key, "123", flags: CommandFlags.FireAndForget); + var g2 = db.StringGet(key); + var d2 = db.KeyDelete(key); + + Assert.False(d1); + Assert.Null((string?)g1); + Assert.True(g1.IsNull); + + Assert.Equal("123", g2); + Assert.Equal(123, (int)g2); + Assert.False(g2.IsNull); + Assert.True(d2); 
+ } + + [Theory] + [InlineData(false, false)] + [InlineData(true, true)] + [InlineData(true, false)] + public async Task GetWithExpiry(bool exists, bool hasExpiry) + { + await using var conn = ConnectFactory(); + var db = conn.GetDatabase(); + RedisKey key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + if (exists) + { + if (hasExpiry) + db.StringSet(key, "val", TimeSpan.FromMinutes(5), flags: CommandFlags.FireAndForget); + else + db.StringSet(key, "val", flags: CommandFlags.FireAndForget); + } + var async = db.StringGetWithExpiryAsync(key); + var syncResult = db.StringGetWithExpiry(key); + var asyncResult = await async; + + if (exists) + { + Assert.Equal("val", asyncResult.Value); + Assert.Equal(hasExpiry, asyncResult.Expiry.HasValue); + if (hasExpiry) Assert.True(asyncResult.Expiry!.Value.TotalMinutes >= 4.9 && asyncResult.Expiry.Value.TotalMinutes <= 5); + Assert.Equal("val", syncResult.Value); + Assert.Equal(hasExpiry, syncResult.Expiry.HasValue); + if (hasExpiry) Assert.True(syncResult.Expiry!.Value.TotalMinutes >= 4.9 && syncResult.Expiry.Value.TotalMinutes <= 5); + } + else + { + Assert.True(asyncResult.Value.IsNull); + Assert.False(asyncResult.Expiry.HasValue); + Assert.True(syncResult.Value.IsNull); + Assert.False(syncResult.Expiry.HasValue); + } + } + + [Fact] + public async Task GetWithExpiryWrongTypeAsync() + { + await using var conn = ConnectFactory(); + var db = conn.GetDatabase(); + RedisKey key = Me(); + _ = db.KeyDeleteAsync(key); + _ = db.SetAddAsync(key, "abc"); + var ex = await Assert.ThrowsAsync(async () => + { + try + { + Log("Key: " + (string?)key); + await db.StringGetWithExpiryAsync(key).ForAwait(); + } + catch (AggregateException e) + { + throw e.InnerExceptions[0]; + } + }).ForAwait(); + Assert.Equal("WRONGTYPE Operation against a key holding the wrong kind of value", ex.Message); + } + + [Fact] + public async Task GetWithExpiryWrongTypeSync() + { + await using var conn = ConnectFactory(); + var db = conn.GetDatabase(); + 
RedisKey key = Me(); + var ex = await Assert.ThrowsAsync(async () => + { + db.KeyDelete(key, CommandFlags.FireAndForget); + db.SetAdd(key, "abc", CommandFlags.FireAndForget); + db.StringGetWithExpiry(key); + }); + Assert.Equal("WRONGTYPE Operation against a key holding the wrong kind of value", ex.Message); + } + +#if DEBUG + [Fact] + public async Task TestSevered() + { + await using var conn = ConnectFactory(allowAdmin: true, shared: false); + var db = conn.GetDatabase(); + string key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + db.StringSet(key, key, flags: CommandFlags.FireAndForget); + var server = GetServer(conn.DefaultClient); + Assert.SkipUnless(server.CanSimulateConnectionFailure(), "Skipping because server cannot simulate connection failure"); + + SetExpectedAmbientFailureCount(2); + server.SimulateConnectionFailure(SimulatedFailureType.All); + var watch = Stopwatch.StartNew(); + await UntilConditionAsync(TimeSpan.FromSeconds(10), () => server.IsConnected); + watch.Stop(); + Log("Time to re-establish: {0}ms (any order)", watch.ElapsedMilliseconds); + await UntilConditionAsync(TimeSpan.FromSeconds(10), () => key == db.StringGet(key)); + Debug.WriteLine("Pinging..."); + Assert.Equal(key, db.StringGet(key)); + } +#endif + + [Fact] + public async Task IncrAsync() + { + await using var conn = ConnectFactory(); + var db = conn.GetDatabase(); + RedisKey key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + var nix = db.KeyExistsAsync(key).ForAwait(); + var a = db.StringGetAsync(key).ForAwait(); + var b = db.StringIncrementAsync(key).ForAwait(); + var c = db.StringGetAsync(key).ForAwait(); + var d = db.StringIncrementAsync(key, 10).ForAwait(); + var e = db.StringGetAsync(key).ForAwait(); + var f = db.StringDecrementAsync(key, 11).ForAwait(); + var g = db.StringGetAsync(key).ForAwait(); + var h = db.KeyExistsAsync(key).ForAwait(); + Assert.False(await nix); + Assert.True((await a).IsNull); + Assert.Equal(0, (long)(await a)); + Assert.Equal(1, 
await b); + Assert.Equal(1, (long)(await c)); + Assert.Equal(11, await d); + Assert.Equal(11, (long)(await e)); + Assert.Equal(0, await f); + Assert.Equal(0, (long)(await g)); + Assert.True(await h); + } + + [Fact] + public async Task IncrSync() + { + await using var conn = ConnectFactory(); + var db = conn.GetDatabase(); + RedisKey key = Me(); + Log(key); + db.KeyDelete(key, CommandFlags.FireAndForget); + var nix = db.KeyExists(key); + var a = db.StringGet(key); + var b = db.StringIncrement(key); + var c = db.StringGet(key); + var d = db.StringIncrement(key, 10); + var e = db.StringGet(key); + var f = db.StringDecrement(key, 11); + var g = db.StringGet(key); + var h = db.KeyExists(key); + Assert.False(nix); + Assert.True(a.IsNull); + Assert.Equal(0, (long)a); + Assert.Equal(1, b); + Assert.Equal(1, (long)c); + Assert.Equal(11, d); + Assert.Equal(11, (long)e); + Assert.Equal(0, f); + Assert.Equal(0, (long)g); + Assert.True(h); + } + + [Fact] + public async Task IncrDifferentSizes() + { + await using var conn = ConnectFactory(); + var db = conn.GetDatabase(); + RedisKey key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + int expected = 0; + Incr(db, key, -129019, ref expected); + Incr(db, key, -10023, ref expected); + Incr(db, key, -9933, ref expected); + Incr(db, key, -23, ref expected); + Incr(db, key, -7, ref expected); + Incr(db, key, -1, ref expected); + Incr(db, key, 0, ref expected); + Incr(db, key, 1, ref expected); + Incr(db, key, 9, ref expected); + Incr(db, key, 11, ref expected); + Incr(db, key, 345, ref expected); + Incr(db, key, 4982, ref expected); + Incr(db, key, 13091, ref expected); + Incr(db, key, 324092, ref expected); + Assert.NotEqual(0, expected); + var sum = (long)db.StringGet(key); + Assert.Equal(expected, sum); + } + + private static void Incr(IDatabase database, RedisKey key, int delta, ref int total) + { + database.StringIncrement(key, delta, CommandFlags.FireAndForget); + total += delta; + } + + [Fact] + public async Task 
Delete() + { + await using var conn = ConnectFactory(); + var db = conn.GetDatabase(); + var key = Me(); + _ = db.StringSetAsync(key, "Heyyyyy"); + var ke1 = db.KeyExistsAsync(key).ForAwait(); + var ku1 = db.KeyDelete(key); + var ke2 = db.KeyExistsAsync(key).ForAwait(); + Assert.True(await ke1); + Assert.True(ku1); + Assert.False(await ke2); + } + + [Fact] + public async Task DeleteAsync() + { + await using var conn = ConnectFactory(); + var db = conn.GetDatabase(); + var key = Me(); + _ = db.StringSetAsync(key, "Heyyyyy"); + var ke1 = db.KeyExistsAsync(key).ForAwait(); + var ku1 = db.KeyDeleteAsync(key).ForAwait(); + var ke2 = db.KeyExistsAsync(key).ForAwait(); + Assert.True(await ke1); + Assert.True(await ku1); + Assert.False(await ke2); + } + + [Fact] + public async Task DeleteMany() + { + await using var conn = ConnectFactory(); + var db = conn.GetDatabase(); + var key1 = Me(); + var key2 = Me() + "2"; + var key3 = Me() + "3"; + _ = db.StringSetAsync(key1, "Heyyyyy"); + _ = db.StringSetAsync(key2, "Heyyyyy"); + // key 3 not set + var ku1 = db.KeyDelete([key1, key2, key3]); + var ke1 = db.KeyExistsAsync(key1).ForAwait(); + var ke2 = db.KeyExistsAsync(key2).ForAwait(); + Assert.Equal(2, ku1); + Assert.False(await ke1); + Assert.False(await ke2); + } + + [Fact] + public async Task DeleteManyAsync() + { + await using var conn = ConnectFactory(); + var db = conn.GetDatabase(); + var key1 = Me(); + var key2 = Me() + "2"; + var key3 = Me() + "3"; + _ = db.StringSetAsync(key1, "Heyyyyy"); + _ = db.StringSetAsync(key2, "Heyyyyy"); + // key 3 not set + var ku1 = db.KeyDeleteAsync([key1, key2, key3]).ForAwait(); + var ke1 = db.KeyExistsAsync(key1).ForAwait(); + var ke2 = db.KeyExistsAsync(key2).ForAwait(); + Assert.Equal(2, await ku1); + Assert.False(await ke1); + Assert.False(await ke2); + } + + [Fact] + public async Task WrappedDatabasePrefixIntegration() + { + var key = Me(); + await using var conn = ConnectFactory(); + var db = conn.GetDatabase().WithKeyPrefix("abc"); 
+ db.KeyDelete(key, CommandFlags.FireAndForget); + db.StringIncrement(key, flags: CommandFlags.FireAndForget); + db.StringIncrement(key, flags: CommandFlags.FireAndForget); + db.StringIncrement(key, flags: CommandFlags.FireAndForget); + + int count = (int)conn.GetDatabase().StringGet("abc" + key); + Assert.Equal(3, count); + } + + [Fact] + public async Task TransactionSync() + { + await using var conn = ConnectFactory(); + Assert.SkipUnless(conn.DefaultClient.RawConfig.CommandMap.IsAvailable(RedisCommand.MULTI), "MULTI is not available"); + var db = conn.GetDatabase(); + + RedisKey key = Me(); + + var tran = db.CreateTransaction(); + _ = db.KeyDeleteAsync(key); + var x = tran.StringIncrementAsync(Me()); + var y = tran.StringIncrementAsync(Me()); + var z = tran.StringIncrementAsync(Me()); + Assert.True(tran.Execute()); + Assert.Equal(1, x.Result); + Assert.Equal(2, y.Result); + Assert.Equal(3, z.Result); + } + + [Fact] + public async Task TransactionAsync() + { + await using var conn = ConnectFactory(); + Assert.SkipUnless(conn.DefaultClient.RawConfig.CommandMap.IsAvailable(RedisCommand.MULTI), "MULTI is not available"); + + var db = conn.GetDatabase(); + + RedisKey key = Me(); + + var tran = db.CreateTransaction(); + _ = db.KeyDeleteAsync(key); + var x = tran.StringIncrementAsync(Me()); + var y = tran.StringIncrementAsync(Me()); + var z = tran.StringIncrementAsync(Me()); + Assert.True(await tran.ExecuteAsync()); + Assert.Equal(1, await x); + Assert.Equal(2, await y); + Assert.Equal(3, await z); + } +} diff --git a/tests/StackExchange.Redis.Tests/BasicOps.cs b/tests/StackExchange.Redis.Tests/BasicOps.cs deleted file mode 100644 index 6a5d352d8..000000000 --- a/tests/StackExchange.Redis.Tests/BasicOps.cs +++ /dev/null @@ -1,524 +0,0 @@ -using System; -using System.Diagnostics; -using System.Threading.Tasks; -using StackExchange.Redis.KeyspaceIsolation; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - 
[Collection(SharedConnectionFixture.Key)] - public class BasicOpsTests : TestBase - { - public BasicOpsTests(ITestOutputHelper output, SharedConnectionFixture fixture) : base (output, fixture) { } - - [Fact] - public async Task PingOnce() - { - using (var muxer = Create()) - { - var conn = muxer.GetDatabase(); - - var duration = await conn.PingAsync().ForAwait(); - Log("Ping took: " + duration); - Assert.True(duration.TotalMilliseconds > 0); - } - } - - [Fact] - public void RapidDispose() - { - RedisKey key = Me(); - using (var primary = Create()) - { - var conn = primary.GetDatabase(); - conn.KeyDelete(key, CommandFlags.FireAndForget); - - for (int i = 0; i < 10; i++) - { - using (var secondary = Create(fail: true, shared: false)) - { - secondary.GetDatabase().StringIncrement(key, flags: CommandFlags.FireAndForget); - } - } - Assert.Equal(10, (int)conn.StringGet(key)); - } - } - - [Fact] - public async Task PingMany() - { - using (var muxer = Create()) - { - var conn = muxer.GetDatabase(); - var tasks = new Task[100]; - for (int i = 0; i < tasks.Length; i++) - { - tasks[i] = conn.PingAsync(); - } - await Task.WhenAll(tasks).ForAwait(); - Assert.True(tasks[0].Result.TotalMilliseconds > 0); - Assert.True(tasks[tasks.Length - 1].Result.TotalMilliseconds > 0); - } - } - - [Fact] - public void GetWithNullKey() - { - using (var muxer = Create()) - { - var db = muxer.GetDatabase(); - const string key = null; - var ex = Assert.Throws(() => db.StringGet(key)); - Assert.Equal("A null key is not valid in this context", ex.Message); - } - } - - [Fact] - public void SetWithNullKey() - { - using (var muxer = Create()) - { - var db = muxer.GetDatabase(); - const string key = null, value = "abc"; - var ex = Assert.Throws(() => db.StringSet(key, value)); - Assert.Equal("A null key is not valid in this context", ex.Message); - } - } - - [Fact] - public void SetWithNullValue() - { - using (var muxer = Create()) - { - var db = muxer.GetDatabase(); - string key = Me(), value = null; - 
db.KeyDelete(key, CommandFlags.FireAndForget); - - db.StringSet(key, "abc", flags: CommandFlags.FireAndForget); - Assert.True(db.KeyExists(key)); - db.StringSet(key, value, flags: CommandFlags.FireAndForget); - - var actual = (string)db.StringGet(key); - Assert.Null(actual); - Assert.False(db.KeyExists(key)); - } - } - - [Fact] - public void SetWithDefaultValue() - { - using (var muxer = Create()) - { - var db = muxer.GetDatabase(); - string key = Me(); - var value = default(RedisValue); // this is kinda 0... ish - db.KeyDelete(key, CommandFlags.FireAndForget); - - db.StringSet(key, "abc", flags: CommandFlags.FireAndForget); - Assert.True(db.KeyExists(key)); - db.StringSet(key, value, flags: CommandFlags.FireAndForget); - - var actual = (string)db.StringGet(key); - Assert.Null(actual); - Assert.False(db.KeyExists(key)); - } - } - - [Fact] - public void SetWithZeroValue() - { - using (var muxer = Create()) - { - var db = muxer.GetDatabase(); - string key = Me(); - const long value = 0; - db.KeyDelete(key, CommandFlags.FireAndForget); - - db.StringSet(key, "abc", flags: CommandFlags.FireAndForget); - Assert.True(db.KeyExists(key)); - db.StringSet(key, value, flags: CommandFlags.FireAndForget); - - var actual = (string)db.StringGet(key); - Assert.Equal("0", actual); - Assert.True(db.KeyExists(key)); - } - } - - [Fact] - public async Task GetSetAsync() - { - using (var muxer = Create()) - { - var conn = muxer.GetDatabase(); - - RedisKey key = Me(); - var d0 = conn.KeyDeleteAsync(key); - var d1 = conn.KeyDeleteAsync(key); - var g1 = conn.StringGetAsync(key); - var s1 = conn.StringSetAsync(key, "123"); - var g2 = conn.StringGetAsync(key); - var d2 = conn.KeyDeleteAsync(key); - - await d0; - Assert.False(await d1); - Assert.Null((string)(await g1)); - Assert.True((await g1).IsNull); - await s1; - Assert.Equal("123", await g2); - Assert.Equal(123, (int)(await g2)); - Assert.False((await g2).IsNull); - Assert.True(await d2); - } - } - - [Fact] - public void GetSetSync() - { 
- using (var muxer = Create()) - { - var conn = muxer.GetDatabase(); - - RedisKey key = Me(); - conn.KeyDelete(key, CommandFlags.FireAndForget); - var d1 = conn.KeyDelete(key); - var g1 = conn.StringGet(key); - conn.StringSet(key, "123", flags: CommandFlags.FireAndForget); - var g2 = conn.StringGet(key); - var d2 = conn.KeyDelete(key); - - Assert.False(d1); - Assert.Null((string)g1); - Assert.True(g1.IsNull); - - Assert.Equal("123", g2); - Assert.Equal(123, (int)g2); - Assert.False(g2.IsNull); - Assert.True(d2); - } - } - - [Theory] - [InlineData(false, false)] - [InlineData(true, true)] - [InlineData(true, false)] - public async Task GetWithExpiry(bool exists, bool hasExpiry) - { - using (var conn = Create()) - { - var db = conn.GetDatabase(); - RedisKey key = Me(); - db.KeyDelete(key, CommandFlags.FireAndForget); - if (exists) - { - if (hasExpiry) - db.StringSet(key, "val", TimeSpan.FromMinutes(5), flags: CommandFlags.FireAndForget); - else - db.StringSet(key, "val", flags: CommandFlags.FireAndForget); - } - var async = db.StringGetWithExpiryAsync(key); - var syncResult = db.StringGetWithExpiry(key); - var asyncResult = await async; - - if (exists) - { - Assert.Equal("val", asyncResult.Value); - Assert.Equal(hasExpiry, asyncResult.Expiry.HasValue); - if (hasExpiry) Assert.True(asyncResult.Expiry.Value.TotalMinutes >= 4.9 && asyncResult.Expiry.Value.TotalMinutes <= 5); - Assert.Equal("val", syncResult.Value); - Assert.Equal(hasExpiry, syncResult.Expiry.HasValue); - if (hasExpiry) Assert.True(syncResult.Expiry.Value.TotalMinutes >= 4.9 && syncResult.Expiry.Value.TotalMinutes <= 5); - } - else - { - Assert.True(asyncResult.Value.IsNull); - Assert.False(asyncResult.Expiry.HasValue); - Assert.True(syncResult.Value.IsNull); - Assert.False(syncResult.Expiry.HasValue); - } - } - } - - [Fact] - public async Task GetWithExpiryWrongTypeAsync() - { - using (var conn = Create()) - { - var db = conn.GetDatabase(); - RedisKey key = Me(); - _ = db.KeyDeleteAsync(key); - _ = 
db.SetAddAsync(key, "abc"); - var ex = await Assert.ThrowsAsync(async () => - { - try - { - Log("Key: " + (string)key); - await db.StringGetWithExpiryAsync(key).ForAwait(); - } - catch (AggregateException e) - { - throw e.InnerExceptions[0]; - } - }).ForAwait(); - Assert.Equal("WRONGTYPE Operation against a key holding the wrong kind of value", ex.Message); - } - } - - [Fact] - public void GetWithExpiryWrongTypeSync() - { - RedisKey key = Me(); - var ex = Assert.Throws(() => - { - using (var conn = Create()) - { - var db = conn.GetDatabase(); - db.KeyDelete(key, CommandFlags.FireAndForget); - db.SetAdd(key, "abc", CommandFlags.FireAndForget); - db.StringGetWithExpiry(key); - } - }); - Assert.Equal("WRONGTYPE Operation against a key holding the wrong kind of value", ex.Message); - } - -#if DEBUG - [Fact] - public async Task TestSevered() - { - SetExpectedAmbientFailureCount(2); - using (var muxer = Create(allowAdmin: true)) - { - var db = muxer.GetDatabase(); - string key = Me(); - db.KeyDelete(key, CommandFlags.FireAndForget); - db.StringSet(key, key, flags: CommandFlags.FireAndForget); - var server = GetServer(muxer); - server.SimulateConnectionFailure(); - var watch = Stopwatch.StartNew(); - await UntilCondition(TimeSpan.FromSeconds(10), () => server.IsConnected); - watch.Stop(); - Log("Time to re-establish: {0}ms (any order)", watch.ElapsedMilliseconds); - await UntilCondition(TimeSpan.FromSeconds(10), () => key == db.StringGet(key)); - Debug.WriteLine("Pinging..."); - Assert.Equal(key, db.StringGet(key)); - } - } -#endif - - [Fact] - public async Task IncrAsync() - { - using (var muxer = Create()) - { - var conn = muxer.GetDatabase(); - RedisKey key = Me(); - conn.KeyDelete(key, CommandFlags.FireAndForget); - var nix = conn.KeyExistsAsync(key).ForAwait(); - var a = conn.StringGetAsync(key).ForAwait(); - var b = conn.StringIncrementAsync(key).ForAwait(); - var c = conn.StringGetAsync(key).ForAwait(); - var d = conn.StringIncrementAsync(key, 10).ForAwait(); - var 
e = conn.StringGetAsync(key).ForAwait(); - var f = conn.StringDecrementAsync(key, 11).ForAwait(); - var g = conn.StringGetAsync(key).ForAwait(); - var h = conn.KeyExistsAsync(key).ForAwait(); - Assert.False(await nix); - Assert.True((await a).IsNull); - Assert.Equal(0, (long)(await a)); - Assert.Equal(1, await b); - Assert.Equal(1, (long)(await c)); - Assert.Equal(11, await d); - Assert.Equal(11, (long)(await e)); - Assert.Equal(0, await f); - Assert.Equal(0, (long)(await g)); - Assert.True(await h); - } - } - - [Fact] - public void IncrSync() - { - using (var muxer = Create()) - { - var conn = muxer.GetDatabase(); - RedisKey key = Me(); - conn.KeyDelete(key, CommandFlags.FireAndForget); - var nix = conn.KeyExists(key); - var a = conn.StringGet(key); - var b = conn.StringIncrement(key); - var c = conn.StringGet(key); - var d = conn.StringIncrement(key, 10); - var e = conn.StringGet(key); - var f = conn.StringDecrement(key, 11); - var g = conn.StringGet(key); - var h = conn.KeyExists(key); - Assert.False(nix); - Assert.True(a.IsNull); - Assert.Equal(0, (long)a); - Assert.Equal(1, b); - Assert.Equal(1, (long)c); - Assert.Equal(11, d); - Assert.Equal(11, (long)e); - Assert.Equal(0, f); - Assert.Equal(0, (long)g); - Assert.True(h); - } - } - - [Fact] - public void IncrDifferentSizes() - { - using (var muxer = Create()) - { - var db = muxer.GetDatabase(); - RedisKey key = Me(); - db.KeyDelete(key, CommandFlags.FireAndForget); - int expected = 0; - Incr(db, key, -129019, ref expected); - Incr(db, key, -10023, ref expected); - Incr(db, key, -9933, ref expected); - Incr(db, key, -23, ref expected); - Incr(db, key, -7, ref expected); - Incr(db, key, -1, ref expected); - Incr(db, key, 0, ref expected); - Incr(db, key, 1, ref expected); - Incr(db, key, 9, ref expected); - Incr(db, key, 11, ref expected); - Incr(db, key, 345, ref expected); - Incr(db, key, 4982, ref expected); - Incr(db, key, 13091, ref expected); - Incr(db, key, 324092, ref expected); - Assert.NotEqual(0, 
expected); - var sum = (long)db.StringGet(key); - Assert.Equal(expected, sum); - } - } - - private void Incr(IDatabase database, RedisKey key, int delta, ref int total) - { - database.StringIncrement(key, delta, CommandFlags.FireAndForget); - total += delta; - } - - [Fact] - public void ShouldUseSharedMuxer() - { - Writer.WriteLine($"Shared: {SharedFixtureAvailable}"); - if (SharedFixtureAvailable) - { - using (var a = Create()) - { - Assert.IsNotType(a); - using (var b = Create()) - { - Assert.Same(a, b); - } - } - } - else - { - using (var a = Create()) - { - Assert.IsType(a); - using (var b = Create()) - { - Assert.NotSame(a, b); - } - } - } - } - - [Fact] - public async Task Delete() - { - using (var muxer = Create()) - { - var db = muxer.GetDatabase(); - var key = Me(); - _ = db.StringSetAsync(key, "Heyyyyy"); - var ke1 = db.KeyExistsAsync(key).ForAwait(); - var ku1 = db.KeyDelete(key); - var ke2 = db.KeyExistsAsync(key).ForAwait(); - Assert.True(await ke1); - Assert.True(ku1); - Assert.False(await ke2); - } - } - - [Fact] - public async Task DeleteAsync() - { - using (var muxer = Create()) - { - var db = muxer.GetDatabase(); - var key = Me(); - _ = db.StringSetAsync(key, "Heyyyyy"); - var ke1 = db.KeyExistsAsync(key).ForAwait(); - var ku1 = db.KeyDeleteAsync(key).ForAwait(); - var ke2 = db.KeyExistsAsync(key).ForAwait(); - Assert.True(await ke1); - Assert.True(await ku1); - Assert.False(await ke2); - } - } - - [Fact] - public async Task DeleteMany() - { - using (var muxer = Create()) - { - var db = muxer.GetDatabase(); - var key1 = Me(); - var key2 = Me() + "2"; - var key3 = Me() + "3"; - _ = db.StringSetAsync(key1, "Heyyyyy"); - _ = db.StringSetAsync(key2, "Heyyyyy"); - // key 3 not set - var ku1 = db.KeyDelete(new RedisKey[] { key1, key2, key3 }); - var ke1 = db.KeyExistsAsync(key1).ForAwait(); - var ke2 = db.KeyExistsAsync(key2).ForAwait(); - Assert.Equal(2, ku1); - Assert.False(await ke1); - Assert.False(await ke2); - } - } - - [Fact] - public async Task 
DeleteManyAsync() - { - using (var muxer = Create()) - { - var db = muxer.GetDatabase(); - var key1 = Me(); - var key2 = Me() + "2"; - var key3 = Me() + "3"; - _ = db.StringSetAsync(key1, "Heyyyyy"); - _ = db.StringSetAsync(key2, "Heyyyyy"); - // key 3 not set - var ku1 = db.KeyDeleteAsync(new RedisKey[] { key1, key2, key3 }).ForAwait(); - var ke1 = db.KeyExistsAsync(key1).ForAwait(); - var ke2 = db.KeyExistsAsync(key2).ForAwait(); - Assert.Equal(2, await ku1); - Assert.False(await ke1); - Assert.False(await ke2); - } - } - - [Fact] - public void WrappedDatabasePrefixIntegration() - { - var key = Me(); - using (var conn = Create()) - { - var db = conn.GetDatabase().WithKeyPrefix("abc"); - db.KeyDelete(key, CommandFlags.FireAndForget); - db.StringIncrement(key, flags: CommandFlags.FireAndForget); - db.StringIncrement(key, flags: CommandFlags.FireAndForget); - db.StringIncrement(key, flags: CommandFlags.FireAndForget); - - int count = (int)conn.GetDatabase().StringGet("abc" + key); - Assert.Equal(3, count); - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/BatchTests.cs b/tests/StackExchange.Redis.Tests/BatchTests.cs new file mode 100644 index 000000000..2605b172d --- /dev/null +++ b/tests/StackExchange.Redis.Tests/BatchTests.cs @@ -0,0 +1,55 @@ +using System; +using System.Collections.Generic; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class BatchTests(ITestOutputHelper output, SharedConnectionFixture fixture) : TestBase(output, fixture) +{ + [Fact] + public async Task TestBatchNotSent() + { + await using var conn = Create(); + var db = conn.GetDatabase(); + var key = Me(); + _ = db.KeyDeleteAsync(key); + _ = db.StringSetAsync(key, "batch-not-sent"); + var batch = db.CreateBatch(); + + _ = batch.KeyDeleteAsync(key); + _ = batch.SetAddAsync(key, "a"); + _ = batch.SetAddAsync(key, "b"); + _ = batch.SetAddAsync(key, "c"); + + Assert.Equal("batch-not-sent", db.StringGet(key)); + } + + [Fact] + public async 
Task TestBatchSent() + { + await using var conn = Create(); + var db = conn.GetDatabase(); + var key = Me(); + _ = db.KeyDeleteAsync(key); + _ = db.StringSetAsync(key, "batch-sent"); + var tasks = new List(); + var batch = db.CreateBatch(); + tasks.Add(batch.KeyDeleteAsync(key)); + tasks.Add(batch.SetAddAsync(key, "a")); + tasks.Add(batch.SetAddAsync(key, "b")); + tasks.Add(batch.SetAddAsync(key, "c")); + batch.Execute(); + + var result = db.SetMembersAsync(key); + tasks.Add(result); + await Task.WhenAll(tasks.ToArray()); + + var arr = result.Result; + Array.Sort(arr, (x, y) => string.Compare(x, y)); + Assert.Equal(3, arr.Length); + Assert.Equal("a", arr[0]); + Assert.Equal("b", arr[1]); + Assert.Equal("c", arr[2]); + } +} diff --git a/tests/StackExchange.Redis.Tests/BatchWrapperTests.cs b/tests/StackExchange.Redis.Tests/BatchWrapperTests.cs deleted file mode 100644 index 1bb41c53b..000000000 --- a/tests/StackExchange.Redis.Tests/BatchWrapperTests.cs +++ /dev/null @@ -1,27 +0,0 @@ -using Moq; -using StackExchange.Redis.KeyspaceIsolation; -using System.Text; -using Xunit; - -namespace StackExchange.Redis.Tests -{ - [Collection(nameof(MoqDependentCollection))] - public sealed class BatchWrapperTests - { - private readonly Mock mock; - private readonly BatchWrapper wrapper; - - public BatchWrapperTests() - { - mock = new Mock(); - wrapper = new BatchWrapper(mock.Object, Encoding.UTF8.GetBytes("prefix:")); - } - - [Fact] - public void Execute() - { - wrapper.Execute(); - mock.Verify(_ => _.Execute(), Times.Once()); - } - } -} diff --git a/tests/StackExchange.Redis.Tests/Batches.cs b/tests/StackExchange.Redis.Tests/Batches.cs deleted file mode 100644 index 04174cad0..000000000 --- a/tests/StackExchange.Redis.Tests/Batches.cs +++ /dev/null @@ -1,64 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Threading.Tasks; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - [Collection(SharedConnectionFixture.Key)] - public 
class Batches : TestBase - { - public Batches(ITestOutputHelper output, SharedConnectionFixture fixture) : base(output, fixture) { } - - [Fact] - public void TestBatchNotSent() - { - using (var muxer = Create()) - { - var conn = muxer.GetDatabase(); - var key = Me(); - conn.KeyDeleteAsync(key); - conn.StringSetAsync(key, "batch-not-sent"); - var batch = conn.CreateBatch(); - - batch.KeyDeleteAsync(key); - batch.SetAddAsync(key, "a"); - batch.SetAddAsync(key, "b"); - batch.SetAddAsync(key, "c"); - - Assert.Equal("batch-not-sent", conn.StringGet(key)); - } - } - - [Fact] - public void TestBatchSent() - { - using (var muxer = Create()) - { - var conn = muxer.GetDatabase(); - var key = Me(); - conn.KeyDeleteAsync(key); - conn.StringSetAsync(key, "batch-sent"); - var tasks = new List(); - var batch = conn.CreateBatch(); - tasks.Add(batch.KeyDeleteAsync(key)); - tasks.Add(batch.SetAddAsync(key, "a")); - tasks.Add(batch.SetAddAsync(key, "b")); - tasks.Add(batch.SetAddAsync(key, "c")); - batch.Execute(); - - var result = conn.SetMembersAsync(key); - tasks.Add(result); - Task.WhenAll(tasks.ToArray()); - - var arr = result.Result; - Array.Sort(arr, (x, y) => string.Compare(x, y)); - Assert.Equal(3, arr.Length); - Assert.Equal("a", arr[0]); - Assert.Equal("b", arr[1]); - Assert.Equal("c", arr[2]); - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/BitTests.cs b/tests/StackExchange.Redis.Tests/BitTests.cs new file mode 100644 index 000000000..b4c032366 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/BitTests.cs @@ -0,0 +1,21 @@ +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +[RunPerProtocol] +public class BitTests(ITestOutputHelper output, SharedConnectionFixture fixture) : TestBase(output, fixture) +{ + [Fact] + public async Task BasicOps() + { + await using var conn = Create(); + var db = conn.GetDatabase(); + RedisKey key = Me(); + + db.KeyDelete(key, CommandFlags.FireAndForget); + db.StringSetBit(key, 10, true); + 
Assert.True(db.StringGetBit(key, 10)); + Assert.False(db.StringGetBit(key, 11)); + } +} diff --git a/tests/StackExchange.Redis.Tests/Bits.cs b/tests/StackExchange.Redis.Tests/Bits.cs deleted file mode 100644 index 054d7661c..000000000 --- a/tests/StackExchange.Redis.Tests/Bits.cs +++ /dev/null @@ -1,26 +0,0 @@ -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - [Collection(SharedConnectionFixture.Key)] - public class Bits : TestBase - { - public Bits(ITestOutputHelper output, SharedConnectionFixture fixture) : base (output, fixture) { } - - [Fact] - public void BasicOps() - { - using (var conn = Create()) - { - var db = conn.GetDatabase(); - RedisKey key = Me(); - - db.KeyDelete(key, CommandFlags.FireAndForget); - db.StringSetBit(key, 10, true); - Assert.True(db.StringGetBit(key, 10)); - Assert.False(db.StringGetBit(key, 11)); - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/BoxUnbox.cs b/tests/StackExchange.Redis.Tests/BoxUnbox.cs deleted file mode 100644 index c6e012372..000000000 --- a/tests/StackExchange.Redis.Tests/BoxUnbox.cs +++ /dev/null @@ -1,169 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Text; -using Xunit; - -namespace StackExchange.Redis.Tests -{ - public class BoxUnboxTests - { - [Theory] - [MemberData(nameof(RoundTripValues))] - public void RoundTripRedisValue(RedisValue value) - { - var boxed = value.Box(); - var unboxed = RedisValue.Unbox(boxed); - AssertEqualGiveOrTakeNaN(value, unboxed); - } - - [Theory] - [MemberData(nameof(UnboxValues))] - public void UnboxCommonValues(object value, RedisValue expected) - { - var unboxed = RedisValue.Unbox(value); - AssertEqualGiveOrTakeNaN(expected, unboxed); - } - - [Theory] - [MemberData(nameof(InternedValues))] - public void ReturnInternedBoxesForCommonValues(RedisValue value, bool expectSameReference) - { - object x = value.Box(), y = value.Box(); - Assert.Equal(expectSameReference, ReferenceEquals(x, y)); - // check we got the right 
values! - AssertEqualGiveOrTakeNaN(value, RedisValue.Unbox(x)); - AssertEqualGiveOrTakeNaN(value, RedisValue.Unbox(y)); - } - - static void AssertEqualGiveOrTakeNaN(RedisValue expected, RedisValue actual) - { - if (expected.Type == RedisValue.StorageType.Double && actual.Type == expected.Type) - { - // because NaN != NaN, we need to special-case this scenario - bool enan = double.IsNaN((double)expected), anan = double.IsNaN((double)actual); - if (enan | anan) - { - Assert.Equal(enan, anan); - return; // and that's all - } - } - Assert.Equal(expected, actual); - } - - private static readonly byte[] s_abc = Encoding.UTF8.GetBytes("abc"); - public static IEnumerable RoundTripValues - => new [] - { - new object[] { RedisValue.Null }, - new object[] { RedisValue.EmptyString }, - new object[] { (RedisValue)0L }, - new object[] { (RedisValue)1L }, - new object[] { (RedisValue)18L }, - new object[] { (RedisValue)19L }, - new object[] { (RedisValue)20L }, - new object[] { (RedisValue)21L }, - new object[] { (RedisValue)22L }, - new object[] { (RedisValue)(-1L) }, - new object[] { (RedisValue)0 }, - new object[] { (RedisValue)1 }, - new object[] { (RedisValue)18 }, - new object[] { (RedisValue)19 }, - new object[] { (RedisValue)20 }, - new object[] { (RedisValue)21 }, - new object[] { (RedisValue)22 }, - new object[] { (RedisValue)(-1) }, - new object[] { (RedisValue)0F }, - new object[] { (RedisValue)1F }, - new object[] { (RedisValue)(-1F) }, - new object[] { (RedisValue)0D }, - new object[] { (RedisValue)1D }, - new object[] { (RedisValue)(-1D) }, - new object[] { (RedisValue)float.PositiveInfinity }, - new object[] { (RedisValue)float.NegativeInfinity }, - new object[] { (RedisValue)float.NaN }, - new object[] { (RedisValue)double.PositiveInfinity }, - new object[] { (RedisValue)double.NegativeInfinity }, - new object[] { (RedisValue)double.NaN }, - new object[] { (RedisValue)true }, - new object[] { (RedisValue)false }, - new object[] { (RedisValue)(string)null }, - new 
object[] { (RedisValue)"abc" }, - new object[] { (RedisValue)s_abc }, - new object[] { (RedisValue)new Memory(s_abc) }, - new object[] { (RedisValue)new ReadOnlyMemory(s_abc) }, - }; - - public static IEnumerable UnboxValues - => new [] - { - new object[] { null, RedisValue.Null }, - new object[] { "", RedisValue.EmptyString }, - new object[] { 0, (RedisValue)0 }, - new object[] { 1, (RedisValue)1 }, - new object[] { 18, (RedisValue)18 }, - new object[] { 19, (RedisValue)19 }, - new object[] { 20, (RedisValue)20 }, - new object[] { 21, (RedisValue)21 }, - new object[] { 22, (RedisValue)22 }, - new object[] { -1, (RedisValue)(-1) }, - new object[] { 18L, (RedisValue)18 }, - new object[] { 19L, (RedisValue)19 }, - new object[] { 20L, (RedisValue)20 }, - new object[] { 21L, (RedisValue)21 }, - new object[] { 22L, (RedisValue)22 }, - new object[] { -1L, (RedisValue)(-1) }, - new object[] { 0F, (RedisValue)0 }, - new object[] { 1F, (RedisValue)1 }, - new object[] { -1F, (RedisValue)(-1) }, - new object[] { 0D, (RedisValue)0 }, - new object[] { 1D, (RedisValue)1 }, - new object[] { -1D, (RedisValue)(-1) }, - new object[] { float.PositiveInfinity, (RedisValue)double.PositiveInfinity }, - new object[] { float.NegativeInfinity, (RedisValue)double.NegativeInfinity }, - new object[] { float.NaN, (RedisValue)double.NaN }, - new object[] { double.PositiveInfinity, (RedisValue)double.PositiveInfinity }, - new object[] { double.NegativeInfinity, (RedisValue)double.NegativeInfinity }, - new object[] { double.NaN, (RedisValue)double.NaN }, - new object[] { true, (RedisValue)true }, - new object[] { false, (RedisValue)false}, - new object[] { "abc", (RedisValue)"abc" }, - new object[] { s_abc, (RedisValue)s_abc }, - new object[] { new Memory(s_abc), (RedisValue)s_abc }, - new object[] { new ReadOnlyMemory(s_abc), (RedisValue)s_abc }, - new object[] { (RedisValue)1234, (RedisValue)1234 }, - }; - - public static IEnumerable InternedValues() - { - for(int i = -20; i <= 40; i++) - { - 
bool expectInterned = i >= -1 & i <= 20; - yield return new object[] { (RedisValue)i, expectInterned }; - yield return new object[] { (RedisValue)(long)i, expectInterned }; - yield return new object[] { (RedisValue)(float)i, expectInterned }; - yield return new object[] { (RedisValue)(double)i, expectInterned }; - } - - yield return new object[] { (RedisValue)float.NegativeInfinity, true }; - yield return new object[] { (RedisValue)(-0.5F), false }; - yield return new object[] { (RedisValue)(0.5F), false }; - yield return new object[] { (RedisValue)float.PositiveInfinity, true }; - yield return new object[] { (RedisValue)float.NaN, true }; - - yield return new object[] { (RedisValue)double.NegativeInfinity, true }; - yield return new object[] { (RedisValue)(-0.5D), false }; - yield return new object[] { (RedisValue)(0.5D), false }; - yield return new object[] { (RedisValue)double.PositiveInfinity, true }; - yield return new object[] { (RedisValue)double.NaN, true }; - - yield return new object[] { (RedisValue)true, true }; - yield return new object[] { (RedisValue)false, true }; - yield return new object[] { RedisValue.Null, true }; - yield return new object[] { RedisValue.EmptyString, true }; - yield return new object[] { (RedisValue)"abc", true }; - yield return new object[] { (RedisValue)s_abc, true }; - yield return new object[] { (RedisValue)new Memory(s_abc), false }; - yield return new object[] { (RedisValue)new ReadOnlyMemory(s_abc), false }; - } - } -} diff --git a/tests/StackExchange.Redis.Tests/BoxUnboxTests.cs b/tests/StackExchange.Redis.Tests/BoxUnboxTests.cs new file mode 100644 index 000000000..033a24839 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/BoxUnboxTests.cs @@ -0,0 +1,168 @@ +using System; +using System.Collections.Generic; +using System.Text; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class BoxUnboxTests +{ + [Theory] + [MemberData(nameof(RoundTripValues))] + public void RoundTripRedisValue(RedisValue value) + { + 
var boxed = value.Box(); + var unboxed = RedisValue.Unbox(boxed); + AssertEqualGiveOrTakeNaN(value, unboxed); + } + + [Theory] + [MemberData(nameof(UnboxValues))] + public void UnboxCommonValues(object value, RedisValue expected) + { + var unboxed = RedisValue.Unbox(value); + AssertEqualGiveOrTakeNaN(expected, unboxed); + } + + [Theory] + [MemberData(nameof(InternedValues))] + public void ReturnInternedBoxesForCommonValues(RedisValue value, bool expectSameReference) + { + object? x = value.Box(), y = value.Box(); + Assert.Equal(expectSameReference, ReferenceEquals(x, y)); + // check we got the right values! + AssertEqualGiveOrTakeNaN(value, RedisValue.Unbox(x)); + AssertEqualGiveOrTakeNaN(value, RedisValue.Unbox(y)); + } + + private static void AssertEqualGiveOrTakeNaN(RedisValue expected, RedisValue actual) + { + if (expected.Type == RedisValue.StorageType.Double && actual.Type == expected.Type) + { + // because NaN != NaN, we need to special-case this scenario + bool enan = double.IsNaN((double)expected), anan = double.IsNaN((double)actual); + if (enan | anan) + { + Assert.Equal(enan, anan); + return; // and that's all + } + } + Assert.Equal(expected, actual); + } + + private static readonly byte[] s_abc = Encoding.UTF8.GetBytes("abc"); + public static IEnumerable RoundTripValues + => new[] + { + new object[] { RedisValue.Null }, + [RedisValue.EmptyString], + [(RedisValue)0L], + [(RedisValue)1L], + [(RedisValue)18L], + [(RedisValue)19L], + [(RedisValue)20L], + [(RedisValue)21L], + [(RedisValue)22L], + [(RedisValue)(-1L)], + [(RedisValue)0], + [(RedisValue)1], + [(RedisValue)18], + [(RedisValue)19], + [(RedisValue)20], + [(RedisValue)21], + [(RedisValue)22], + [(RedisValue)(-1)], + [(RedisValue)0F], + [(RedisValue)1F], + [(RedisValue)(-1F)], + [(RedisValue)0D], + [(RedisValue)1D], + [(RedisValue)(-1D)], + [(RedisValue)float.PositiveInfinity], + [(RedisValue)float.NegativeInfinity], + [(RedisValue)float.NaN], + [(RedisValue)double.PositiveInfinity], + 
[(RedisValue)double.NegativeInfinity], + [(RedisValue)double.NaN], + [(RedisValue)true], + [(RedisValue)false], + [(RedisValue)(string?)null], + [(RedisValue)"abc"], + [(RedisValue)s_abc], + [(RedisValue)new Memory(s_abc)], + [(RedisValue)new ReadOnlyMemory(s_abc)], + }; + + public static IEnumerable UnboxValues + => new[] + { + new object?[] { null, RedisValue.Null }, + ["", RedisValue.EmptyString], + [0, (RedisValue)0], + [1, (RedisValue)1], + [18, (RedisValue)18], + [19, (RedisValue)19], + [20, (RedisValue)20], + [21, (RedisValue)21], + [22, (RedisValue)22], + [-1, (RedisValue)(-1)], + [18L, (RedisValue)18], + [19L, (RedisValue)19], + [20L, (RedisValue)20], + [21L, (RedisValue)21], + [22L, (RedisValue)22], + [-1L, (RedisValue)(-1)], + [0F, (RedisValue)0], + [1F, (RedisValue)1], + [-1F, (RedisValue)(-1)], + [0D, (RedisValue)0], + [1D, (RedisValue)1], + [-1D, (RedisValue)(-1)], + [float.PositiveInfinity, (RedisValue)double.PositiveInfinity], + [float.NegativeInfinity, (RedisValue)double.NegativeInfinity], + [float.NaN, (RedisValue)double.NaN], + [double.PositiveInfinity, (RedisValue)double.PositiveInfinity], + [double.NegativeInfinity, (RedisValue)double.NegativeInfinity], + [double.NaN, (RedisValue)double.NaN], + [true, (RedisValue)true], + [false, (RedisValue)false], + ["abc", (RedisValue)"abc"], + [s_abc, (RedisValue)s_abc], + [new Memory(s_abc), (RedisValue)s_abc], + [new ReadOnlyMemory(s_abc), (RedisValue)s_abc], + [(RedisValue)1234, (RedisValue)1234], + }; + + public static IEnumerable InternedValues() + { + for (int i = -20; i <= 40; i++) + { + bool expectInterned = i >= -1 & i <= 20; + yield return new object[] { (RedisValue)i, expectInterned }; + yield return new object[] { (RedisValue)(long)i, expectInterned }; + yield return new object[] { (RedisValue)(float)i, expectInterned }; + yield return new object[] { (RedisValue)(double)i, expectInterned }; + } + + yield return new object[] { (RedisValue)float.NegativeInfinity, true }; + yield return new 
object[] { (RedisValue)(-0.5F), false }; + yield return new object[] { (RedisValue)0.5F, false }; + yield return new object[] { (RedisValue)float.PositiveInfinity, true }; + yield return new object[] { (RedisValue)float.NaN, true }; + + yield return new object[] { (RedisValue)double.NegativeInfinity, true }; + yield return new object[] { (RedisValue)(-0.5D), false }; + yield return new object[] { (RedisValue)0.5D, false }; + yield return new object[] { (RedisValue)double.PositiveInfinity, true }; + yield return new object[] { (RedisValue)double.NaN, true }; + + yield return new object[] { (RedisValue)true, true }; + yield return new object[] { (RedisValue)false, true }; + yield return new object[] { RedisValue.Null, true }; + yield return new object[] { RedisValue.EmptyString, true }; + yield return new object[] { (RedisValue)"abc", true }; + yield return new object[] { (RedisValue)s_abc, true }; + yield return new object[] { (RedisValue)new Memory(s_abc), false }; + yield return new object[] { (RedisValue)new ReadOnlyMemory(s_abc), false }; + } +} diff --git a/tests/StackExchange.Redis.Tests/CancellationTests.cs b/tests/StackExchange.Redis.Tests/CancellationTests.cs new file mode 100644 index 000000000..a512743f9 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/CancellationTests.cs @@ -0,0 +1,195 @@ +using System; +using System.Diagnostics; +using System.Threading; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +[Collection(NonParallelCollection.Name)] +public class CancellationTests(ITestOutputHelper output, SharedConnectionFixture fixture) : TestBase(output, fixture) +{ + [Fact] + public async Task WithCancellation_CancelledToken_ThrowsOperationCanceledException() + { +#if NETFRAMEWORK + Skip.UnlessLongRunning(); // unpredictable on netfx due to weak WaitAsync impl +#endif + + await using var conn = Create(); + var db = conn.GetDatabase(); + + using var cts = new CancellationTokenSource(); + cts.Cancel(); // Cancel 
immediately + + await Assert.ThrowsAnyAsync(async () => await db.StringSetAsync(Me(), "value").WaitAsync(cts.Token)); + } + + private IInternalConnectionMultiplexer Create() => Create(syncTimeout: 10_000); + + [Fact] + public async Task WithCancellation_ValidToken_OperationSucceeds() + { + await using var conn = Create(); + var db = conn.GetDatabase(); + + using var cts = new CancellationTokenSource(); + + RedisKey key = Me(); + // This should succeed + await db.StringSetAsync(key, "value"); + var result = await db.StringGetAsync(key).WaitAsync(cts.Token); + Assert.Equal("value", result); + } + + private static void Pause(IDatabase db) => db.Execute("client", ["pause", ConnectionPauseMilliseconds], CommandFlags.FireAndForget); + + private void Pause(IServer server) + { + server.Execute("client", new object[] { "pause", ConnectionPauseMilliseconds }, CommandFlags.FireAndForget); + } + + [Fact] + public async Task WithTimeout_ShortTimeout_Async_ThrowsOperationCanceledException() + { + Skip.UnlessLongRunning(); // because of CLIENT PAUSE impact to unrelated tests + + await using var conn = Create(); + var db = conn.GetDatabase(); + + var watch = Stopwatch.StartNew(); + Pause(db); + + var timeout = TimeSpan.FromMilliseconds(ShortDelayMilliseconds); + // This might throw due to timeout, but let's test the mechanism + var pending = db.StringSetAsync(Me(), "value").WaitAsync(timeout); // check we get past this + try + { + await pending; + // If it succeeds, that's fine too - Redis is fast + Assert.Fail(ExpectedCancel + ": " + watch.ElapsedMilliseconds + "ms"); + } + catch (TimeoutException) + { + // Expected for very short timeouts + Log($"Timeout after {watch.ElapsedMilliseconds}ms"); + } + } + + private const string ExpectedCancel = "This operation should have been cancelled"; + + [Fact] + public async Task WithoutCancellation_OperationsWorkNormally() + { + await using var conn = Create(); + var db = conn.GetDatabase(); + + // No cancellation - should work normally + 
RedisKey key = Me(); + await db.StringSetAsync(key, "value"); + var result = await db.StringGetAsync(key); + Assert.Equal("value", result); + } + + public enum CancelStrategy + { + Constructor, + Method, + Manual, + } + + private const int ConnectionPauseMilliseconds = 50, ShortDelayMilliseconds = 5; + + private static CancellationTokenSource CreateCts(CancelStrategy strategy) + { + switch (strategy) + { + case CancelStrategy.Constructor: + return new CancellationTokenSource(TimeSpan.FromMilliseconds(ShortDelayMilliseconds)); + case CancelStrategy.Method: + var cts = new CancellationTokenSource(); + cts.CancelAfter(TimeSpan.FromMilliseconds(ShortDelayMilliseconds)); + return cts; + case CancelStrategy.Manual: + cts = new(); + _ = Task.Run(async () => + { + await Task.Delay(ShortDelayMilliseconds); + // ReSharper disable once MethodHasAsyncOverload - TFM-dependent + cts.Cancel(); + }); + return cts; + default: + throw new ArgumentOutOfRangeException(nameof(strategy)); + } + } + + [Theory] + [InlineData(CancelStrategy.Constructor)] + [InlineData(CancelStrategy.Method)] + [InlineData(CancelStrategy.Manual)] + public async Task CancellationDuringOperation_Async_CancelsGracefully(CancelStrategy strategy) + { + Skip.UnlessLongRunning(); // because of CLIENT PAUSE impact to unrelated tests + + await using var conn = Create(); + var db = conn.GetDatabase(); + + var watch = Stopwatch.StartNew(); + Pause(db); + + // Cancel after a short delay + using var cts = CreateCts(strategy); + + // Start an operation and cancel it mid-flight + var pending = db.StringSetAsync($"{Me()}:{strategy}", "value").WaitAsync(cts.Token); + + try + { + await pending; + Assert.Fail(ExpectedCancel + ": " + watch.ElapsedMilliseconds + "ms"); + } + catch (OperationCanceledException oce) + { + // Expected if cancellation happens during operation + Log($"Cancelled after {watch.ElapsedMilliseconds}ms"); + Assert.Equal(cts.Token, oce.CancellationToken); + } + } + + [Fact] + public async Task 
ScanCancellable() + { + Skip.UnlessLongRunning(); // because of CLIENT PAUSE impact to unrelated tests + + using var conn = Create(); + var db = conn.GetDatabase(); + var server = conn.GetServer(conn.GetEndPoints()[0]); + + using var cts = new CancellationTokenSource(); + + var watch = Stopwatch.StartNew(); + Pause(server); + try + { + db.StringSet(Me(), "value", TimeSpan.FromMinutes(5), flags: CommandFlags.FireAndForget); + await using var iter = server.KeysAsync(pageSize: 1000).WithCancellation(cts.Token).GetAsyncEnumerator(); + var pending = iter.MoveNextAsync(); + Assert.False(cts.Token.IsCancellationRequested); + cts.CancelAfter(ShortDelayMilliseconds); // start this *after* we've got past the initial check + while (await pending) + { + pending = iter.MoveNextAsync(); + } + Assert.Fail($"{ExpectedCancel}: {watch.ElapsedMilliseconds}ms"); + } + catch (OperationCanceledException oce) + { + var taken = watch.ElapsedMilliseconds; + // Expected if cancellation happens during operation + Log($"Cancelled after {taken}ms"); + Assert.True(taken < (ConnectionPauseMilliseconds * 3) / 4, $"Should have cancelled sooner; took {taken}ms"); + Assert.Equal(cts.Token, oce.CancellationToken); + } + } +} diff --git a/tests/StackExchange.Redis.Tests/Certificates/CertValidationTests.cs b/tests/StackExchange.Redis.Tests/Certificates/CertValidationTests.cs new file mode 100644 index 000000000..fa80114f8 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/Certificates/CertValidationTests.cs @@ -0,0 +1,66 @@ +using System; +using System.IO; +using System.Net.Security; +using System.Security.Cryptography.X509Certificates; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class CertValidationTests(ITestOutputHelper output) : TestBase(output) +{ + [Fact] + public void CheckIssuerValidity() + { + // The endpoint cert is the same here + var endpointCert = LoadCert(Path.Combine("Certificates", "device01.foo.com.pem")); + + // Trusting CA explicitly + var callback = 
ConfigurationOptions.TrustIssuerCallback(Path.Combine("Certificates", "ca.foo.com.pem")); + Assert.True(callback(this, endpointCert, null, SslPolicyErrors.None), "subtest 1a"); + Assert.True(callback(this, endpointCert, null, SslPolicyErrors.RemoteCertificateChainErrors), "subtest 1b"); + Assert.False(callback(this, endpointCert, null, SslPolicyErrors.RemoteCertificateNameMismatch), "subtest 1c"); + Assert.False(callback(this, endpointCert, null, SslPolicyErrors.RemoteCertificateNotAvailable), "subtest 1d"); + Assert.False(callback(this, endpointCert, null, SslPolicyErrors.RemoteCertificateChainErrors | SslPolicyErrors.RemoteCertificateNameMismatch), "subtest 1e"); + Assert.False(callback(this, endpointCert, null, SslPolicyErrors.RemoteCertificateChainErrors | SslPolicyErrors.RemoteCertificateNotAvailable), "subtest 1f"); + + // Trusting the remote endpoint cert directly + callback = ConfigurationOptions.TrustIssuerCallback(Path.Combine("Certificates", "device01.foo.com.pem")); + Assert.True(callback(this, endpointCert, null, SslPolicyErrors.None), "subtest 2a"); + if (Runtime.IsMono) + { + // Mono doesn't support this cert usage, reports as rejection (happy for someone to work around this, but isn't high priority) + Assert.False(callback(this, endpointCert, null, SslPolicyErrors.RemoteCertificateChainErrors), "subtest 2b"); + } + else + { + Assert.True(callback(this, endpointCert, null, SslPolicyErrors.RemoteCertificateChainErrors), "subtest 2b"); + } + + Assert.False(callback(this, endpointCert, null, SslPolicyErrors.RemoteCertificateNameMismatch), "subtest 2c"); + Assert.False(callback(this, endpointCert, null, SslPolicyErrors.RemoteCertificateNotAvailable), "subtest 2d"); + Assert.False(callback(this, endpointCert, null, SslPolicyErrors.RemoteCertificateChainErrors | SslPolicyErrors.RemoteCertificateNameMismatch), "subtest 2e"); + Assert.False(callback(this, endpointCert, null, SslPolicyErrors.RemoteCertificateChainErrors | 
SslPolicyErrors.RemoteCertificateNotAvailable), "subtest 2f"); + + // Attempting to trust another CA (mismatch) + callback = ConfigurationOptions.TrustIssuerCallback(Path.Combine("Certificates", "ca2.foo.com.pem")); + Assert.True(callback(this, endpointCert, null, SslPolicyErrors.None), "subtest 3a"); + Assert.False(callback(this, endpointCert, null, SslPolicyErrors.RemoteCertificateChainErrors), "subtest 3b"); + Assert.False(callback(this, endpointCert, null, SslPolicyErrors.RemoteCertificateNameMismatch), "subtest 3c"); + Assert.False(callback(this, endpointCert, null, SslPolicyErrors.RemoteCertificateNotAvailable), "subtest 3d"); + Assert.False(callback(this, endpointCert, null, SslPolicyErrors.RemoteCertificateChainErrors | SslPolicyErrors.RemoteCertificateNameMismatch), "subtest 3e"); + Assert.False(callback(this, endpointCert, null, SslPolicyErrors.RemoteCertificateChainErrors | SslPolicyErrors.RemoteCertificateNotAvailable), "subtest 3f"); + } + +#pragma warning disable SYSLIB0057 + private static X509Certificate2 LoadCert(string certificatePath) => new X509Certificate2(File.ReadAllBytes(certificatePath)); +#pragma warning restore SYSLIB0057 + + [Fact] + public void CheckIssuerArgs() + { + Assert.ThrowsAny(() => ConfigurationOptions.TrustIssuerCallback("")); + + var opt = new ConfigurationOptions(); + Assert.Throws(() => opt.TrustIssuer((X509Certificate2)null!)); + } +} diff --git a/tests/StackExchange.Redis.Tests/Certificates/README.md b/tests/StackExchange.Redis.Tests/Certificates/README.md new file mode 100644 index 000000000..83fb5a14c --- /dev/null +++ b/tests/StackExchange.Redis.Tests/Certificates/README.md @@ -0,0 +1,4 @@ +The certificates here are randomly generated for testing only. +They are not valid and only used for test validation. + +Please do not file security issue noise - these have no impact being public at all. 
\ No newline at end of file diff --git a/tests/StackExchange.Redis.Tests/Certificates/ca.foo.com.pem b/tests/StackExchange.Redis.Tests/Certificates/ca.foo.com.pem new file mode 100644 index 000000000..33496adea --- /dev/null +++ b/tests/StackExchange.Redis.Tests/Certificates/ca.foo.com.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDTTCCAjWgAwIBAgIUU9SR3QMGVO8yN/mr8SQJ1p/OgIAwDQYJKoZIhvcNAQEL +BQAwNjETMBEGA1UEAwwKY2EuZm9vLmNvbTESMBAGA1UECgwJTXlEZXZpY2VzMQsw +CQYDVQQGEwJVUzAeFw0yNDAzMDcxNDAxMzJaFw00ODEwMjcxNDAxMzJaMDYxEzAR +BgNVBAMMCmNhLmZvby5jb20xEjAQBgNVBAoMCU15RGV2aWNlczELMAkGA1UEBhMC +VVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCqk8FT5dHU335oSEuY +RGeHOHmtxtr5Eoxe4pRHWcBKARzRYi+fPjP/aSWh75yYcmyQ5o5e2JQTZQRwSaLh +q8lrsT7AIeZboATVxECyT3kZdIJkLgWbfyzwJQtrW+ccDx3gDRt0FKRt8Hd3foIf +ULICgkiz3C5sihT589QWmcP4XhcRf3A1bt3rrFWJBO1jmKz0P7pijT14lkdW4sVL +AdFhoNg/a042a7wq2i8PxrkhWpwmHkW9ErnbWG9pRjMme+GDeNfGdHslL5grzbzC +4B4w3QP4opLUp29O9oO1DjaAuZ86JVdy3+glugMvj4f8NVCVlHxRM5Kn/3WgWIIM +XBK7AgMBAAGjUzBRMB0GA1UdDgQWBBRmgj4urVgvTcPgJtyqyUHaFX0svjAfBgNV +HSMEGDAWgBRmgj4urVgvTcPgJtyqyUHaFX0svjAPBgNVHRMBAf8EBTADAQH/MA0G +CSqGSIb3DQEBCwUAA4IBAQB2DIGlKpCdluVHURfgA5zfwoOnhtOZm7lwC/zbNd5a +wNmb6Vy29feN/+6/dv7MFTXXB5f0TkDGrGbAsKVLD5mNSfQhHC8sxwotheMYq6LS +T1Pjv3Vxku1O7q6FQrslDWfSBxzwep2q8fDXwD4C7VgVRM2EGg/vVee2juuTCmMU +Z1LwJrOkBczW6b3ZvUThFGOvZkuI138EuR2gqPHMQIiQcPyX1syT7yhJAyDQRYOG +cHSRojNciYVotSTgyYguUJdU7vfRJ+MLfoZClzJgvNav8yUC+sSrb5RD5vQlvxzG +KrJ8Hh+hpIFsmQKj5dcochKvLLd1Z748b2+FB0jtxraU +-----END CERTIFICATE----- diff --git a/tests/StackExchange.Redis.Tests/Certificates/ca2.foo.com.pem b/tests/StackExchange.Redis.Tests/Certificates/ca2.foo.com.pem new file mode 100644 index 000000000..b2b18d02b --- /dev/null +++ b/tests/StackExchange.Redis.Tests/Certificates/ca2.foo.com.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDTzCCAjegAwIBAgIUYXv168vvB1NPg3PfoRzcNFEMaC8wDQYJKoZIhvcNAQEL +BQAwNzEUMBIGA1UEAwwLY2EyLmZvby5jb20xEjAQBgNVBAoMCU15RGV2aWNlczEL 
+MAkGA1UEBhMCVVMwHhcNMjQwMzA3MTQwMTMyWhcNNDgxMDI3MTQwMTMyWjA3MRQw +EgYDVQQDDAtjYTIuZm9vLmNvbTESMBAGA1UECgwJTXlEZXZpY2VzMQswCQYDVQQG +EwJVUzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOFDZ2sf8Ik/I3jD +Mp4NGoo+kMY1BiRWjSKnaphfcosR2MAT/ROIVhnkzSeiQQByf34cqN/7xNkHaufr +oVcMuuiyWERPoZjBqnfzLZ+93uxnyIU6DVDdNIcKcBQxyhHMfOigFhKTia6eWhrf +zSaBhbkndaUsXdINBAJgSq3HDuk8bIw8MTZH0giorhIyyyqT/gjWEbzKx6Ww99qV +MMcjFIvXEmD9AEaNilHD4TtwqZrZKZpnVBaQvWrCK3tCGBDyiFlUhAibchbt/JzV +sK002TFfUbn41ygHmcrBVL7lSEsuT2W/PNuDOdWa6NQ2RVzYivs5jYbWV1cAvAJP +HMWJkZ8CAwEAAaNTMFEwHQYDVR0OBBYEFA6ZeCMPgDEu+eIUoCxU/Q06ViyoMB8G +A1UdIwQYMBaAFA6ZeCMPgDEu+eIUoCxU/Q06ViyoMA8GA1UdEwEB/wQFMAMBAf8w +DQYJKoZIhvcNAQELBQADggEBAGOa/AD0JNPwvyDi9wbVU+Yktx3vfuVyOMbnUQSn +nOyWd6d95rZwbeYyN908PjERQT3EMo8/O0eOpoG9I79vjbcD6LAIbxS9XdI8kK4+ +D4e/DX/R85KoWSprB+VRXGqsChY0Y+4x5x2q/IoCi6+tywhzjqIlaGDYrlc688HO +/+4iR9L945gpg4NT1hLnCwDYcdZ5vjv4NfgXDbGPUcEheYnfz3cHE2mYxEG9KXta +f8hSj/MNNv31BzNcj4XKcDqp4Ke3ow4lAZsPPlixOxxRaLnpsKZmEYYQcLI8KVNk +gdAUOSPZgzRqAag0rvVfrpyvfvlu0D9xeiBLdhaJeZCq1/s= +-----END CERTIFICATE----- diff --git a/tests/StackExchange.Redis.Tests/Certificates/create_certificates.sh b/tests/StackExchange.Redis.Tests/Certificates/create_certificates.sh new file mode 100644 index 000000000..71ef0caa3 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/Certificates/create_certificates.sh @@ -0,0 +1,39 @@ +#!/bin/bash +set -eu +# Adapted from https://github.com/stewartadam/dotnet-x509-certificate-verification/blob/main/create_certificates.sh + +base_dir="certificates" + +create_ca() { + local CA_CN="$1" + local certificate_output="${base_dir}/${CA_CN}.pem" + + openssl genrsa -out "${base_dir}/${CA_CN}.key.pem" 2048 # Generate private key + openssl req -x509 -new -nodes -key "${base_dir}/${CA_CN}.key.pem" -sha256 -days 9000 -out "${certificate_output}" -subj "/CN=${CA_CN}/O=MyDevices/C=US" # Generate root certificate + + echo -e "\nCertificate for CA ${CA_CN} saved to ${certificate_output}\n\n" +} + +create_leaf_cert_req() 
{ + local DEVICE_CN="$1" + + openssl genrsa -out "${base_dir}/${DEVICE_CN}.key.pem" 2048 # new private key + openssl req -new -key "${base_dir}/${DEVICE_CN}.key.pem" -out "${base_dir}/${DEVICE_CN}.csr.pem" -subj "/CN=${DEVICE_CN}/O=MyDevices/C=US" # generate signing request for the CA +} + +sign_leaf_cert() { + local DEVICE_CN="$1" + local CA_CN="$2" + local certificate_output="${base_dir}/${DEVICE_CN}.pem" + + openssl x509 -req -in "${base_dir}/${DEVICE_CN}.csr.pem" -CA ""${base_dir}/${CA_CN}.pem"" -CAkey "${base_dir}/${CA_CN}.key.pem" -set_serial 01 -out "${certificate_output}" -days 8999 -sha256 # sign the CSR + + echo -e "\nCertificate for ${DEVICE_CN} saved to ${certificate_output}\n\n" +} + +mkdir -p "${base_dir}" + +# Create one self-issued CA + signed cert +create_ca "ca.foo.com" +create_leaf_cert_req "device01.foo.com" +sign_leaf_cert "device01.foo.com" "ca.foo.com" \ No newline at end of file diff --git a/tests/StackExchange.Redis.Tests/Certificates/device01.foo.com.pem b/tests/StackExchange.Redis.Tests/Certificates/device01.foo.com.pem new file mode 100644 index 000000000..58f47641b --- /dev/null +++ b/tests/StackExchange.Redis.Tests/Certificates/device01.foo.com.pem @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC5jCCAc4CAQEwDQYJKoZIhvcNAQELBQAwNjETMBEGA1UEAwwKY2EuZm9vLmNv +bTESMBAGA1UECgwJTXlEZXZpY2VzMQswCQYDVQQGEwJVUzAeFw0yNDAzMDcxNDAx +MzJaFw00ODEwMjYxNDAxMzJaMDwxGTAXBgNVBAMMEGRldmljZTAxLmZvby5jb20x +EjAQBgNVBAoMCU15RGV2aWNlczELMAkGA1UEBhMCVVMwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQDBb4Mv87+MFVGLIWArc0wV1GH4h7Ha+49K+JAi8rtk +fpQACcu3OGq5TjUuxecOz5eXDwJj/vR1rvjP0DaCuIlx4SNXXqVKooWpCLb2g4Mr +IIiFcBsiaJNmhFvd92bqHOyuXsUTjkJKaLmH6nUqVIXEA/Py+jpuSFRp9N475IGZ +yUUdaQUx9Ur953FagLbPVeE5Vh+NEA8vnw+ZBCQRBHlRgvSJtCAR/oznXXPdHGGZ +rMWeNjl+v1iP8fZMq4vvooW0/zTVgH8lE/HJXtpaWEVeGpnOqBsgvl12WTGL5dMU +n81JiI3AdUyW0ieh/5yr+OFNa/HNqGLK1NvnCDPbBFpnAgMBAAEwDQYJKoZIhvcN +AQELBQADggEBAEpIIJJ7q4/EbCJog29Os9l5unn7QJon4R5TGJQIxdqDGrhXG8QA 
+HiBGl/lFhAp9tfKvQIj4aODzMgHmDpzZmH/yhjDlquJgB4JYDDjf9UhtwUUbRDp0 +rEc5VikLuTJ21hcusKALH5fgBjzplRNPH8P660FxWnv9gSWCMNaiFCCxTU91g4L3 +/qZPTl5nr1j6M/+zXbndS5qlF7GkU5Kv9xEmasZ0Z65Wao6Ufhw+nczLbiLErrxD +zLtTfr6WYFqzXeiPGnjTueG+1cYDjngmj2fbtjPn4W67q8Z0M/ZMj9ikr881d3zP +3dzUEaGexGqvA2MCapIQ2vCCMDF33ismQts= +-----END CERTIFICATE----- diff --git a/tests/StackExchange.Redis.Tests/ChannelTests.cs b/tests/StackExchange.Redis.Tests/ChannelTests.cs new file mode 100644 index 000000000..4843090f4 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/ChannelTests.cs @@ -0,0 +1,153 @@ +using System.Text; +using Xunit; + +namespace StackExchange.Redis.Tests +{ + public class ChannelTests + { + [Fact] + public void UseImplicitAutoPattern_OnByDefault() + { + Assert.True(RedisChannel.UseImplicitAutoPattern); + } + + [Theory] + [InlineData("abc", true, false)] + [InlineData("abc*def", true, true)] + [InlineData("abc", false, false)] + [InlineData("abc*def", false, false)] + public void ValidateAutoPatternModeString(string name, bool useImplicitAutoPattern, bool isPatternBased) + { + bool oldValue = RedisChannel.UseImplicitAutoPattern; + try + { + RedisChannel.UseImplicitAutoPattern = useImplicitAutoPattern; +#pragma warning disable CS0618 // we need to test the operator + RedisChannel channel = name; +#pragma warning restore CS0618 + Assert.Equal(isPatternBased, channel.IsPattern); + } + finally + { + RedisChannel.UseImplicitAutoPattern = oldValue; + } + } + + [Theory] + [InlineData("abc", RedisChannel.PatternMode.Auto, true, false)] + [InlineData("abc*def", RedisChannel.PatternMode.Auto, true, true)] + [InlineData("abc", RedisChannel.PatternMode.Literal, true, false)] + [InlineData("abc*def", RedisChannel.PatternMode.Literal, true, false)] + [InlineData("abc", RedisChannel.PatternMode.Pattern, true, true)] + [InlineData("abc*def", RedisChannel.PatternMode.Pattern, true, true)] + [InlineData("abc", RedisChannel.PatternMode.Auto, false, false)] + [InlineData("abc*def", 
RedisChannel.PatternMode.Auto, false, true)] + [InlineData("abc", RedisChannel.PatternMode.Literal, false, false)] + [InlineData("abc*def", RedisChannel.PatternMode.Literal, false, false)] + [InlineData("abc", RedisChannel.PatternMode.Pattern, false, true)] + [InlineData("abc*def", RedisChannel.PatternMode.Pattern, false, true)] + public void ValidateModeSpecifiedIgnoresGlobalSetting(string name, RedisChannel.PatternMode mode, bool useImplicitAutoPattern, bool isPatternBased) + { + bool oldValue = RedisChannel.UseImplicitAutoPattern; + try + { + RedisChannel.UseImplicitAutoPattern = useImplicitAutoPattern; + RedisChannel channel = new(name, mode); + Assert.Equal(isPatternBased, channel.IsPattern); + } + finally + { + RedisChannel.UseImplicitAutoPattern = oldValue; + } + } + + [Theory] + [InlineData("abc", true, false)] + [InlineData("abc*def", true, true)] + [InlineData("abc", false, false)] + [InlineData("abc*def", false, false)] + public void ValidateAutoPatternModeBytes(string name, bool useImplicitAutoPattern, bool isPatternBased) + { + var bytes = Encoding.UTF8.GetBytes(name); + bool oldValue = RedisChannel.UseImplicitAutoPattern; + try + { + RedisChannel.UseImplicitAutoPattern = useImplicitAutoPattern; +#pragma warning disable CS0618 // we need to test the operator + RedisChannel channel = bytes; +#pragma warning restore CS0618 + Assert.Equal(isPatternBased, channel.IsPattern); + } + finally + { + RedisChannel.UseImplicitAutoPattern = oldValue; + } + } + + [Theory] + [InlineData("abc", RedisChannel.PatternMode.Auto, true, false)] + [InlineData("abc*def", RedisChannel.PatternMode.Auto, true, true)] + [InlineData("abc", RedisChannel.PatternMode.Literal, true, false)] + [InlineData("abc*def", RedisChannel.PatternMode.Literal, true, false)] + [InlineData("abc", RedisChannel.PatternMode.Pattern, true, true)] + [InlineData("abc*def", RedisChannel.PatternMode.Pattern, true, true)] + [InlineData("abc", RedisChannel.PatternMode.Auto, false, false)] + 
[InlineData("abc*def", RedisChannel.PatternMode.Auto, false, true)] + [InlineData("abc", RedisChannel.PatternMode.Literal, false, false)] + [InlineData("abc*def", RedisChannel.PatternMode.Literal, false, false)] + [InlineData("abc", RedisChannel.PatternMode.Pattern, false, true)] + [InlineData("abc*def", RedisChannel.PatternMode.Pattern, false, true)] + public void ValidateModeSpecifiedIgnoresGlobalSettingBytes(string name, RedisChannel.PatternMode mode, bool useImplicitAutoPattern, bool isPatternBased) + { + var bytes = Encoding.UTF8.GetBytes(name); + bool oldValue = RedisChannel.UseImplicitAutoPattern; + try + { + RedisChannel.UseImplicitAutoPattern = useImplicitAutoPattern; + RedisChannel channel = new(bytes, mode); + Assert.Equal(isPatternBased, channel.IsPattern); + } + finally + { + RedisChannel.UseImplicitAutoPattern = oldValue; + } + } + + [Theory] + [InlineData("abc*def", false)] + [InlineData("abcdef", false)] + [InlineData("abc*def", true)] + [InlineData("abcdef", true)] + public void ValidateLiteralPatternMode(string name, bool useImplicitAutoPattern) + { + bool oldValue = RedisChannel.UseImplicitAutoPattern; + try + { + RedisChannel.UseImplicitAutoPattern = useImplicitAutoPattern; + RedisChannel channel; + + // literal, string + channel = RedisChannel.Literal(name); + Assert.False(channel.IsPattern); + + // pattern, string + channel = RedisChannel.Pattern(name); + Assert.True(channel.IsPattern); + + var bytes = Encoding.UTF8.GetBytes(name); + + // literal, byte[] + channel = RedisChannel.Literal(bytes); + Assert.False(channel.IsPattern); + + // pattern, byte[] + channel = RedisChannel.Pattern(bytes); + Assert.True(channel.IsPattern); + } + finally + { + RedisChannel.UseImplicitAutoPattern = oldValue; + } + } + } +} diff --git a/tests/StackExchange.Redis.Tests/ClientKillTests.cs b/tests/StackExchange.Redis.Tests/ClientKillTests.cs new file mode 100644 index 000000000..f10f69ef6 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/ClientKillTests.cs @@ 
-0,0 +1,59 @@ +using System.Collections.Generic; +using System.Net; +using System.Threading; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +[RunPerProtocol] + +public class ClientKillTests(ITestOutputHelper output) : TestBase(output) +{ + [Fact] + public async Task ClientKill() + { + SetExpectedAmbientFailureCount(-1); + await using var otherConnection = Create(allowAdmin: true, shared: false, backlogPolicy: BacklogPolicy.FailFast, require: RedisFeatures.v7_4_0_rc1); + var id = otherConnection.GetDatabase().Execute(RedisCommand.CLIENT.ToString(), RedisLiterals.ID); + + await using var conn = Create(allowAdmin: true, shared: false, backlogPolicy: BacklogPolicy.FailFast); + var server = conn.GetServer(conn.GetEndPoints()[0]); + long result = server.ClientKill(id.AsInt64(), ClientType.Normal, null, true); + Assert.Equal(1, result); + } + + [Fact] + public async Task ClientKillWithMaxAge() + { + SetExpectedAmbientFailureCount(-1); + await using var otherConnection = Create(allowAdmin: true, shared: false, backlogPolicy: BacklogPolicy.FailFast, require: RedisFeatures.v7_4_0_rc1); + var id = otherConnection.GetDatabase().Execute(RedisCommand.CLIENT.ToString(), RedisLiterals.ID); + await Task.Delay(1000); + + await using var conn = Create(allowAdmin: true, shared: false, backlogPolicy: BacklogPolicy.FailFast); + var server = conn.GetServer(conn.GetEndPoints()[0]); + var filter = new ClientKillFilter().WithId(id.AsInt64()).WithMaxAgeInSeconds(1).WithSkipMe(true); + long result = server.ClientKill(filter, CommandFlags.DemandMaster); + Assert.Equal(1, result); + } + + [Fact] + public void TestClientKillMessageWithAllArguments() + { + long id = 101; + ClientType type = ClientType.Normal; + string userName = "user1"; + EndPoint endpoint = new IPEndPoint(IPAddress.Parse("127.0.0.1"), 1234); + EndPoint serverEndpoint = new IPEndPoint(IPAddress.Parse("198.0.0.1"), 6379); + bool skipMe = true; + long maxAge = 102; + + var filter = new 
ClientKillFilter().WithId(id).WithClientType(type).WithUsername(userName).WithEndpoint(endpoint).WithServerEndpoint(serverEndpoint).WithSkipMe(skipMe).WithMaxAgeInSeconds(maxAge); + List expected = + [ + "KILL", "ID", "101", "TYPE", "normal", "USERNAME", "user1", "ADDR", "127.0.0.1:1234", "LADDR", "198.0.0.1:6379", "SKIPME", "yes", "MAXAGE", "102", + ]; + Assert.Equal(expected, filter.ToList(true)); + } +} diff --git a/tests/StackExchange.Redis.Tests/Cluster.cs b/tests/StackExchange.Redis.Tests/Cluster.cs deleted file mode 100644 index d0ef5cd88..000000000 --- a/tests/StackExchange.Redis.Tests/Cluster.cs +++ /dev/null @@ -1,757 +0,0 @@ -using System; -using System.Collections.Generic; -using System.IO; -using System.Linq; -using System.Net; -using System.Threading; -using System.Threading.Tasks; -using StackExchange.Redis.Profiling; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - public class Cluster : TestBase - { - public Cluster(ITestOutputHelper output) : base (output) { } - - protected override string GetConfiguration() - { - var server = TestConfig.Current.ClusterServer; - return string.Join(",", - Enumerable.Range(TestConfig.Current.ClusterStartPort, TestConfig.Current.ClusterServerCount).Select(port => server + ":" + port) - ) + ",connectTimeout=10000"; - } - - [Fact] - public void ExportConfiguration() - { - if (File.Exists("cluster.zip")) File.Delete("cluster.zip"); - Assert.False(File.Exists("cluster.zip")); - using (var muxer = Create(allowAdmin: true)) - using (var file = File.Create("cluster.zip")) - { - muxer.ExportConfiguration(file); - } - Assert.True(File.Exists("cluster.zip")); - } - - [Fact] - public void ConnectUsesSingleSocket() - { - using (var sw = new StringWriter()) - { - try - { - for (int i = 0; i < 5; i++) - { - using (var muxer = Create(failMessage: i + ": ", log: sw)) - { - foreach (var ep in muxer.GetEndPoints()) - { - var srv = muxer.GetServer(ep); - var counters = srv.GetCounters(); - Log($"{i}; 
interactive, {ep}, count: {counters.Interactive.SocketCount}"); - Log($"{i}; subscription, {ep}, count: {counters.Subscription.SocketCount}"); - } - foreach (var ep in muxer.GetEndPoints()) - { - var srv = muxer.GetServer(ep); - var counters = srv.GetCounters(); - Assert.Equal(1, counters.Interactive.SocketCount); - Assert.Equal(1, counters.Subscription.SocketCount); - } - } - } - } - finally - { - // Connection info goes at the end... - Log(sw.ToString()); - } - } - } - - [Fact] - public void CanGetTotalStats() - { - using (var muxer = Create()) - { - var counters = muxer.GetCounters(); - Log(counters.ToString()); - } - } - - private void PrintEndpoints(EndPoint[] endpoints) - { - Log($"Endpoints Expected: {TestConfig.Current.ClusterStartPort}+{TestConfig.Current.ClusterServerCount}"); - Log("Endpoints Found:"); - foreach (var endpoint in endpoints) - { - Log(" Endpoint: " + endpoint); - } - } - - [Fact] - public void Connect() - { - var expectedPorts = new HashSet(Enumerable.Range(TestConfig.Current.ClusterStartPort, TestConfig.Current.ClusterServerCount)); - using (var sw = new StringWriter()) - using (var muxer = Create(log: sw)) - { - var endpoints = muxer.GetEndPoints(); - if (TestConfig.Current.ClusterServerCount != endpoints.Length) - { - PrintEndpoints(endpoints); - } - else - { - Log(sw.ToString()); - } - - Assert.Equal(TestConfig.Current.ClusterServerCount, endpoints.Length); - int masters = 0, replicas = 0; - var failed = new List(); - foreach (var endpoint in endpoints) - { - var server = muxer.GetServer(endpoint); - if (!server.IsConnected) - { - failed.Add(endpoint); - } - Log("endpoint:" + endpoint); - Assert.Equal(endpoint, server.EndPoint); - - Log("endpoint-type:" + endpoint); - Assert.IsType(endpoint); - - Log("port:" + endpoint); - Assert.True(expectedPorts.Remove(((IPEndPoint)endpoint).Port)); - - Log("server-type:" + endpoint); - Assert.Equal(ServerType.Cluster, server.ServerType); - - if (server.IsReplica) replicas++; - else masters++; - } - 
if (failed.Count != 0) - { - Log("{0} failues", failed.Count); - foreach (var fail in failed) - { - Log(fail.ToString()); - } - Assert.True(false, "not all servers connected"); - } - - Assert.Equal(TestConfig.Current.ClusterServerCount / 2, replicas); - Assert.Equal(TestConfig.Current.ClusterServerCount / 2, masters); - } - } - - [Fact] - public void TestIdentity() - { - using (var conn = Create()) - { - RedisKey key = Guid.NewGuid().ToByteArray(); - var ep = conn.GetDatabase().IdentifyEndpoint(key); - Assert.Equal(ep, conn.GetServer(ep).ClusterConfiguration.GetBySlot(key).EndPoint); - } - } - - [Fact] - public void IntentionalWrongServer() - { - string StringGet(IServer server, RedisKey key, CommandFlags flags = CommandFlags.None) - => (string)server.Execute("GET", new object[] { key }, flags); - - using (var conn = Create()) - { - var endpoints = conn.GetEndPoints(); - var servers = endpoints.Select(e => conn.GetServer(e)).ToList(); - - var key = Me(); - const string value = "abc"; - var db = conn.GetDatabase(); - db.KeyDelete(key, CommandFlags.FireAndForget); - db.StringSet(key, value, flags: CommandFlags.FireAndForget); - servers.First().Ping(); - var config = servers.First().ClusterConfiguration; - Assert.NotNull(config); - int slot = conn.HashSlot(key); - var rightMasterNode = config.GetBySlot(key); - Assert.NotNull(rightMasterNode); - Log("Right Master: {0} {1}", rightMasterNode.EndPoint, rightMasterNode.NodeId); - - string a = StringGet(conn.GetServer(rightMasterNode.EndPoint), key); - Assert.Equal(value, a); // right master - - var node = config.Nodes.FirstOrDefault(x => !x.IsReplica && x.NodeId != rightMasterNode.NodeId); - Assert.NotNull(node); - Log("Using Master: {0}", node.EndPoint, node.NodeId); - { - string b = StringGet(conn.GetServer(node.EndPoint), key); - Assert.Equal(value, b); // wrong master, allow redirect - - var ex = Assert.Throws(() => StringGet(conn.GetServer(node.EndPoint), key, CommandFlags.NoRedirect)); - Assert.StartsWith($"Key has 
MOVED to Endpoint {rightMasterNode.EndPoint} and hashslot {slot}", ex.Message); - } - - node = config.Nodes.FirstOrDefault(x => x.IsReplica && x.ParentNodeId == rightMasterNode.NodeId); - Assert.NotNull(node); - { - string d = StringGet(conn.GetServer(node.EndPoint), key); - Assert.Equal(value, d); // right replica - } - - node = config.Nodes.FirstOrDefault(x => x.IsReplica && x.ParentNodeId != rightMasterNode.NodeId); - Assert.NotNull(node); - { - string e = StringGet(conn.GetServer(node.EndPoint), key); - Assert.Equal(value, e); // wrong replica, allow redirect - - var ex = Assert.Throws(() => StringGet(conn.GetServer(node.EndPoint), key, CommandFlags.NoRedirect)); - Assert.StartsWith($"Key has MOVED to Endpoint {rightMasterNode.EndPoint} and hashslot {slot}", ex.Message); - } - } - } - - [Fact] - public void TransactionWithMultiServerKeys() - { - var ex = Assert.Throws(() => - { - using (var muxer = Create()) - { - // connect - var cluster = muxer.GetDatabase(); - var anyServer = muxer.GetServer(muxer.GetEndPoints()[0]); - anyServer.Ping(); - Assert.Equal(ServerType.Cluster, anyServer.ServerType); - var config = anyServer.ClusterConfiguration; - Assert.NotNull(config); - - // invent 2 keys that we believe are served by different nodes - string x = Guid.NewGuid().ToString(), y; - var xNode = config.GetBySlot(x); - int abort = 1000; - do - { - y = Guid.NewGuid().ToString(); - } while (--abort > 0 && config.GetBySlot(y) == xNode); - if (abort == 0) Skip.Inconclusive("failed to find a different node to use"); - var yNode = config.GetBySlot(y); - Log("x={0}, served by {1}", x, xNode.NodeId); - Log("y={0}, served by {1}", y, yNode.NodeId); - Assert.NotEqual(xNode.NodeId, yNode.NodeId); - - // wipe those keys - cluster.KeyDelete(x, CommandFlags.FireAndForget); - cluster.KeyDelete(y, CommandFlags.FireAndForget); - - // create a transaction that attempts to assign both keys - var tran = cluster.CreateTransaction(); - tran.AddCondition(Condition.KeyNotExists(x)); - 
tran.AddCondition(Condition.KeyNotExists(y)); - _ = tran.StringSetAsync(x, "x-val"); - _ = tran.StringSetAsync(y, "y-val"); - tran.Execute(); - - Assert.True(false, "Expected single-slot rules to apply"); - // the rest no longer applies while we are following single-slot rules - - //// check that everything was aborted - //Assert.False(success, "tran aborted"); - //Assert.True(setX.IsCanceled, "set x cancelled"); - //Assert.True(setY.IsCanceled, "set y cancelled"); - //var existsX = cluster.KeyExistsAsync(x); - //var existsY = cluster.KeyExistsAsync(y); - //Assert.False(cluster.Wait(existsX), "x exists"); - //Assert.False(cluster.Wait(existsY), "y exists"); - } - }); - Assert.Equal("Multi-key operations must involve a single slot; keys can use 'hash tags' to help this, i.e. '{/users/12345}/account' and '{/users/12345}/contacts' will always be in the same slot", ex.Message); - } - - [Fact] - public void TransactionWithSameServerKeys() - { - var ex = Assert.Throws(() => - { - using (var muxer = Create()) - { - // connect - var cluster = muxer.GetDatabase(); - var anyServer = muxer.GetServer(muxer.GetEndPoints()[0]); - anyServer.Ping(); - var config = anyServer.ClusterConfiguration; - Assert.NotNull(config); - - // invent 2 keys that we believe are served by different nodes - string x = Guid.NewGuid().ToString(), y; - var xNode = config.GetBySlot(x); - int abort = 1000; - do - { - y = Guid.NewGuid().ToString(); - } while (--abort > 0 && config.GetBySlot(y) != xNode); - if (abort == 0) Skip.Inconclusive("failed to find a key with the same node to use"); - var yNode = config.GetBySlot(y); - Log("x={0}, served by {1}", x, xNode.NodeId); - Log("y={0}, served by {1}", y, yNode.NodeId); - Assert.Equal(xNode.NodeId, yNode.NodeId); - - // wipe those keys - cluster.KeyDelete(x, CommandFlags.FireAndForget); - cluster.KeyDelete(y, CommandFlags.FireAndForget); - - // create a transaction that attempts to assign both keys - var tran = cluster.CreateTransaction(); - 
tran.AddCondition(Condition.KeyNotExists(x)); - tran.AddCondition(Condition.KeyNotExists(y)); - _ = tran.StringSetAsync(x, "x-val"); - _ = tran.StringSetAsync(y, "y-val"); - tran.Execute(); - - Assert.True(false, "Expected single-slot rules to apply"); - // the rest no longer applies while we are following single-slot rules - - //// check that everything was aborted - //Assert.True(success, "tran aborted"); - //Assert.False(setX.IsCanceled, "set x cancelled"); - //Assert.False(setY.IsCanceled, "set y cancelled"); - //var existsX = cluster.KeyExistsAsync(x); - //var existsY = cluster.KeyExistsAsync(y); - //Assert.True(cluster.Wait(existsX), "x exists"); - //Assert.True(cluster.Wait(existsY), "y exists"); - } - }); - Assert.Equal("Multi-key operations must involve a single slot; keys can use 'hash tags' to help this, i.e. '{/users/12345}/account' and '{/users/12345}/contacts' will always be in the same slot", ex.Message); - } - - [Fact] - public void TransactionWithSameSlotKeys() - { - using (var muxer = Create()) - { - // connect - var cluster = muxer.GetDatabase(); - var anyServer = muxer.GetServer(muxer.GetEndPoints()[0]); - anyServer.Ping(); - var config = anyServer.ClusterConfiguration; - Assert.NotNull(config); - - // invent 2 keys that we believe are in the same slot - var guid = Guid.NewGuid().ToString(); - string x = "/{" + guid + "}/foo", y = "/{" + guid + "}/bar"; - - Assert.Equal(muxer.HashSlot(x), muxer.HashSlot(y)); - var xNode = config.GetBySlot(x); - var yNode = config.GetBySlot(y); - Log("x={0}, served by {1}", x, xNode.NodeId); - Log("y={0}, served by {1}", y, yNode.NodeId); - Assert.Equal(xNode.NodeId, yNode.NodeId); - - // wipe those keys - cluster.KeyDelete(x, CommandFlags.FireAndForget); - cluster.KeyDelete(y, CommandFlags.FireAndForget); - - // create a transaction that attempts to assign both keys - var tran = cluster.CreateTransaction(); - tran.AddCondition(Condition.KeyNotExists(x)); - tran.AddCondition(Condition.KeyNotExists(y)); - var setX 
= tran.StringSetAsync(x, "x-val"); - var setY = tran.StringSetAsync(y, "y-val"); - bool success = tran.Execute(); - - // check that everything was aborted - Assert.True(success, "tran aborted"); - Assert.False(setX.IsCanceled, "set x cancelled"); - Assert.False(setY.IsCanceled, "set y cancelled"); - var existsX = cluster.KeyExistsAsync(x); - var existsY = cluster.KeyExistsAsync(y); - Assert.True(cluster.Wait(existsX), "x exists"); - Assert.True(cluster.Wait(existsY), "y exists"); - } - } - - [Theory] - [InlineData(null, 10)] - [InlineData(null, 100)] - [InlineData("abc", 10)] - [InlineData("abc", 100)] - - public void Keys(string pattern, int pageSize) - { - using (var conn = Create(allowAdmin: true)) - { - _ = conn.GetDatabase(); - var server = conn.GetEndPoints().Select(x => conn.GetServer(x)).First(x => !x.IsReplica); - server.FlushAllDatabases(); - try - { - Assert.False(server.Keys(pattern: pattern, pageSize: pageSize).Any()); - Log("Complete: '{0}' / {1}", pattern, pageSize); - } - catch - { - Log("Failed: '{0}' / {1}", pattern, pageSize); - throw; - } - } - } - - [Theory] - [InlineData("", 0)] - [InlineData("abc", 7638)] - [InlineData("{abc}", 7638)] - [InlineData("abcdef", 15101)] - [InlineData("abc{abc}def", 7638)] - [InlineData("c", 7365)] - [InlineData("g", 7233)] - [InlineData("d", 11298)] - - [InlineData("user1000", 3443)] - [InlineData("{user1000}", 3443)] - [InlineData("abc{user1000}", 3443)] - [InlineData("abc{user1000}def", 3443)] - [InlineData("{user1000}.following", 3443)] - [InlineData("{user1000}.followers", 3443)] - - [InlineData("foo{}{bar}", 8363)] - - [InlineData("foo{{bar}}zap", 4015)] - [InlineData("{bar", 4015)] - - [InlineData("foo{bar}{zap}", 5061)] - [InlineData("bar", 5061)] - - public void HashSlots(string key, int slot) - { - using (var muxer = Create(connectTimeout: 5000)) - { - Assert.Equal(slot, muxer.HashSlot(key)); - } - } - - [Fact] - public void SScan() - { - using (var conn = Create()) - { - RedisKey key = "a"; - var db = 
conn.GetDatabase(); - db.KeyDelete(key, CommandFlags.FireAndForget); - - int totalUnfiltered = 0, totalFiltered = 0; - for (int i = 0; i < 1000; i++) - { - db.SetAdd(key, i, CommandFlags.FireAndForget); - totalUnfiltered += i; - if (i.ToString().Contains("3")) totalFiltered += i; - } - var unfilteredActual = db.SetScan(key).Select(x => (int)x).Sum(); - var filteredActual = db.SetScan(key, "*3*").Select(x => (int)x).Sum(); - Assert.Equal(totalUnfiltered, unfilteredActual); - Assert.Equal(totalFiltered, filteredActual); - } - } - - [Fact] - public void GetConfig() - { - using (var sw = new StringWriter()) - using (var muxer = Create(allowAdmin: true, log: sw)) - { - var endpoints = muxer.GetEndPoints(); - var server = muxer.GetServer(endpoints[0]); - var nodes = server.ClusterNodes(); - - Log("Endpoints:"); - foreach (var endpoint in endpoints) - { - Log(endpoint.ToString()); - } - Log("Nodes:"); - foreach (var node in nodes.Nodes.OrderBy(x => x)) - { - Log(node.ToString()); - } - Log(sw.ToString()); - - Assert.Equal(TestConfig.Current.ClusterServerCount, endpoints.Length); - Assert.Equal(TestConfig.Current.ClusterServerCount, nodes.Nodes.Count); - } - } - - [Fact] - public void AccessRandomKeys() - { - using (var conn = Create(allowAdmin: true)) - { - var cluster = conn.GetDatabase(); - int slotMovedCount = 0; - conn.HashSlotMoved += (s, a) => - { - Log("{0} moved from {1} to {2}", a.HashSlot, Describe(a.OldEndPoint), Describe(a.NewEndPoint)); - Interlocked.Increment(ref slotMovedCount); - }; - var pairs = new Dictionary(); - const int COUNT = 500; - int index = 0; - - var servers = conn.GetEndPoints().Select(x => conn.GetServer(x)).ToList(); - foreach (var server in servers) - { - if (!server.IsReplica) - { - server.Ping(); - server.FlushAllDatabases(); - } - } - - for (int i = 0; i < COUNT; i++) - { - var key = Guid.NewGuid().ToString(); - var value = Guid.NewGuid().ToString(); - pairs.Add(key, value); - cluster.StringSet(key, value, flags: 
CommandFlags.FireAndForget); - } - - var expected = new string[COUNT]; - var actual = new Task[COUNT]; - index = 0; - foreach (var pair in pairs) - { - expected[index] = pair.Value; - actual[index] = cluster.StringGetAsync(pair.Key); - index++; - } - cluster.WaitAll(actual); - for (int i = 0; i < COUNT; i++) - { - Assert.Equal(expected[i], actual[i].Result); - } - - int total = 0; - Parallel.ForEach(servers, server => - { - if (!server.IsReplica) - { - int count = server.Keys(pageSize: 100).Count(); - Log("{0} has {1} keys", server.EndPoint, count); - Interlocked.Add(ref total, count); - } - }); - - foreach (var server in servers) - { - var counters = server.GetCounters(); - Log(counters.ToString()); - } - int final = Interlocked.CompareExchange(ref total, 0, 0); - Assert.Equal(COUNT, final); - Assert.Equal(0, Interlocked.CompareExchange(ref slotMovedCount, 0, 0)); - } - } - - [Theory] - [InlineData(CommandFlags.DemandMaster, false)] - [InlineData(CommandFlags.DemandReplica, true)] - [InlineData(CommandFlags.PreferMaster, false)] - [InlineData(CommandFlags.PreferReplica, true)] - public void GetFromRightNodeBasedOnFlags(CommandFlags flags, bool isReplica) - { - using (var muxer = Create(allowAdmin: true)) - { - var db = muxer.GetDatabase(); - for (int i = 0; i < 1000; i++) - { - var key = Guid.NewGuid().ToString(); - var endpoint = db.IdentifyEndpoint(key, flags); - var server = muxer.GetServer(endpoint); - Assert.Equal(isReplica, server.IsReplica); - } - } - } - - private static string Describe(EndPoint endpoint) => endpoint?.ToString() ?? 
"(unknown)"; - - [Fact] - public void SimpleProfiling() - { - using (var conn = Create(log: Writer)) - { - var profiler = new ProfilingSession(); - var key = Me(); - var db = conn.GetDatabase(); - db.KeyDelete(key, CommandFlags.FireAndForget); - - conn.RegisterProfiler(() => profiler); - db.StringSet(key, "world"); - var val = db.StringGet(key); - Assert.Equal("world", val); - - var msgs = profiler.FinishProfiling().Where(m => m.Command == "GET" || m.Command == "SET").ToList(); - foreach (var msg in msgs) - { - Log("Profiler Message: " + Environment.NewLine + msg); - } - Log("Checking GET..."); - Assert.Contains(msgs, m => m.Command == "GET"); - Log("Checking SET..."); - Assert.Contains(msgs, m => m.Command == "SET"); - Assert.Equal(2, msgs.Count(m => m.RetransmissionOf is null)); - - var arr = msgs.Where(m => m.RetransmissionOf is null).ToArray(); - Assert.Equal("SET", arr[0].Command); - Assert.Equal("GET", arr[1].Command); - } - } - - [Fact] - public void MultiKeyQueryFails() - { - var keys = InventKeys(); // note the rules expected of this data are enforced in GroupedQueriesWork - - using (var conn = Create()) - { - var ex = Assert.Throws(() => conn.GetDatabase(0).StringGet(keys)); - Assert.Contains("Multi-key operations must involve a single slot", ex.Message); - } - } - - private static RedisKey[] InventKeys() - { - RedisKey[] keys = new RedisKey[256]; - Random rand = new Random(12324); - string InventString() - { - const string alphabet = "abcdefghijklmnopqrstuvwxyz012345689"; - var len = rand.Next(10, 50); - char[] chars = new char[len]; - for (int i = 0; i < len; i++) - chars[i] = alphabet[rand.Next(alphabet.Length)]; - return new string(chars); - } - - for (int i = 0; i < keys.Length; i++) - { - keys[i] = InventString(); - } - return keys; - } - - [Fact] - public void GroupedQueriesWork() - { - // note it doesn't matter that the data doesn't exist for this; - // the point here is that the entire thing *won't work* otherwise, - // as per above test - - var 
keys = InventKeys(); - using (var conn = Create()) - { - var grouped = keys.GroupBy(key => conn.GetHashSlot(key)).ToList(); - Assert.True(grouped.Count > 1); // check not all a super-group - Assert.True(grouped.Count < keys.Length); // check not all singleton groups - Assert.Equal(keys.Length, grouped.Sum(x => x.Count())); // check they're all there - Assert.Contains(grouped, x => x.Count() > 1); // check at least one group with multiple items (redundant from above, but... meh) - - Log($"{grouped.Count} groups, min: {grouped.Min(x => x.Count())}, max: {grouped.Max(x => x.Count())}, avg: {grouped.Average(x => x.Count())}"); - - var db = conn.GetDatabase(0); - var all = grouped.SelectMany(grp => { - var grpKeys = grp.ToArray(); - var values = db.StringGet(grpKeys); - return grpKeys.Zip(values, (key, val) => new { key, val }); - }).ToDictionary(x => x.key, x => x.val); - - Assert.Equal(keys.Length, all.Count); - } - } - - [Fact] - public void MovedProfiling() - { - var Key = Me(); - const string Value = "redirected-value"; - - var profiler = new Profiling.PerThreadProfiler(); - - using (var conn = Create()) - { - conn.RegisterProfiler(profiler.GetSession); - - var endpoints = conn.GetEndPoints(); - var servers = endpoints.Select(e => conn.GetServer(e)); - - var db = conn.GetDatabase(); - db.KeyDelete(Key); - db.StringSet(Key, Value); - var config = servers.First().ClusterConfiguration; - Assert.NotNull(config); - - //int slot = conn.HashSlot(Key); - var rightMasterNode = config.GetBySlot(Key); - Assert.NotNull(rightMasterNode); - - string a = (string)conn.GetServer(rightMasterNode.EndPoint).Execute("GET", Key); - Assert.Equal(Value, a); // right master - - var wrongMasterNode = config.Nodes.FirstOrDefault(x => !x.IsReplica && x.NodeId != rightMasterNode.NodeId); - Assert.NotNull(wrongMasterNode); - - string b = (string)conn.GetServer(wrongMasterNode.EndPoint).Execute("GET", Key); - Assert.Equal(Value, b); // wrong master, allow redirect - - var msgs = 
profiler.GetSession().FinishProfiling().ToList(); - - // verify that things actually got recorded properly, and the retransmission profilings are connected as expected - { - // expect 1 DEL, 1 SET, 1 GET (to right master), 1 GET (to wrong master) that was responded to by an ASK, and 1 GET (to right master or a replica of it) - Assert.Equal(5, msgs.Count); - Assert.Equal(1, msgs.Count(c => c.Command == "DEL" || c.Command == "UNLINK")); - Assert.Equal(1, msgs.Count(c => c.Command == "SET")); - Assert.Equal(3, msgs.Count(c => c.Command == "GET")); - - var toRightMasterNotRetransmission = msgs.Where(m => m.Command == "GET" && m.EndPoint.Equals(rightMasterNode.EndPoint) && m.RetransmissionOf == null); - Assert.Single(toRightMasterNotRetransmission); - - var toWrongMasterWithoutRetransmission = msgs.Where(m => m.Command == "GET" && m.EndPoint.Equals(wrongMasterNode.EndPoint) && m.RetransmissionOf == null).ToList(); - Assert.Single(toWrongMasterWithoutRetransmission); - - var toRightMasterOrReplicaAsRetransmission = msgs.Where(m => m.Command == "GET" && (m.EndPoint.Equals(rightMasterNode.EndPoint) || rightMasterNode.Children.Any(c => m.EndPoint.Equals(c.EndPoint))) && m.RetransmissionOf != null).ToList(); - Assert.Single(toRightMasterOrReplicaAsRetransmission); - - var originalWrongMaster = toWrongMasterWithoutRetransmission.Single(); - var retransmissionToRight = toRightMasterOrReplicaAsRetransmission.Single(); - - Assert.True(ReferenceEquals(originalWrongMaster, retransmissionToRight.RetransmissionOf)); - } - - foreach (var msg in msgs) - { - Assert.True(msg.CommandCreated != default(DateTime)); - Assert.True(msg.CreationToEnqueued > TimeSpan.Zero); - Assert.True(msg.EnqueuedToSending > TimeSpan.Zero); - Assert.True(msg.SentToResponse > TimeSpan.Zero); - Assert.True(msg.ResponseToCompletion >= TimeSpan.Zero); // this can be immeasurably fast - Assert.True(msg.ElapsedTime > TimeSpan.Zero); - - if (msg.RetransmissionOf != null) - { - // imprecision of DateTime.UtcNow 
makes this pretty approximate - Assert.True(msg.RetransmissionOf.CommandCreated <= msg.CommandCreated); - Assert.Equal(RetransmissionReasonType.Moved, msg.RetransmissionReason.Value); - } - else - { - Assert.False(msg.RetransmissionReason.HasValue); - } - } - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/ClusterShardedTests.cs b/tests/StackExchange.Redis.Tests/ClusterShardedTests.cs new file mode 100644 index 000000000..c9101fb08 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/ClusterShardedTests.cs @@ -0,0 +1,380 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +[RunPerProtocol] +[Collection(NonParallelCollection.Name)] +public class ClusterShardedTests(ITestOutputHelper output) : TestBase(output) +{ + protected override string GetConfiguration() => TestConfig.Current.ClusterServersAndPorts + ",connectTimeout=10000"; + + [Fact] + public async Task TestShardedPubsubSubscriberAgainstReconnects() + { + Skip.UnlessLongRunning(); + var channel = RedisChannel.Sharded(Me()); + await using var conn = Create(allowAdmin: true, keepAlive: 1, connectTimeout: 3000, shared: false, require: RedisFeatures.v7_0_0_rc1); + Assert.True(conn.IsConnected); + var db = conn.GetDatabase(); + Assert.Equal(0, await db.PublishAsync(channel, "noClientReceivesThis")); + await Task.Delay(50); // let the sub settle (this isn't needed on RESP3, note) + + var pubsub = conn.GetSubscriber(); + List<(RedisChannel, RedisValue)> received = []; + var queue = await pubsub.SubscribeAsync(channel); + _ = Task.Run(async () => + { + // use queue API to have control over order + await foreach (var item in queue) + { + lock (received) + { + if (item.Channel.IsSharded && item.Channel == channel) received.Add((item.Channel, item.Message)); + } + } + }); + Assert.Equal(1, conn.GetSubscriptionsCount()); + + await Task.Delay(50); // let the sub settle (this isn't 
needed on RESP3, note) + await db.PingAsync(); + + for (int i = 0; i < 5; i++) + { + // check we get a hit + Assert.Equal(1, await db.PublishAsync(channel, i.ToString())); + } + await Task.Delay(50); // let the sub settle (this isn't needed on RESP3, note) + + // this is endpoint at index 1 which has the hashslot for "testShardChannel" + var server = conn.GetServer(conn.GetEndPoints()[1]); + server.SimulateConnectionFailure(SimulatedFailureType.All); + SetExpectedAmbientFailureCount(2); + + await Task.Delay(4000); + for (int i = 0; i < 5; i++) + { + // check we get a hit + Assert.Equal(1, await db.PublishAsync(channel, i.ToString())); + } + await Task.Delay(50); // let the sub settle (this isn't needed on RESP3, note) + + Assert.Equal(1, conn.GetSubscriptionsCount()); + Assert.Equal(10, received.Count); + ClearAmbientFailures(); + } + + [Fact] + public async Task TestShardedPubsubSubscriberAgainsHashSlotMigration() + { + Skip.UnlessLongRunning(); + var channel = RedisChannel.Sharded(Me()); // invent a channel that will use SSUBSCRIBE + var key = (RedisKey)(byte[])channel!; // use the same value as a key, to test keyspace notifications via a single-key API + await using var conn = Create(allowAdmin: true, keepAlive: 1, connectTimeout: 3000, shared: false, require: RedisFeatures.v7_0_0_rc1); + Assert.True(conn.IsConnected); + var db = conn.GetDatabase(); + Assert.Equal(0, await db.PublishAsync(channel, "noClientReceivesThis")); + await Task.Delay(50); // let the sub settle (this isn't needed on RESP3, note) + + var pubsub = conn.GetSubscriber(); + var keynotify = RedisChannel.KeySpaceSingleKey(key, db.Database); + Assert.False(keynotify.IsSharded); // keyspace notifications do not use SSUBSCRIBE; this matters, because it means we don't get nuked when the slot migrates + Assert.False(keynotify.IsMultiNode); // we specificially want this *not* to be multi-node; we want to test that it follows the key correctly + + int keynotificationCount = 0; + await 
pubsub.SubscribeAsync(keynotify, (_, _) => Interlocked.Increment(ref keynotificationCount)); + try + { + List<(RedisChannel, RedisValue)> received = []; + var queue = await pubsub.SubscribeAsync(channel); + _ = Task.Run(async () => + { + // use queue API to have control over order + await foreach (var item in queue) + { + lock (received) + { + if (item.Channel.IsSharded && item.Channel == channel) + received.Add((item.Channel, item.Message)); + } + } + }); + Assert.Equal(2, conn.GetSubscriptionsCount()); + + await Task.Delay(50); // let the sub settle (this isn't needed on RESP3, note) + await db.PingAsync(); + + for (int i = 0; i < 5; i++) + { + // check we get a hit + Assert.Equal(1, await db.PublishAsync(channel, i.ToString())); + await db.StringIncrementAsync(key); + } + + await Task.Delay(50); // let the sub settle (this isn't needed on RESP3, note) + + // lets migrate the slot for "testShardChannel" to another node + await DoHashSlotMigrationAsync(); + + await Task.Delay(4000); + for (int i = 0; i < 5; i++) + { + // check we get a hit + Assert.Equal(1, await db.PublishAsync(channel, i.ToString())); + await db.StringIncrementAsync(key); + } + + await Task.Delay(50); // let the sub settle (this isn't needed on RESP3, note) + + Assert.Equal(2, conn.GetSubscriptionsCount()); + Assert.Equal(10, received.Count); + Assert.Equal(10, Volatile.Read(ref keynotificationCount)); + await RollbackHashSlotMigrationAsync(); + ClearAmbientFailures(); + } + finally + { + try + { + // ReSharper disable once MethodHasAsyncOverload - F+F + await pubsub.UnsubscribeAsync(keynotify, flags: CommandFlags.FireAndForget); + await pubsub.UnsubscribeAsync(channel, flags: CommandFlags.FireAndForget); + Log("Channels unsubscribed."); + } + catch (Exception ex) + { + Log($"Error while unsubscribing: {ex.Message}"); + } + } + } + + private Task DoHashSlotMigrationAsync() => MigrateSlotForTestShardChannelAsync(false); + private Task RollbackHashSlotMigrationAsync() => 
MigrateSlotForTestShardChannelAsync(true); + + private async Task MigrateSlotForTestShardChannelAsync(bool rollback) + { + int hashSlotForTestShardChannel = 7177; + await using var conn = Create(allowAdmin: true, keepAlive: 1, connectTimeout: 5000, shared: false); + var servers = conn.GetServers(); + IServer? serverWithPort7000 = null; + IServer? serverWithPort7001 = null; + + string nodeIdForPort7000 = "780813af558af81518e58e495d63b6e248e80adf"; + string nodeIdForPort7001 = "ea828c6074663c8bd4e705d3e3024d9d1721ef3b"; + foreach (var server in servers) + { + string id = server.Execute("CLUSTER", "MYID").ToString(); + if (id == nodeIdForPort7000) + { + serverWithPort7000 = server; + } + if (id == nodeIdForPort7001) + { + serverWithPort7001 = server; + } + } + + IServer fromServer, toServer; + string fromNode, toNode; + if (rollback) + { + fromServer = serverWithPort7000!; + fromNode = nodeIdForPort7000; + toServer = serverWithPort7001!; + toNode = nodeIdForPort7001; + } + else + { + fromServer = serverWithPort7001!; + fromNode = nodeIdForPort7001; + toServer = serverWithPort7000!; + toNode = nodeIdForPort7000; + } + + try + { + Assert.Equal("OK", toServer.Execute("CLUSTER", "SETSLOT", hashSlotForTestShardChannel, "IMPORTING", fromNode).ToString()); + Assert.Equal("OK", fromServer.Execute("CLUSTER", "SETSLOT", hashSlotForTestShardChannel, "MIGRATING", toNode).ToString()); + Assert.Equal("OK", toServer.Execute("CLUSTER", "SETSLOT", hashSlotForTestShardChannel, "NODE", toNode).ToString()); + Assert.Equal("OK", fromServer!.Execute("CLUSTER", "SETSLOT", hashSlotForTestShardChannel, "NODE", toNode).ToString()); + } + catch (RedisServerException ex) when (ex.Message == "ERR I'm already the owner of hash slot 7177") + { + Log("Slot already migrated."); + } + } + + [Theory] + [InlineData(true)] + [InlineData(false)] + public async Task SubscribeToWrongServerAsync(bool sharded) + { + // the purpose of this test is to simulate subscribing while a node move is happening, i.e. 
we send + // the SSUBSCRIBE to the wrong server, get a -MOVED, and redirect; in particular: do we end up *knowing* + // where we actually subscribed to? + // + // note: to check our thinking, we also do this for regular non-sharded channels too; the point here + // being that this should behave *differently*, since there will be no -MOVED + var name = $"{Me()}:{Guid.NewGuid()}"; + var channel = sharded ? RedisChannel.Sharded(name) : RedisChannel.Literal(name).WithKeyRouting(); + await using var conn = Create(require: RedisFeatures.v7_0_0_rc1); + + var asKey = (RedisKey)(byte[])channel!; + Assert.False(asKey.IsEmpty); + var shouldBeServer = conn.GetServer(asKey); // this is where it *should* go + + // now intentionally choose *a different* server + var server = conn.GetServers().First(s => !Equals(s.EndPoint, shouldBeServer.EndPoint)); + Log($"Should be {Format.ToString(shouldBeServer.EndPoint)}; routing via {Format.ToString(server.EndPoint)}"); + + var subscriber = Assert.IsType(conn.GetSubscriber()); + var serverEndpoint = conn.GetServerEndPoint(server.EndPoint); + Assert.Equal(server.EndPoint, serverEndpoint.EndPoint); + var queue = await subscriber.SubscribeAsync(channel, server: serverEndpoint); + await Task.Delay(50); + var actual = subscriber.SubscribedEndpoint(channel); + + if (sharded) + { + // we should end up at the correct node, following the -MOVED + Assert.Equal(shouldBeServer.EndPoint, actual); + } + else + { + // we should end up where we *actually sent the message* - there is no -MOVED + Assert.Equal(serverEndpoint.EndPoint, actual); + } + + Log("Unsubscribing..."); + await queue.UnsubscribeAsync(); + Log("Unsubscribed."); + } + + [Fact] + public async Task KeepSubscribedThroughSlotMigrationAsync() + { + await using var conn = Create(require: RedisFeatures.v7_0_0_rc1, allowAdmin: true); + var name = $"{Me()}:{Guid.NewGuid()}"; + var channel = RedisChannel.Sharded(name); + var subscriber = conn.GetSubscriber(); + var queue = await 
subscriber.SubscribeAsync(channel); + await Task.Delay(50); + var actual = subscriber.SubscribedEndpoint(channel); + Assert.NotNull(actual); + + var asKey = (RedisKey)(byte[])channel!; + Assert.False(asKey.IsEmpty); + var slot = conn.GetHashSlot(asKey); + var viaMap = conn.ServerSelectionStrategy.Select(slot, RedisCommand.SSUBSCRIBE, CommandFlags.None, allowDisconnected: false); + + Log($"Slot {slot}, subscribed to {Format.ToString(actual)} (mapped to {Format.ToString(viaMap?.EndPoint)})"); + Assert.NotNull(viaMap); + Assert.Equal(actual, viaMap.EndPoint); + + var oldServer = conn.GetServer(asKey); // this is where it *should* go + + using (var timeout = new CancellationTokenSource(TimeSpan.FromSeconds(5))) + { + // now publish... we *expect* things to have sorted themselves out + var msg = Guid.NewGuid().ToString(); + var count = await subscriber.PublishAsync(channel, msg); + Assert.Equal(1, count); + + Log("Waiting for message on original subscription..."); + var received = await queue.ReadAsync(timeout.Token); + Log($"Message received: {received.Message}"); + Assert.Equal(msg, (string)received.Message!); + } + + // now intentionally choose *a different* server + var newServer = conn.GetServers().First(s => !Equals(s.EndPoint, oldServer.EndPoint)); + + var nodes = await newServer.ClusterNodesAsync(); + Assert.NotNull(nodes); + var fromNode = nodes[oldServer.EndPoint]?.NodeId; + var toNode = nodes[newServer.EndPoint]?.NodeId; + Assert.NotNull(fromNode); + Assert.NotNull(toNode); + Assert.Equal(oldServer.EndPoint, nodes.GetBySlot(slot)?.EndPoint); + + var ep = subscriber.SubscribedEndpoint(channel); + Log($"Endpoint before migration: {Format.ToString(ep)}"); + Log($"Migrating slot {slot} to {Format.ToString(newServer.EndPoint)}; node {fromNode} -> {toNode}..."); + + // see https://redis.io/docs/latest/commands/cluster-setslot/#redis-cluster-live-resharding-explained + WriteLog("IMPORTING", await newServer.ExecuteAsync("CLUSTER", "SETSLOT", slot, "IMPORTING", 
fromNode)); + WriteLog("MIGRATING", await oldServer.ExecuteAsync("CLUSTER", "SETSLOT", slot, "MIGRATING", toNode)); + + while (true) + { + var keys = (await oldServer.ExecuteAsync("CLUSTER", "GETKEYSINSLOT", slot, 100)).AsRedisKeyArray()!; + Log($"Migrating {keys.Length} keys..."); + if (keys.Length == 0) break; + foreach (var key in keys) + { + await conn.GetDatabase().KeyMigrateAsync(key, newServer.EndPoint, migrateOptions: MigrateOptions.None); + } + } + + WriteLog("NODE (old)", await newServer.ExecuteAsync("CLUSTER", "SETSLOT", slot, "NODE", toNode)); + WriteLog("NODE (new)", await oldServer.ExecuteAsync("CLUSTER", "SETSLOT", slot, "NODE", toNode)); + + void WriteLog(string caption, RedisResult result) + { + if (result.IsNull) + { + Log($"{caption}: null"); + } + else if (result.Length >= 0) + { + var arr = result.AsRedisValueArray()!; + Log($"{caption}: {arr.Length} items"); + foreach (var item in arr) + { + Log($" {item}"); + } + } + else + { + Log($"{caption}: {result}"); + } + } + + Log("Migration initiated; checking node state..."); + await Task.Delay(100); + ep = subscriber.SubscribedEndpoint(channel); + Log($"Endpoint after migration: {Format.ToString(ep)}"); + Assert.True( + ep is null || ep == newServer.EndPoint, + "Target server after migration should be null or the new server"); + + nodes = await newServer.ClusterNodesAsync(); + Assert.NotNull(nodes); + Assert.Equal(newServer.EndPoint, nodes.GetBySlot(slot)?.EndPoint); + await conn.ConfigureAsync(); + Assert.Equal(newServer, conn.GetServer(asKey)); + + using (var timeout = new CancellationTokenSource(TimeSpan.FromSeconds(5))) + { + // now publish... 
we *expect* things to have sorted themselves out + var msg = Guid.NewGuid().ToString(); + var count = await subscriber.PublishAsync(channel, msg); + Assert.Equal(1, count); + + Log("Waiting for message on moved subscription..."); + var received = await queue.ReadAsync(timeout.Token); + Log($"Message received: {received.Message}"); + Assert.Equal(msg, (string)received.Message!); + ep = subscriber.SubscribedEndpoint(channel); + Log($"Endpoint after receiving message: {Format.ToString(ep)}"); + } + + Log("Unsubscribing..."); + await queue.UnsubscribeAsync(); + Log("Unsubscribed."); + } +} diff --git a/tests/StackExchange.Redis.Tests/ClusterTests.cs b/tests/StackExchange.Redis.Tests/ClusterTests.cs new file mode 100644 index 000000000..781b65fef --- /dev/null +++ b/tests/StackExchange.Redis.Tests/ClusterTests.cs @@ -0,0 +1,849 @@ +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Net; +using System.Threading; +using System.Threading.Tasks; +using StackExchange.Redis.Profiling; +using Xunit; + +namespace StackExchange.Redis.Tests; + +[RunPerProtocol] +public class ClusterTests(ITestOutputHelper output, SharedConnectionFixture fixture) : TestBase(output, fixture) +{ + protected override string GetConfiguration() => TestConfig.Current.ClusterServersAndPorts + ",connectTimeout=10000"; + + [Fact] + public async Task ExportConfiguration() + { + if (File.Exists("cluster.zip")) File.Delete("cluster.zip"); + Assert.False(File.Exists("cluster.zip")); + await using (var conn = Create(allowAdmin: true)) + using (var file = File.Create("cluster.zip")) + { + conn.ExportConfiguration(file); + } + Assert.True(File.Exists("cluster.zip")); + } + + [Fact] + public async Task ConnectUsesSingleSocket() + { + for (int i = 0; i < 5; i++) + { + await using var conn = Create(failMessage: i + ": ", log: Writer); + + foreach (var ep in conn.GetEndPoints()) + { + var srv = conn.GetServer(ep); + var counters = srv.GetCounters(); + Log($"{i}; 
interactive, {ep}, count: {counters.Interactive.SocketCount}"); + Log($"{i}; subscription, {ep}, count: {counters.Subscription.SocketCount}"); + } + foreach (var ep in conn.GetEndPoints()) + { + var srv = conn.GetServer(ep); + var counters = srv.GetCounters(); + Assert.Equal(1, counters.Interactive.SocketCount); + Assert.Equal(TestContext.Current.IsResp3() ? 0 : 1, counters.Subscription.SocketCount); + } + } + } + + [Fact] + public async Task CanGetTotalStats() + { + await using var conn = Create(); + + var counters = conn.GetCounters(); + Log(counters.ToString()); + } + + private void PrintEndpoints(EndPoint[] endpoints) + { + Log($"Endpoints Expected: {TestConfig.Current.ClusterStartPort}+{TestConfig.Current.ClusterServerCount}"); + Log("Endpoints Found:"); + foreach (var endpoint in endpoints) + { + Log(" Endpoint: " + endpoint); + } + } + + [Fact] + public async Task Connect() + { + await using var conn = Create(log: Writer); + + var expectedPorts = new HashSet(Enumerable.Range(TestConfig.Current.ClusterStartPort, TestConfig.Current.ClusterServerCount)); + var endpoints = conn.GetEndPoints(); + if (TestConfig.Current.ClusterServerCount != endpoints.Length) + { + PrintEndpoints(endpoints); + } + + Assert.Equal(TestConfig.Current.ClusterServerCount, endpoints.Length); + int primaries = 0, replicas = 0; + var failed = new List(); + foreach (var endpoint in endpoints) + { + var server = conn.GetServer(endpoint); + if (!server.IsConnected) + { + failed.Add(endpoint); + } + Log("endpoint:" + endpoint); + Assert.Equal(endpoint, server.EndPoint); + + Log("endpoint-type:" + endpoint); + Assert.IsType(endpoint); + + Log("port:" + endpoint); + Assert.True(expectedPorts.Remove(((IPEndPoint)endpoint).Port)); + + Log("server-type:" + endpoint); + Assert.Equal(ServerType.Cluster, server.ServerType); + + if (server.IsReplica) replicas++; + else primaries++; + } + if (failed.Count != 0) + { + Log("{0} failues", failed.Count); + foreach (var fail in failed) + { + 
Log(fail.ToString()); + } + Assert.Fail("not all servers connected"); + } + + Assert.Equal(TestConfig.Current.ClusterServerCount / 2, replicas); + Assert.Equal(TestConfig.Current.ClusterServerCount / 2, primaries); + } + + [Fact] + public async Task TestIdentity() + { + await using var conn = Create(); + + RedisKey key = Guid.NewGuid().ToByteArray(); + var ep = conn.GetDatabase().IdentifyEndpoint(key); + Assert.NotNull(ep); + Assert.Equal(ep, conn.GetServer(ep).ClusterConfiguration?.GetBySlot(key)?.EndPoint); + } + + [Fact] + public async Task IntentionalWrongServer() + { + static string? StringGet(IServer server, RedisKey key, CommandFlags flags = CommandFlags.None) + => (string?)server.Execute("GET", [key], flags); + + await using var conn = Create(); + + var endpoints = conn.GetEndPoints(); + var servers = endpoints.Select(e => conn.GetServer(e)).ToList(); + + var key = Me(); + const string value = "abc"; + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + db.StringSet(key, value, flags: CommandFlags.FireAndForget); + await servers[0].PingAsync(); + var config = servers[0].ClusterConfiguration; + Assert.NotNull(config); + int slot = conn.HashSlot(key); + var rightPrimaryNode = config.GetBySlot(key); + Assert.NotNull(rightPrimaryNode); + Log($"Right Primary: {rightPrimaryNode.EndPoint} {rightPrimaryNode.NodeId}"); + + Assert.NotNull(rightPrimaryNode.EndPoint); + string? a = StringGet(conn.GetServer(rightPrimaryNode.EndPoint), key); + Assert.Equal(value, a); // right primary + + var node = config.Nodes.FirstOrDefault(x => !x.IsReplica && x.NodeId != rightPrimaryNode.NodeId); + Assert.NotNull(node); + Log($"Using Primary: {node.EndPoint} {node.NodeId}"); + { + Assert.NotNull(node.EndPoint); + string? 
b = StringGet(conn.GetServer(node.EndPoint), key); + Assert.Equal(value, b); // wrong primary, allow redirect + + var ex = Assert.Throws(() => StringGet(conn.GetServer(node.EndPoint), key, CommandFlags.NoRedirect)); + Assert.StartsWith($"Key has MOVED to Endpoint {rightPrimaryNode.EndPoint} and hashslot {slot}", ex.Message); + } + + node = config.Nodes.FirstOrDefault(x => x.IsReplica && x.ParentNodeId == rightPrimaryNode.NodeId); + Assert.NotNull(node); + { + Assert.NotNull(node.EndPoint); + string? d = StringGet(conn.GetServer(node.EndPoint), key); + Assert.Equal(value, d); // right replica + } + + node = config.Nodes.FirstOrDefault(x => x.IsReplica && x.ParentNodeId != rightPrimaryNode.NodeId); + Assert.NotNull(node); + { + Assert.NotNull(node.EndPoint); + string? e = StringGet(conn.GetServer(node.EndPoint), key); + Assert.Equal(value, e); // wrong replica, allow redirect + + var ex = Assert.Throws(() => StringGet(conn.GetServer(node.EndPoint), key, CommandFlags.NoRedirect)); + Assert.StartsWith($"Key has MOVED to Endpoint {rightPrimaryNode.EndPoint} and hashslot {slot}", ex.Message); + } + } + + [Fact] + public async Task TransactionWithMultiServerKeys() + { + await using var conn = Create(); + var ex = await Assert.ThrowsAsync(async () => + { + // connect + var cluster = conn.GetDatabase(); + var anyServer = conn.GetServer(conn.GetEndPoints()[0]); + await anyServer.PingAsync(); + Assert.Equal(ServerType.Cluster, anyServer.ServerType); + var config = anyServer.ClusterConfiguration; + Assert.NotNull(config); + + // invent 2 keys that we believe are served by different nodes + string x = Guid.NewGuid().ToString(), y; + var xNode = config.GetBySlot(x); + Assert.NotNull(xNode); + int abort = 1000; + do + { + y = Guid.NewGuid().ToString(); + } + while (--abort > 0 && config.GetBySlot(y) == xNode); + if (abort == 0) Assert.Skip("failed to find a different node to use"); + var yNode = config.GetBySlot(y); + Assert.NotNull(yNode); + Log("x={0}, served by {1}", x, 
xNode.NodeId); + Log("y={0}, served by {1}", y, yNode.NodeId); + Assert.NotEqual(xNode.NodeId, yNode.NodeId); + + // wipe those keys + cluster.KeyDelete(x, CommandFlags.FireAndForget); + cluster.KeyDelete(y, CommandFlags.FireAndForget); + + // create a transaction that attempts to assign both keys + var tran = cluster.CreateTransaction(); + tran.AddCondition(Condition.KeyNotExists(x)); + tran.AddCondition(Condition.KeyNotExists(y)); + _ = tran.StringSetAsync(x, "x-val"); + _ = tran.StringSetAsync(y, "y-val"); + tran.Execute(); + + Assert.Fail("Expected single-slot rules to apply"); + // the rest no longer applies while we are following single-slot rules + + //// check that everything was aborted + // Assert.False(success, "tran aborted"); + // Assert.True(setX.IsCanceled, "set x cancelled"); + // Assert.True(setY.IsCanceled, "set y cancelled"); + // var existsX = cluster.KeyExistsAsync(x); + // var existsY = cluster.KeyExistsAsync(y); + // Assert.False(cluster.Wait(existsX), "x exists"); + // Assert.False(cluster.Wait(existsY), "y exists"); + }); + Assert.Equal("Multi-key operations must involve a single slot; keys can use 'hash tags' to help this, i.e. 
'{/users/12345}/account' and '{/users/12345}/contacts' will always be in the same slot", ex.Message); + } + + [Fact] + public async Task TransactionWithSameServerKeys() + { + await using var conn = Create(); + var ex = await Assert.ThrowsAsync(async () => + { + // connect + var cluster = conn.GetDatabase(); + var anyServer = conn.GetServer(conn.GetEndPoints()[0]); + await anyServer.PingAsync(); + var config = anyServer.ClusterConfiguration; + Assert.NotNull(config); + + // invent 2 keys that we believe are served by different nodes + string x = Guid.NewGuid().ToString(), y; + var xNode = config.GetBySlot(x); + int abort = 1000; + do + { + y = Guid.NewGuid().ToString(); + } + while (--abort > 0 && config.GetBySlot(y) != xNode); + Assert.SkipWhen(abort == 0, "failed to find a key with the same node to use"); + var yNode = config.GetBySlot(y); + Assert.NotNull(xNode); + Log("x={0}, served by {1}", x, xNode.NodeId); + Assert.NotNull(yNode); + Log("y={0}, served by {1}", y, yNode.NodeId); + Assert.Equal(xNode.NodeId, yNode.NodeId); + + // wipe those keys + cluster.KeyDelete(x, CommandFlags.FireAndForget); + cluster.KeyDelete(y, CommandFlags.FireAndForget); + + // create a transaction that attempts to assign both keys + var tran = cluster.CreateTransaction(); + tran.AddCondition(Condition.KeyNotExists(x)); + tran.AddCondition(Condition.KeyNotExists(y)); + _ = tran.StringSetAsync(x, "x-val"); + _ = tran.StringSetAsync(y, "y-val"); + tran.Execute(); + + Assert.Fail("Expected single-slot rules to apply"); + // the rest no longer applies while we are following single-slot rules + + //// check that everything was aborted + // Assert.True(success, "tran aborted"); + // Assert.False(setX.IsCanceled, "set x cancelled"); + // Assert.False(setY.IsCanceled, "set y cancelled"); + // var existsX = cluster.KeyExistsAsync(x); + // var existsY = cluster.KeyExistsAsync(y); + // Assert.True(cluster.Wait(existsX), "x exists"); + // Assert.True(cluster.Wait(existsY), "y exists"); + }); + 
Assert.Equal("Multi-key operations must involve a single slot; keys can use 'hash tags' to help this, i.e. '{/users/12345}/account' and '{/users/12345}/contacts' will always be in the same slot", ex.Message); + } + + [Fact] + public async Task TransactionWithSameSlotKeys() + { + await using var conn = Create(); + + // connect + var cluster = conn.GetDatabase(); + var anyServer = conn.GetServer(conn.GetEndPoints()[0]); + await anyServer.PingAsync(); + var config = anyServer.ClusterConfiguration; + Assert.NotNull(config); + + // invent 2 keys that we believe are in the same slot + var guid = Guid.NewGuid().ToString(); + string x = "/{" + guid + "}/foo", y = "/{" + guid + "}/bar"; + + Assert.Equal(conn.HashSlot(x), conn.HashSlot(y)); + var xNode = config.GetBySlot(x); + var yNode = config.GetBySlot(y); + Assert.NotNull(xNode); + Log("x={0}, served by {1}", x, xNode.NodeId); + Assert.NotNull(yNode); + Log("y={0}, served by {1}", y, yNode.NodeId); + Assert.Equal(xNode.NodeId, yNode.NodeId); + + // wipe those keys + cluster.KeyDelete(x, CommandFlags.FireAndForget); + cluster.KeyDelete(y, CommandFlags.FireAndForget); + + // create a transaction that attempts to assign both keys + var tran = cluster.CreateTransaction(); + tran.AddCondition(Condition.KeyNotExists(x)); + tran.AddCondition(Condition.KeyNotExists(y)); + var setX = tran.StringSetAsync(x, "x-val"); + var setY = tran.StringSetAsync(y, "y-val"); + bool success = tran.Execute(); + + // check that everything was aborted + Assert.True(success, "tran aborted"); + Assert.False(setX.IsCanceled, "set x cancelled"); + Assert.False(setY.IsCanceled, "set y cancelled"); + var existsX = cluster.KeyExistsAsync(x); + var existsY = cluster.KeyExistsAsync(y); + Assert.True(cluster.Wait(existsX), "x exists"); + Assert.True(cluster.Wait(existsY), "y exists"); + } + + [Theory] + [InlineData(null, 10)] + [InlineData(null, 100)] + [InlineData("abc", 10)] + [InlineData("abc", 100)] + public async Task Keys(string? 
pattern, int pageSize) + { + await using var conn = Create(allowAdmin: true); + + var dbId = TestConfig.GetDedicatedDB(conn); + var server = conn.GetEndPoints().Select(x => conn.GetServer(x)).First(x => !x.IsReplica); + await server.FlushDatabaseAsync(dbId); + try + { + Assert.False(server.Keys(dbId, pattern: pattern, pageSize: pageSize).Any()); + Log($"Complete: '{pattern}' / {pageSize}"); + } + catch + { + Log($"Failed: '{pattern}' / {pageSize}"); + throw; + } + } + + [Theory] + [InlineData("", 0)] + [InlineData("abc", 7638)] + [InlineData("{abc}", 7638)] + [InlineData("abcdef", 15101)] + [InlineData("abc{abc}def", 7638)] + [InlineData("c", 7365)] + [InlineData("g", 7233)] + [InlineData("d", 11298)] + + [InlineData("user1000", 3443)] + [InlineData("{user1000}", 3443)] + [InlineData("abc{user1000}", 3443)] + [InlineData("abc{user1000}def", 3443)] + [InlineData("{user1000}.following", 3443)] + [InlineData("{user1000}.followers", 3443)] + + [InlineData("foo{}{bar}", 8363)] + + [InlineData("foo{{bar}}zap", 4015)] + [InlineData("{bar", 4015)] + + [InlineData("foo{bar}{zap}", 5061)] + [InlineData("bar", 5061)] + + public async Task HashSlots(string key, int slot) + { + await using var conn = Create(connectTimeout: 5000); + + Assert.Equal(slot, conn.HashSlot(key)); + } + + [Fact] + public async Task SScan() + { + await using var conn = Create(); + + RedisKey key = "a"; + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + + int totalUnfiltered = 0, totalFiltered = 0; + for (int i = 0; i < 1000; i++) + { + db.SetAdd(key, i, CommandFlags.FireAndForget); + totalUnfiltered += i; + if (i.ToString().Contains('3')) totalFiltered += i; + } + var unfilteredActual = db.SetScan(key).Select(x => (int)x).Sum(); + var filteredActual = db.SetScan(key, "*3*").Select(x => (int)x).Sum(); + Assert.Equal(totalUnfiltered, unfilteredActual); + Assert.Equal(totalFiltered, filteredActual); + } + + [Fact] + public async Task GetConfig() + { + await using var conn = 
Create(allowAdmin: true, log: Writer); + + var endpoints = conn.GetEndPoints(); + var server = conn.GetServer(endpoints[0]); + var nodes = server.ClusterNodes(); + Assert.NotNull(nodes); + + Log("Endpoints:"); + foreach (var endpoint in endpoints) + { + Log(endpoint.ToString()); + } + Log("Nodes:"); + foreach (var node in nodes.Nodes.OrderBy(x => x)) + { + Log(node.ToString()); + } + + Assert.Equal(TestConfig.Current.ClusterServerCount, endpoints.Length); + Assert.Equal(TestConfig.Current.ClusterServerCount, nodes.Nodes.Count); + } + + [System.Diagnostics.CodeAnalysis.SuppressMessage("Usage", "xUnit1004:Test methods should not be skipped", Justification = "Because.")] + [Fact(Skip = "FlushAllDatabases")] + public async Task AccessRandomKeys() + { + await using var conn = Create(allowAdmin: true); + + var cluster = conn.GetDatabase(); + int slotMovedCount = 0; + conn.HashSlotMoved += (s, a) => + { + Assert.NotNull(a.OldEndPoint); + Log("{0} moved from {1} to {2}", a.HashSlot, Describe(a.OldEndPoint), Describe(a.NewEndPoint)); + Interlocked.Increment(ref slotMovedCount); + }; + var pairs = new Dictionary(); + const int COUNT = 500; + int index = 0; + + var servers = conn.GetEndPoints().Select(x => conn.GetServer(x)).ToList(); + foreach (var server in servers) + { + if (!server.IsReplica) + { + await server.PingAsync(); + await server.FlushAllDatabasesAsync(); + } + } + + for (int i = 0; i < COUNT; i++) + { + var key = Guid.NewGuid().ToString(); + var value = Guid.NewGuid().ToString(); + pairs.Add(key, value); + cluster.StringSet(key, value, flags: CommandFlags.FireAndForget); + } + + var expected = new string[COUNT]; + var actual = new Task[COUNT]; + index = 0; + foreach (var pair in pairs) + { + expected[index] = pair.Value; + actual[index] = cluster.StringGetAsync(pair.Key); + index++; + } + cluster.WaitAll(actual); + for (int i = 0; i < COUNT; i++) + { + Assert.Equal(expected[i], actual[i].Result); + } + + int total = 0; + Parallel.ForEach(servers, server => + { + 
if (!server.IsReplica) + { + int count = server.Keys(pageSize: 100).Count(); + Log("{0} has {1} keys", server.EndPoint, count); + Interlocked.Add(ref total, count); + } + }); + + foreach (var server in servers) + { + var counters = server.GetCounters(); + Log(counters.ToString()); + } + int final = Interlocked.CompareExchange(ref total, 0, 0); + Assert.Equal(COUNT, final); + Assert.Equal(0, Interlocked.CompareExchange(ref slotMovedCount, 0, 0)); + } + + [Theory] + [InlineData(CommandFlags.DemandMaster, false)] + [InlineData(CommandFlags.DemandReplica, true)] + [InlineData(CommandFlags.PreferMaster, false)] + [InlineData(CommandFlags.PreferReplica, true)] + public async Task GetFromRightNodeBasedOnFlags(CommandFlags flags, bool isReplica) + { + await using var conn = Create(allowAdmin: true); + + var db = conn.GetDatabase(); + for (int i = 0; i < 500; i++) + { + var key = Guid.NewGuid().ToString(); + var endpoint = db.IdentifyEndpoint(key, flags); + Assert.NotNull(endpoint); + var server = conn.GetServer(endpoint); + Assert.Equal(isReplica, server.IsReplica); + } + } + + private static string Describe(EndPoint endpoint) => endpoint?.ToString() ?? 
"(unknown)"; + + [Fact] + public async Task SimpleProfiling() + { + await using var conn = Create(log: Writer); + + var profiler = new ProfilingSession(); + var key = Me(); + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + + conn.RegisterProfiler(() => profiler); + db.StringSet(key, "world"); + var val = db.StringGet(key); + Assert.Equal("world", val); + + var msgs = profiler.FinishProfiling().Where(m => m.Command == "GET" || m.Command == "SET").ToList(); + foreach (var msg in msgs) + { + Log("Profiler Message: " + Environment.NewLine + msg); + } + Log("Checking GET..."); + Assert.Contains(msgs, m => m.Command == "GET"); + Log("Checking SET..."); + Assert.Contains(msgs, m => m.Command == "SET"); + Assert.Equal(2, msgs.Count(m => m.RetransmissionOf is null)); + + var arr = msgs.Where(m => m.RetransmissionOf is null).ToArray(); + Assert.Equal("SET", arr[0].Command); + Assert.Equal("GET", arr[1].Command); + } + + [Fact] + public async Task MultiKeyQueryFails() + { + var keys = InventKeys(); // note the rules expected of this data are enforced in GroupedQueriesWork + + await using var conn = Create(); + + var ex = Assert.Throws(() => conn.GetDatabase(0).StringGet(keys)); + Assert.Contains("Multi-key operations must involve a single slot", ex.Message); + } + + private static RedisKey[] InventKeys() + { + RedisKey[] keys = new RedisKey[256]; + Random rand = new Random(12324); + string InventString() + { + const string alphabet = "abcdefghijklmnopqrstuvwxyz012345689"; + var len = rand.Next(10, 50); + char[] chars = new char[len]; + for (int i = 0; i < len; i++) + chars[i] = alphabet[rand.Next(alphabet.Length)]; + return new string(chars); + } + + for (int i = 0; i < keys.Length; i++) + { + keys[i] = InventString(); + } + return keys; + } + + [Fact] + public async Task GroupedQueriesWork() + { + // note it doesn't matter that the data doesn't exist for this; + // the point here is that the entire thing *won't work* otherwise, + // as per 
above test + var keys = InventKeys(); + await using var conn = Create(); + + var grouped = keys.GroupBy(key => conn.GetHashSlot(key)).ToList(); + Assert.True(grouped.Count > 1); // check not all a super-group + Assert.True(grouped.Count < keys.Length); // check not all singleton groups + Assert.Equal(keys.Length, grouped.Sum(x => x.Count())); // check they're all there + Assert.Contains(grouped, x => x.Count() > 1); // check at least one group with multiple items (redundant from above, but... meh) + + Log($"{grouped.Count} groups, min: {grouped.Min(x => x.Count())}, max: {grouped.Max(x => x.Count())}, avg: {grouped.Average(x => x.Count())}"); + + var db = conn.GetDatabase(0); + var all = grouped.SelectMany(grp => + { + var grpKeys = grp.ToArray(); + var values = db.StringGet(grpKeys); + return grpKeys.Zip(values, (key, val) => new { key, val }); + }).ToDictionary(x => x.key, x => x.val); + + Assert.Equal(keys.Length, all.Count); + } + + [Fact] + public async Task MovedProfiling() + { + var key = Me(); + const string Value = "redirected-value"; + + var profiler = new ProfilingTests.PerThreadProfiler(); + + await using var conn = Create(); + + conn.RegisterProfiler(profiler.GetSession); + + var endpoints = conn.GetEndPoints(); + var servers = endpoints.Select(e => conn.GetServer(e)); + + var db = conn.GetDatabase(); + db.KeyDelete(key); + db.StringSet(key, Value); + var config = servers.First().ClusterConfiguration; + Assert.NotNull(config); + + // int slot = conn.HashSlot(Key); + var rightPrimaryNode = config.GetBySlot(key); + Assert.NotNull(rightPrimaryNode); + + Assert.NotNull(rightPrimaryNode.EndPoint); + string? a = (string?)conn.GetServer(rightPrimaryNode.EndPoint).Execute("GET", key); + Assert.Equal(Value, a); // right primary + + var wrongPrimaryNode = config.Nodes.FirstOrDefault(x => !x.IsReplica && x.NodeId != rightPrimaryNode.NodeId); + Assert.NotNull(wrongPrimaryNode); + + Assert.NotNull(wrongPrimaryNode.EndPoint); + string? 
b = (string?)conn.GetServer(wrongPrimaryNode.EndPoint).Execute("GET", key); + Assert.Equal(Value, b); // wrong primary, allow redirect + + var msgs = profiler.GetSession().FinishProfiling().ToList(); + + // verify that things actually got recorded properly, and the retransmission profilings are connected as expected + { + // expect 1 DEL, 1 SET, 1 GET (to right primary), 1 GET (to wrong primary) that was responded to by an ASK, and 1 GET (to right primary or a replica of it) + Assert.Equal(5, msgs.Count); + Assert.Equal(1, msgs.Count(c => c.Command == "DEL" || c.Command == "UNLINK")); + Assert.Equal(1, msgs.Count(c => c.Command == "SET")); + Assert.Equal(3, msgs.Count(c => c.Command == "GET")); + + var toRightPrimaryNotRetransmission = msgs.Where(m => m.Command == "GET" && m.EndPoint.Equals(rightPrimaryNode.EndPoint) && m.RetransmissionOf == null); + Assert.Single(toRightPrimaryNotRetransmission); + + var toWrongPrimaryWithoutRetransmission = msgs.Where(m => m.Command == "GET" && m.EndPoint.Equals(wrongPrimaryNode.EndPoint) && m.RetransmissionOf == null).ToList(); + Assert.Single(toWrongPrimaryWithoutRetransmission); + + var toRightPrimaryOrReplicaAsRetransmission = msgs.Where(m => m.Command == "GET" && (m.EndPoint.Equals(rightPrimaryNode.EndPoint) || rightPrimaryNode.Children.Any(c => m.EndPoint.Equals(c.EndPoint))) && m.RetransmissionOf != null).ToList(); + Assert.Single(toRightPrimaryOrReplicaAsRetransmission); + + var originalWrongPrimary = toWrongPrimaryWithoutRetransmission.Single(); + var retransmissionToRight = toRightPrimaryOrReplicaAsRetransmission.Single(); + + Assert.True(ReferenceEquals(originalWrongPrimary, retransmissionToRight.RetransmissionOf)); + } + + foreach (var msg in msgs) + { + Assert.True(msg.CommandCreated != default(DateTime)); + Assert.True(msg.CreationToEnqueued > TimeSpan.Zero); + Assert.True(msg.EnqueuedToSending > TimeSpan.Zero); + Assert.True(msg.SentToResponse > TimeSpan.Zero); + Assert.True(msg.ResponseToCompletion >= 
TimeSpan.Zero); // this can be immeasurably fast + Assert.True(msg.ElapsedTime > TimeSpan.Zero); + + if (msg.RetransmissionOf != null) + { + // imprecision of DateTime.UtcNow makes this pretty approximate + Assert.True(msg.RetransmissionOf.CommandCreated <= msg.CommandCreated); + Assert.Equal(RetransmissionReasonType.Moved, msg.RetransmissionReason); + } + else + { + Assert.False(msg.RetransmissionReason.HasValue); + } + } + } + + [Fact] + public async Task ConnectIncludesSubscriber() + { + await using var conn = Create(keepAlive: 1, connectTimeout: 3000, shared: false); + + var db = conn.GetDatabase(); + await db.PingAsync(); + Assert.True(conn.IsConnected); + + foreach (var server in conn.GetServerSnapshot()) + { + Assert.Equal(PhysicalBridge.State.ConnectedEstablished, server.InteractiveConnectionState); + Assert.Equal(PhysicalBridge.State.ConnectedEstablished, server.SubscriptionConnectionState); + } + } + + [Theory] + [InlineData(true, false, false)] + [InlineData(true, true, false)] + [InlineData(false, false, false)] + [InlineData(false, true, false)] + [InlineData(true, false, true)] + [InlineData(true, true, true)] + [InlineData(false, false, true)] + [InlineData(false, true, true)] + public async Task ClusterPubSub(bool sharded, bool withKeyRouting, bool withKeyPrefix) + { + var guid = Guid.NewGuid().ToString(); + var channel = sharded ? RedisChannel.Sharded(guid) : RedisChannel.Literal(guid); + if (withKeyRouting) + { + channel = channel.WithKeyRouting(); + } + await using var conn = Create( + keepAlive: 1, + connectTimeout: 3000, + shared: false, + require: sharded ? RedisFeatures.v7_0_0_rc1 : RedisFeatures.v2_0_0, + channelPrefix: withKeyPrefix ? 
"c_prefix:" : null); + Assert.True(conn.IsConnected); + + var pubsub = conn.GetSubscriber(); + HashSet eps = []; + for (int i = 0; i < 10; i++) + { + var ep = Format.ToString(await pubsub.IdentifyEndpointAsync(channel)); + Log($"Channel {channel} => {ep}"); + eps.Add(ep); + } + + if (sharded | withKeyRouting) + { + Assert.Single(eps); + } + else + { + // if not routed: we should have at least two different endpoints + Assert.True(eps.Count > 1); + } + + List<(RedisChannel, RedisValue)> received = []; + var queue = await pubsub.SubscribeAsync(channel, CommandFlags.NoRedirect); + _ = Task.Run(async () => + { + // use queue API to have control over order + await foreach (var item in queue) + { + lock (received) + { + received.Add((item.Channel, item.Message)); + } + } + }); + var subscribedEp = Format.ToString(pubsub.SubscribedEndpoint(channel)); + Log($"Subscribed to {subscribedEp}"); + Assert.NotNull(subscribedEp); + if (sharded | withKeyRouting) + { + Assert.Equal(eps.Single(), subscribedEp); + } + var db = conn.GetDatabase(); + await Task.Delay(50); // let the sub settle (this isn't needed on RESP3, note) + await db.PingAsync(); + for (int i = 0; i < 10; i++) + { + // publish + var receivers = await db.PublishAsync(channel, i.ToString()); + + // check we get a hit (we are the only subscriber, and because we prefer to + // use our own subscribed connection: we can reliably expect to see this hit) + Log($"Published {i} to {receivers} receiver(s) against the receiving server."); + Assert.Equal(1, receivers); + } + + await Task.Delay(250); // let the sub settle (this isn't needed on RESP3, note) + await db.PingAsync(); + await pubsub.UnsubscribeAsync(channel); + + (RedisChannel Channel, RedisValue Value)[] snap; + lock (received) + { + snap = received.ToArray(); // in case of concurrency + } + Log("items received: {0}", snap.Length); + Assert.Equal(10, snap.Length); + // separate log and validate loop here simplifies debugging (ask me how I know!) 
+ for (int i = 0; i < 10; i++) + { + var pair = snap[i]; + Log("element {0}: {1}/{2}", i, pair.Channel, pair.Value); + } + // even if not routed: we can expect the *order* to be correct, since there's + // only one publisher (us), and we prefer to publish via our own subscription + for (int i = 0; i < 10; i++) + { + var pair = snap[i]; + Assert.Equal(channel, pair.Channel); + Assert.Equal(i, pair.Value); + } + } +} diff --git a/tests/StackExchange.Redis.Tests/CommandTests.cs b/tests/StackExchange.Redis.Tests/CommandTests.cs new file mode 100644 index 000000000..42df92dd1 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/CommandTests.cs @@ -0,0 +1,56 @@ +using System.Net; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class CommandTests +{ + [Fact] + public void CommandByteLength() + { + Assert.Equal(31, CommandBytes.MaxLength); + } + + [Fact] + public void CheckCommandContents() + { + for (int len = 0; len <= CommandBytes.MaxLength; len++) + { + var s = new string('A', len); + CommandBytes b = s; + Assert.Equal(len, b.Length); + + var t = b.ToString(); + Assert.Equal(s, t); + + CommandBytes b2 = t; + Assert.Equal(b, b2); + + Assert.Equal(len == 0, ReferenceEquals(s, t)); + } + } + + [Fact] + public void Basic() + { + var config = ConfigurationOptions.Parse(".,$PING=p"); + Assert.Single(config.EndPoints); + config.SetDefaultPorts(); + Assert.Contains(new DnsEndPoint(".", 6379), config.EndPoints); + var map = config.CommandMap; + Assert.Equal("$PING=P", map.ToString()); + Assert.Equal(".:6379,$PING=P", config.ToString()); + } + + [Theory] + [InlineData("redisql.CREATE_STATEMENT")] + [InlineData("INSERTINTOTABLE1STMT")] + public void CanHandleNonTrivialCommands(string command) + { + var cmd = new CommandBytes(command); + Assert.Equal(command.Length, cmd.Length); + Assert.Equal(command.ToUpperInvariant(), cmd.ToString()); + + Assert.Equal(31, CommandBytes.MaxLength); + } +} diff --git a/tests/StackExchange.Redis.Tests/CommandTimeoutTests.cs 
b/tests/StackExchange.Redis.Tests/CommandTimeoutTests.cs new file mode 100644 index 000000000..04e1ca624 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/CommandTimeoutTests.cs @@ -0,0 +1,63 @@ +using System; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +[Collection(NonParallelCollection.Name)] +public class CommandTimeoutTests(ITestOutputHelper output) : TestBase(output) +{ + [Fact] + public async Task DefaultHeartbeatTimeout() + { + Skip.UnlessLongRunning(); + var options = ConfigurationOptions.Parse(TestConfig.Current.PrimaryServerAndPort); + options.AllowAdmin = true; + options.AsyncTimeout = 1000; + + await using var pauseConn = ConnectionMultiplexer.Connect(options); + await using var conn = ConnectionMultiplexer.Connect(options); + + var pauseServer = GetServer(pauseConn); + var pauseTask = pauseServer.ExecuteAsync("CLIENT", "PAUSE", 5000); + + var key = Me(); + var db = conn.GetDatabase(); + var sw = ValueStopwatch.StartNew(); + var ex = await Assert.ThrowsAsync(async () => await db.StringGetAsync(key)); + Log(ex.Message); + var duration = sw.GetElapsedTime(); + Assert.True(duration < TimeSpan.FromSeconds(4000), $"Duration ({duration.Milliseconds} ms) should be less than 4000ms"); + + // Await as to not bias the next test + await pauseTask; + } + +#if DEBUG + [Fact] + public async Task DefaultHeartbeatLowTimeout() + { + var options = ConfigurationOptions.Parse(TestConfig.Current.PrimaryServerAndPort); + options.AllowAdmin = true; + options.AsyncTimeout = 50; + options.HeartbeatInterval = TimeSpan.FromMilliseconds(100); + + await using var pauseConn = await ConnectionMultiplexer.ConnectAsync(options); + await using var conn = await ConnectionMultiplexer.ConnectAsync(options); + + var pauseServer = GetServer(pauseConn); + var pauseTask = pauseServer.ExecuteAsync("CLIENT", "PAUSE", 2000); + + var key = Me(); + var db = conn.GetDatabase(); + var sw = ValueStopwatch.StartNew(); + var ex = await 
Assert.ThrowsAsync(async () => await db.StringGetAsync(key)); + Log(ex.Message); + var duration = sw.GetElapsedTime(); + Assert.True(duration < TimeSpan.FromSeconds(250), $"Duration ({duration.Milliseconds} ms) should be less than 250ms"); + + // Await as to not bias the next test + await pauseTask; + } +#endif +} diff --git a/tests/StackExchange.Redis.Tests/Commands.cs b/tests/StackExchange.Redis.Tests/Commands.cs deleted file mode 100644 index 036477d90..000000000 --- a/tests/StackExchange.Redis.Tests/Commands.cs +++ /dev/null @@ -1,58 +0,0 @@ -using System; -using System.Net; -using Xunit; - -namespace StackExchange.Redis.Tests -{ - public class Commands - { - [Fact] - public void CommandByteLength() - { - Assert.Equal(31, CommandBytes.MaxLength); - } - - [Fact] - public void CheckCommandContents() - { - for (int len = 0; len <= CommandBytes.MaxLength; len++) - { - var s = new string('A', len); - CommandBytes b = s; - Assert.Equal(len, b.Length); - - var t = b.ToString(); - Assert.Equal(s, t); - - CommandBytes b2 = t; - Assert.Equal(b, b2); - - Assert.Equal(len == 0, ReferenceEquals(s, t)); - } - } - - [Fact] - public void Basic() - { - var config = ConfigurationOptions.Parse(".,$PING=p"); - Assert.Single(config.EndPoints); - config.SetDefaultPorts(); - Assert.Contains(new DnsEndPoint(".", 6379), config.EndPoints); - var map = config.CommandMap; - Assert.Equal("$PING=P", map.ToString()); - Assert.Equal(".:6379,$PING=P", config.ToString()); - } - - [Theory] - [InlineData("redisql.CREATE_STATEMENT")] - [InlineData("INSERTINTOTABLE1STMT")] - public void CanHandleNonTrivialCommands(string command) - { - var cmd = new CommandBytes(command); - Assert.Equal(command.Length, cmd.Length); - Assert.Equal(command.ToUpperInvariant(), cmd.ToString()); - - Assert.Equal(31, CommandBytes.MaxLength); - } - } -} diff --git a/tests/StackExchange.Redis.Tests/Config.cs b/tests/StackExchange.Redis.Tests/Config.cs deleted file mode 100644 index a6f34a9f3..000000000 --- 
a/tests/StackExchange.Redis.Tests/Config.cs +++ /dev/null @@ -1,495 +0,0 @@ -using System; -using System.Globalization; -using System.IO; -using System.IO.Pipelines; -using System.Linq; -using System.Net; -using System.Net.Sockets; -using System.Security.Authentication; -using System.Threading.Tasks; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - public class Config : TestBase - { - public Config(ITestOutputHelper output) : base(output) { } - - [Fact] - public void SslProtocols_SingleValue() - { - var options = ConfigurationOptions.Parse("myhost,sslProtocols=Tls11"); - Assert.Equal(SslProtocols.Tls11, options.SslProtocols.GetValueOrDefault()); - } - - [Fact] - public void SslProtocols_MultipleValues() - { - var options = ConfigurationOptions.Parse("myhost,sslProtocols=Tls11|Tls12"); - Assert.Equal(SslProtocols.Tls11 | SslProtocols.Tls12, options.SslProtocols.GetValueOrDefault()); - } - - [Theory] - [InlineData("checkCertificateRevocation=false", false)] - [InlineData("checkCertificateRevocation=true", true)] - [InlineData("", true)] - public void ConfigurationOption_CheckCertificateRevocation(string conString, bool expectedValue) - { - var options = ConfigurationOptions.Parse($"host,{conString}"); - Assert.Equal(expectedValue, options.CheckCertificateRevocation); - var toString = options.ToString(); - Assert.True(toString.IndexOf(conString, StringComparison.CurrentCultureIgnoreCase) >= 0); - } - - [Fact] - public void SslProtocols_UsingIntegerValue() - { - // The below scenario is for cases where the *targeted* - // .NET framework version (e.g. .NET 4.0) doesn't define an enum value (e.g. 
Tls11) - // but the OS has been patched with support - const int integerValue = (int)(SslProtocols.Tls11 | SslProtocols.Tls12); - var options = ConfigurationOptions.Parse("myhost,sslProtocols=" + integerValue); - Assert.Equal(SslProtocols.Tls11 | SslProtocols.Tls12, options.SslProtocols.GetValueOrDefault()); - } - - [Fact] - public void SslProtocols_InvalidValue() - { - Assert.Throws(() => ConfigurationOptions.Parse("myhost,sslProtocols=InvalidSslProtocol")); - } - - [Fact] - public void ConfigurationOptionsDefaultForAzure() - { - var options = ConfigurationOptions.Parse("contoso.redis.cache.windows.net"); - Assert.True(options.DefaultVersion.Equals(new Version(3, 0, 0))); - Assert.False(options.AbortOnConnectFail); - } - - [Fact] - public void ConfigurationOptionsForAzureWhenSpecified() - { - var options = ConfigurationOptions.Parse("contoso.redis.cache.windows.net,abortConnect=true, version=2.1.1"); - Assert.True(options.DefaultVersion.Equals(new Version(2, 1, 1))); - Assert.True(options.AbortOnConnectFail); - } - - [Fact] - public void ConfigurationOptionsDefaultForAzureChina() - { - // added a few upper case chars to validate comparison - var options = ConfigurationOptions.Parse("contoso.REDIS.CACHE.chinacloudapi.cn"); - Assert.True(options.DefaultVersion.Equals(new Version(3, 0, 0))); - Assert.False(options.AbortOnConnectFail); - } - - [Fact] - public void ConfigurationOptionsDefaultForAzureGermany() - { - var options = ConfigurationOptions.Parse("contoso.redis.cache.cloudapi.de"); - Assert.True(options.DefaultVersion.Equals(new Version(3, 0, 0))); - Assert.False(options.AbortOnConnectFail); - } - - [Fact] - public void ConfigurationOptionsDefaultForAzureUSGov() - { - var options = ConfigurationOptions.Parse("contoso.redis.cache.usgovcloudapi.net"); - Assert.True(options.DefaultVersion.Equals(new Version(3, 0, 0))); - Assert.False(options.AbortOnConnectFail); - } - - [Fact] - public void ConfigurationOptionsDefaultForNonAzure() - { - var options = 
ConfigurationOptions.Parse("redis.contoso.com"); - Assert.True(options.DefaultVersion.Equals(new Version(2, 0, 0))); - Assert.True(options.AbortOnConnectFail); - } - - [Fact] - public void ConfigurationOptionsDefaultWhenNoEndpointsSpecifiedYet() - { - var options = new ConfigurationOptions(); - Assert.True(options.DefaultVersion.Equals(new Version(2, 0, 0))); - Assert.True(options.AbortOnConnectFail); - } - - [Fact] - public void ConfigurationOptionsSyncTimeout() - { - // Default check - var options = new ConfigurationOptions(); - Assert.Equal(5000, options.SyncTimeout); - - options = ConfigurationOptions.Parse("syncTimeout=20"); - Assert.Equal(20, options.SyncTimeout); - } - - [Theory] - [InlineData("127.1:6379", AddressFamily.InterNetwork, "127.0.0.1", 6379)] - [InlineData("127.0.0.1:6379", AddressFamily.InterNetwork, "127.0.0.1", 6379)] - [InlineData("2a01:9820:1:24::1:1:6379", AddressFamily.InterNetworkV6, "2a01:9820:1:24:0:1:1:6379", 0)] - [InlineData("[2a01:9820:1:24::1:1]:6379", AddressFamily.InterNetworkV6, "2a01:9820:1:24::1:1", 6379)] - public void ConfigurationOptionsIPv6Parsing(string configString, AddressFamily family, string address, int port) - { - var options = ConfigurationOptions.Parse(configString); - Assert.Single(options.EndPoints); - var ep = Assert.IsType(options.EndPoints[0]); - Assert.Equal(family, ep.AddressFamily); - Assert.Equal(address, ep.Address.ToString()); - Assert.Equal(port, ep.Port); - } - - [Fact] - public void CanParseAndFormatUnixDomainSocket() - { - const string ConfigString = "!/some/path,allowAdmin=True"; -#if NET472 - var ex = Assert.Throws(() => ConfigurationOptions.Parse(ConfigString)); - Assert.Equal("Unix domain sockets require .NET Core 3 or above", ex.Message); -#else - var config = ConfigurationOptions.Parse(ConfigString); - Assert.True(config.AllowAdmin); - var ep = Assert.IsType(Assert.Single(config.EndPoints)); - Assert.Equal("/some/path", ep.ToString()); - Assert.Equal(ConfigString, config.ToString()); -#endif - 
} - - [Fact] - public void TalkToNonsenseServer() - { - var config = new ConfigurationOptions - { - AbortOnConnectFail = false, - EndPoints = - { - { "127.0.0.1:1234" } - }, - ConnectTimeout = 200 - }; - var log = new StringWriter(); - using (var conn = ConnectionMultiplexer.Connect(config, log)) - { - Log(log.ToString()); - Assert.False(conn.IsConnected); - } - } - - [Fact] - public async Task TestManaulHeartbeat() - { - using (var muxer = Create(keepAlive: 2)) - { - var conn = muxer.GetDatabase(); - conn.Ping(); - - var before = muxer.OperationCount; - - Log("sleeping to test heartbeat..."); - await Task.Delay(5000).ForAwait(); - - var after = muxer.OperationCount; - - Assert.True(after >= before + 2, $"after: {after}, before: {before}"); - } - } - - [Theory] - [InlineData(0)] - [InlineData(10)] - [InlineData(100)] - [InlineData(200)] - public void GetSlowlog(int count) - { - using (var muxer = Create(allowAdmin: true)) - { - var rows = GetAnyMaster(muxer).SlowlogGet(count); - Assert.NotNull(rows); - } - } - - [Fact] - public void ClearSlowlog() - { - using (var muxer = Create(allowAdmin: true)) - { - GetAnyMaster(muxer).SlowlogReset(); - } - } - - [Fact] - public void ClientName() - { - using (var muxer = Create(clientName: "Test Rig", allowAdmin: true)) - { - Assert.Equal("Test Rig", muxer.ClientName); - - var conn = muxer.GetDatabase(); - conn.Ping(); - - var name = (string)GetAnyMaster(muxer).Execute("CLIENT", "GETNAME"); - Assert.Equal("TestRig", name); - } - } - - [Fact] - public void DefaultClientName() - { - using (var muxer = Create(allowAdmin: true, caller: null)) // force default naming to kick in - { - Assert.Equal(Environment.MachineName, muxer.ClientName); - var conn = muxer.GetDatabase(); - conn.Ping(); - - var name = (string)GetAnyMaster(muxer).Execute("CLIENT", "GETNAME"); - Assert.Equal(Environment.MachineName, name); - } - } - - [Fact] - public void ReadConfigWithConfigDisabled() - { - using (var muxer = Create(allowAdmin: true, 
disabledCommands: new[] { "config", "info" })) - { - var conn = GetAnyMaster(muxer); - var ex = Assert.Throws(() => conn.ConfigGet()); - Assert.Equal("This operation has been disabled in the command-map and cannot be used: CONFIG", ex.Message); - } - } - - [Fact] - public void ReadConfig() - { - using (var muxer = Create(allowAdmin: true)) - { - Log("about to get config"); - var conn = GetAnyMaster(muxer); - var all = conn.ConfigGet(); - Assert.True(all.Length > 0, "any"); - - var pairs = all.ToDictionary(x => (string)x.Key, x => (string)x.Value, StringComparer.InvariantCultureIgnoreCase); - - Assert.Equal(all.Length, pairs.Count); - Assert.True(pairs.ContainsKey("timeout"), "timeout"); - var val = int.Parse(pairs["timeout"]); - - Assert.True(pairs.ContainsKey("port"), "port"); - val = int.Parse(pairs["port"]); - Assert.Equal(TestConfig.Current.MasterPort, val); - } - } - - [Fact] - public void GetTime() - { - using (var muxer = Create()) - { - var server = GetAnyMaster(muxer); - var serverTime = server.Time(); - var localTime = DateTime.UtcNow; - Log("Server: " + serverTime.ToString(CultureInfo.InvariantCulture)); - Log("Local: " + localTime.ToString(CultureInfo.InvariantCulture)); - Assert.Equal(localTime, serverTime, TimeSpan.FromSeconds(10)); - } - } - - [Fact] - public void DebugObject() - { - using (var muxer = Create(allowAdmin: true)) - { - var db = muxer.GetDatabase(); - RedisKey key = Me(); - db.KeyDelete(key, CommandFlags.FireAndForget); - db.StringIncrement(key, flags: CommandFlags.FireAndForget); - var debug = (string)db.DebugObject(key); - Assert.NotNull(debug); - Assert.Contains("encoding:int serializedlength:2", debug); - } - } - - [Fact] - public void GetInfo() - { - using (var muxer = Create(allowAdmin: true)) - { - var server = GetAnyMaster(muxer); - var info1 = server.Info(); - Assert.True(info1.Length > 5); - Log("All sections"); - foreach (var group in info1) - { - Log(group.Key); - } - var first = info1[0]; - Log("Full info for: " + 
first.Key); - foreach (var setting in first) - { - Log("{0} ==> {1}", setting.Key, setting.Value); - } - - var info2 = server.Info("cpu"); - Assert.Single(info2); - var cpu = info2.Single(); - var cpuCount = cpu.Count(); - Assert.True(cpuCount > 2); - Assert.Equal("CPU", cpu.Key); - Assert.Contains(cpu, x => x.Key == "used_cpu_sys"); - Assert.Contains(cpu, x => x.Key == "used_cpu_user"); - } - } - - [Fact] - public void GetInfoRaw() - { - using (var muxer = Create(allowAdmin: true)) - { - var server = GetAnyMaster(muxer); - var info = server.InfoRaw(); - Assert.Contains("used_cpu_sys", info); - Assert.Contains("used_cpu_user", info); - } - } - - [Fact] - public void GetClients() - { - var name = Guid.NewGuid().ToString(); - using (var muxer = Create(clientName: name, allowAdmin: true)) - { - var server = GetAnyMaster(muxer); - var clients = server.ClientList(); - Assert.True(clients.Length > 0, "no clients"); // ourselves! - Assert.True(clients.Any(x => x.Name == name), "expected: " + name); - } - } - - [Fact] - public void SlowLog() - { - using (var muxer = Create(allowAdmin: true)) - { - var server = GetAnyMaster(muxer); - server.SlowlogGet(); - server.SlowlogReset(); - } - } - - [Fact] - public async Task TestAutomaticHeartbeat() - { - RedisValue oldTimeout = RedisValue.Null; - using (var configMuxer = Create(allowAdmin: true)) - { - try - { - configMuxer.GetDatabase(); - var srv = GetAnyMaster(configMuxer); - oldTimeout = srv.ConfigGet("timeout")[0].Value; - srv.ConfigSet("timeout", 5); - - using (var innerMuxer = Create()) - { - var innerConn = innerMuxer.GetDatabase(); - innerConn.Ping(); // need to wait to pick up configuration etc - - var before = innerMuxer.OperationCount; - - Log("sleeping to test heartbeat..."); - await Task.Delay(8000).ForAwait(); - - var after = innerMuxer.OperationCount; - Assert.True(after >= before + 2, $"after: {after}, before: {before}"); - } - } - finally - { - if (!oldTimeout.IsNull) - { - var srv = GetAnyMaster(configMuxer); - 
srv.ConfigSet("timeout", oldTimeout); - } - } - } - } - - [Fact] - public void EndpointIteratorIsReliableOverChanges() - { - var eps = new EndPointCollection - { - { IPAddress.Loopback, 7999 }, - { IPAddress.Loopback, 8000 }, - }; - - using var iter = eps.GetEnumerator(); - Assert.True(iter.MoveNext()); - Assert.Equal(7999, ((IPEndPoint)iter.Current).Port); - eps[1] = new IPEndPoint(IPAddress.Loopback, 8001); // boom - Assert.True(iter.MoveNext()); - Assert.Equal(8001, ((IPEndPoint)iter.Current).Port); - Assert.False(iter.MoveNext()); - } - - [Fact] - public void ThreadPoolManagerIsDetected() - { - var config = new ConfigurationOptions - { - EndPoints = { { IPAddress.Loopback, 6379 } }, - SocketManager = SocketManager.ThreadPool - }; - using var muxer = ConnectionMultiplexer.Connect(config); - Assert.Same(PipeScheduler.ThreadPool, muxer.SocketManager.Scheduler); - } - - [Fact] - public void DefaultThreadPoolManagerIsDetected() - { - var config = new ConfigurationOptions - { - EndPoints = { { IPAddress.Loopback, 6379 } }, - }; - using var muxer = ConnectionMultiplexer.Connect(config); - Assert.Same(SocketManager.Shared.Scheduler, muxer.SocketManager.Scheduler); - } - - [Theory] - [InlineData("myDNS:myPort,password=myPassword,connectRetry=3,connectTimeout=15000,syncTimeout=15000,defaultDatabase=0,abortConnect=false,ssl=true,sslProtocols=Tls12", SslProtocols.Tls12)] - [InlineData("myDNS:myPort,password=myPassword,abortConnect=false,ssl=true,sslProtocols=Tls12", SslProtocols.Tls12)] -#pragma warning disable CS0618 // obsolete - [InlineData("myDNS:myPort,password=myPassword,abortConnect=false,ssl=true,sslProtocols=Ssl3", SslProtocols.Ssl3)] -#pragma warning restore CS0618 // obsolete - [InlineData("myDNS:myPort,password=myPassword,abortConnect=false,ssl=true,sslProtocols=Tls12 ", SslProtocols.Tls12)] - public void ParseTlsWithoutTrailingComma(string configString, SslProtocols expected) - { - var config = ConfigurationOptions.Parse(configString); - Assert.Equal(expected, 
config.SslProtocols); - } - - [Theory] - [InlineData("foo,sslProtocols=NotAThing", "Keyword 'sslProtocols' requires an SslProtocol value (multiple values separated by '|'); the value 'NotAThing' is not recognised.", "sslProtocols")] - [InlineData("foo,SyncTimeout=ten", "Keyword 'SyncTimeout' requires an integer value; the value 'ten' is not recognised.", "SyncTimeout")] - [InlineData("foo,syncTimeout=-42", "Keyword 'syncTimeout' has a minimum value of '1'; the value '-42' is not permitted.", "syncTimeout")] - [InlineData("foo,AllowAdmin=maybe", "Keyword 'AllowAdmin' requires a boolean value; the value 'maybe' is not recognised.", "AllowAdmin")] - [InlineData("foo,Version=current", "Keyword 'Version' requires a version value; the value 'current' is not recognised.", "Version")] - [InlineData("foo,proxy=epoxy", "Keyword 'proxy' requires a proxy value; the value 'epoxy' is not recognised.", "proxy")] - public void ConfigStringErrorsGiveMeaningfulMessages(string configString, string expected, string paramName) - { - var ex = Assert.Throws(() => ConfigurationOptions.Parse(configString)); - Assert.StartsWith(expected, ex.Message); // param name gets concatenated sometimes - Assert.Equal(paramName, ex.ParamName); // param name gets concatenated sometimes - } - - [Fact] - public void ConfigStringInvalidOptionErrorGiveMeaningfulMessages() - { - var ex = Assert.Throws(() => ConfigurationOptions.Parse("foo,flibble=value")); - Assert.StartsWith("Keyword 'flibble' is not supported.", ex.Message); // param name gets concatenated sometimes - Assert.Equal("flibble", ex.ParamName); - } - } -} diff --git a/tests/StackExchange.Redis.Tests/ConfigTests.cs b/tests/StackExchange.Redis.Tests/ConfigTests.cs new file mode 100644 index 000000000..3dfa4f99a --- /dev/null +++ b/tests/StackExchange.Redis.Tests/ConfigTests.cs @@ -0,0 +1,786 @@ +using System; +using System.Globalization; +using System.IO; +using System.IO.Pipelines; +using System.Linq; +using System.Net; +using 
System.Net.Sockets; +using System.Reflection; +using System.Security.Authentication; +using System.Text; +using System.Text.RegularExpressions; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.Extensions.Logging.Abstractions; +using StackExchange.Redis.Configuration; +using Xunit; + +namespace StackExchange.Redis.Tests; + +[RunPerProtocol] +public class ConfigTests(ITestOutputHelper output, SharedConnectionFixture fixture) : TestBase(output, fixture) +{ + public Version DefaultVersion = new(3, 0, 0); + + [Fact] + public void ExpectedFields() + { + // if this test fails, check that you've updated ConfigurationOptions.Clone(), then: fix the test! + // this is a simple but pragmatic "have you considered?" check + var fields = Array.ConvertAll( + typeof(ConfigurationOptions).GetFields(BindingFlags.Public | BindingFlags.NonPublic | BindingFlags.Instance), + x => Regex.Replace(x.Name, """^<(\w+)>k__BackingField$""", "$1")); + Array.Sort(fields); + Assert.Equal( + new[] + { + "abortOnConnectFail", + "allowAdmin", + "asyncTimeout", + "backlogPolicy", + "BeforeSocketConnect", + "CertificateSelection", + "CertificateValidation", + "ChannelPrefix", + "checkCertificateRevocation", + "ClientName", + "commandMap", + "configChannel", + "configCheckSeconds", + "connectRetry", + "connectTimeout", + "DefaultDatabase", + "defaultOptions", + "defaultVersion", + "EndPoints", + "heartbeatConsistencyChecks", + "heartbeatInterval", + "highIntegrity", + "includeDetailInExceptions", + "includePerformanceCountersInExceptions", + "keepAlive", + "LibraryName", + "loggerFactory", + "password", + "Protocol", + "proxy", + "reconnectRetryPolicy", + "resolveDns", + "responseTimeout", + "ServiceName", + "setClientLibrary", + "SocketManager", + "ssl", + #if !NETFRAMEWORK + "SslClientAuthenticationOptions", + #endif + "sslHost", + "SslProtocols", + "syncTimeout", + "tieBreaker", + "Tunnel", + "user", + }, + fields); + } + + [Fact] + public void SslProtocols_SingleValue() + { + 
var options = ConfigurationOptions.Parse("myhost,sslProtocols=Tls12"); + Assert.Equal(SslProtocols.Tls12, options.SslProtocols.GetValueOrDefault()); + } + + [Fact] + public void SslProtocols_MultipleValues() + { + var options = ConfigurationOptions.Parse("myhost,sslProtocols=Tls12|Tls13"); + Assert.Equal(SslProtocols.Tls12 | SslProtocols.Tls13, options.SslProtocols.GetValueOrDefault()); + } + + [Theory] + [InlineData("checkCertificateRevocation=false", false)] + [InlineData("checkCertificateRevocation=true", true)] + [InlineData("", true)] + public void ConfigurationOption_CheckCertificateRevocation(string conString, bool expectedValue) + { + var options = ConfigurationOptions.Parse($"host,{conString}"); + Assert.Equal(expectedValue, options.CheckCertificateRevocation); + var toString = options.ToString(); + Assert.Contains(conString, toString, StringComparison.CurrentCultureIgnoreCase); + } + + [Fact] + public void SslProtocols_UsingIntegerValue() + { + // The below scenario is for cases where the *targeted* + // .NET framework version (e.g. .NET 4.0) doesn't define an enum value (e.g. 
Tls11) + // but the OS has been patched with support + const int integerValue = (int)(SslProtocols.Tls12 | SslProtocols.Tls13); + var options = ConfigurationOptions.Parse("myhost,sslProtocols=" + integerValue); + Assert.Equal(SslProtocols.Tls12 | SslProtocols.Tls13, options.SslProtocols.GetValueOrDefault()); + } + + [Fact] + public void SslProtocols_InvalidValue() + { + Assert.Throws(() => ConfigurationOptions.Parse("myhost,sslProtocols=InvalidSslProtocol")); + } + + [Theory] + [InlineData("contoso.redis.cache.windows.net:6380", true)] + [InlineData("contoso.REDIS.CACHE.chinacloudapi.cn:6380", true)] // added a few upper case chars to validate comparison + [InlineData("contoso.redis.cache.usgovcloudapi.net:6380", true)] + [InlineData("contoso.redis.cache.sovcloud-api.de:6380", true)] + [InlineData("contoso.redis.cache.sovcloud-api.fr:6380", true)] + public void ConfigurationOptionsDefaultForAzure(string hostAndPort, bool sslShouldBeEnabled) + { + Version defaultAzureVersion = new(6, 0, 0); + var options = ConfigurationOptions.Parse(hostAndPort); + Assert.True(options.DefaultVersion.Equals(defaultAzureVersion)); + Assert.False(options.AbortOnConnectFail); + Assert.Equal(sslShouldBeEnabled, options.Ssl); + } + + [Theory] + [InlineData("contoso.redis.azure.net:10000", true)] + [InlineData("contoso.redis.chinacloudapi.cn:10000", true)] + [InlineData("contoso.redis.usgovcloudapi.net:10000", true)] + [InlineData("contoso.redisenterprise.cache.azure.net:10000", true)] + public void ConfigurationOptionsDefaultForAzureManagedRedis(string hostAndPort, bool sslShouldBeEnabled) + { + Version defaultAzureVersion = new(7, 4, 0); + var options = ConfigurationOptions.Parse(hostAndPort); + Assert.True(options.DefaultVersion.Equals(defaultAzureVersion)); + Assert.False(options.AbortOnConnectFail); + Assert.Equal(sslShouldBeEnabled, options.Ssl); + } + + [Fact] + public void ConfigurationOptionsForAzureWhenSpecified() + { + var options = 
ConfigurationOptions.Parse("contoso.redis.cache.windows.net,abortConnect=true, version=2.1.1"); + Assert.True(options.DefaultVersion.Equals(new Version(2, 1, 1))); + Assert.True(options.AbortOnConnectFail); + } + + [Fact] + public void ConfigurationOptionsDefaultForNonAzure() + { + var options = ConfigurationOptions.Parse("redis.contoso.com"); + Assert.True(options.DefaultVersion.Equals(DefaultVersion)); + Assert.True(options.AbortOnConnectFail); + } + + [Fact] + public void ConfigurationOptionsDefaultWhenNoEndpointsSpecifiedYet() + { + var options = new ConfigurationOptions(); + Assert.True(options.DefaultVersion.Equals(DefaultVersion)); + Assert.True(options.AbortOnConnectFail); + } + + [Fact] + public void ConfigurationOptionsSyncTimeout() + { + // Default check + var options = new ConfigurationOptions(); + Assert.Equal(5000, options.SyncTimeout); + + options = ConfigurationOptions.Parse("syncTimeout=20"); + Assert.Equal(20, options.SyncTimeout); + } + + [Theory] + [InlineData("127.1:6379", AddressFamily.InterNetwork, "127.0.0.1", 6379)] + [InlineData("127.0.0.1:6379", AddressFamily.InterNetwork, "127.0.0.1", 6379)] + [InlineData("2a01:9820:1:24::1:1:6379", AddressFamily.InterNetworkV6, "2a01:9820:1:24:0:1:1:6379", 0)] + [InlineData("[2a01:9820:1:24::1:1]:6379", AddressFamily.InterNetworkV6, "2a01:9820:1:24::1:1", 6379)] + public void ConfigurationOptionsIPv6Parsing(string configString, AddressFamily family, string address, int port) + { + var options = ConfigurationOptions.Parse(configString); + Assert.Single(options.EndPoints); + var ep = Assert.IsType(options.EndPoints[0]); + Assert.Equal(family, ep.AddressFamily); + Assert.Equal(address, ep.Address.ToString()); + Assert.Equal(port, ep.Port); + } + + [Fact] + public void CanParseAndFormatUnixDomainSocket() + { + const string ConfigString = "!/some/path,allowAdmin=True"; +#if NETFRAMEWORK + var ex = Assert.Throws(() => ConfigurationOptions.Parse(ConfigString)); + Assert.Equal("Unix domain sockets require .NET 
Core 3 or above", ex.Message); +#else + var config = ConfigurationOptions.Parse(ConfigString); + Assert.True(config.AllowAdmin); + var ep = Assert.IsType(Assert.Single(config.EndPoints)); + Assert.Equal("/some/path", ep.ToString()); + Assert.Equal(ConfigString, config.ToString()); +#endif + } + + [Fact] + public async Task TalkToNonsenseServer() + { + var config = new ConfigurationOptions + { + AbortOnConnectFail = false, + EndPoints = + { + { "127.0.0.1:1234" }, + }, + ConnectTimeout = 200, + }; + var log = new StringWriter(); + await using (var conn = ConnectionMultiplexer.Connect(config, log)) + { + Log(log.ToString()); + Assert.False(conn.IsConnected); + } + } + + [Fact] + public async Task TestManualHeartbeat() + { + var options = ConfigurationOptions.Parse(GetConfiguration()); + options.HeartbeatInterval = TimeSpan.FromMilliseconds(100); + await using var conn = await ConnectionMultiplexer.ConnectAsync(options); + + foreach (var ep in conn.GetServerSnapshot().ToArray()) + { + ep.WriteEverySeconds = 1; + } + + var db = conn.GetDatabase(); + await db.PingAsync(); + + var before = conn.OperationCount; + + Log("Sleeping to test heartbeat..."); + await UntilConditionAsync(TimeSpan.FromSeconds(5), () => conn.OperationCount > before + 1).ForAwait(); + var after = conn.OperationCount; + + Assert.True(after >= before + 1, $"after: {after}, before: {before}"); + } + + [Theory] + [InlineData(0)] + [InlineData(10)] + [InlineData(100)] + [InlineData(200)] + public async Task GetSlowlog(int count) + { + await using var conn = Create(allowAdmin: true); + + var rows = GetAnyPrimary(conn).SlowlogGet(count); + Assert.NotNull(rows); + } + + [Fact] + public async Task ClearSlowlog() + { + await using var conn = Create(allowAdmin: true); + + GetAnyPrimary(conn).SlowlogReset(); + } + + [Fact] + public async Task ClientName() + { + await using var conn = Create(clientName: "Test Rig", allowAdmin: true, shared: false); + + Assert.Equal("Test Rig", conn.ClientName); + + var db = 
conn.GetDatabase(); + await db.PingAsync(); + + var name = (string?)(await GetAnyPrimary(conn).ExecuteAsync("CLIENT", "GETNAME")); + Assert.Equal("TestRig", name); + } + + [Fact] + public async Task ClientLibraryName() + { + await using var conn = Create(allowAdmin: true, shared: false); + var server = GetAnyPrimary(conn); + + await server.PingAsync(); + var possibleId = conn.GetConnectionId(server.EndPoint, ConnectionType.Interactive); + + if (possibleId is null) + { + Log("(client id not available)"); + return; + } + var id = possibleId.Value; + var libName = server.ClientList().Single(x => x.Id == id).LibraryName; + if (libName is not null) // server-version dependent + { + Log("library name: {0}", libName); + Assert.Equal("SE.Redis", libName); + + conn.AddLibraryNameSuffix("foo"); + conn.AddLibraryNameSuffix("bar"); + conn.AddLibraryNameSuffix("foo"); + + libName = (await server.ClientListAsync()).Single(x => x.Id == id).LibraryName; + Log($"library name: {libName}"); + Assert.Equal("SE.Redis-bar-foo", libName); + } + else + { + Log("(library name not available)"); + } + } + + [Fact] + public async Task DefaultClientName() + { + await using var conn = Create(allowAdmin: true, caller: "", shared: false); // force default naming to kick in + + Assert.Equal($"{Environment.MachineName}(SE.Redis-v{Utils.GetLibVersion()})", conn.ClientName); + var db = conn.GetDatabase(); + await db.PingAsync(); + + var name = (string?)GetAnyPrimary(conn).Execute("CLIENT", "GETNAME"); + Assert.Equal($"{Environment.MachineName}(SE.Redis-v{Utils.GetLibVersion()})", name); + } + + [Fact] + public async Task ReadConfigWithConfigDisabled() + { + await using var conn = Create(allowAdmin: true, disabledCommands: ["config", "info"]); + + var server = GetAnyPrimary(conn); + var ex = Assert.Throws(() => server.ConfigGet()); + Assert.Equal("This operation has been disabled in the command-map and cannot be used: CONFIG", ex.Message); + } + + [Fact] + public async Task 
ConnectWithSubscribeDisabled() + { + await using var conn = Create(allowAdmin: true, disabledCommands: ["subscribe"]); + + Assert.True(conn.IsConnected); + var servers = conn.GetServerSnapshot(); + Assert.True(servers[0].IsConnected); + if (!TestContext.Current.IsResp3()) + { + Assert.False(servers[0].IsSubscriberConnected); + } + + var ex = Assert.Throws(() => conn.GetSubscriber().Subscribe(RedisChannel.Literal(Me()), (_, _) => GC.KeepAlive(this))); + Assert.Equal("This operation has been disabled in the command-map and cannot be used: SUBSCRIBE", ex.Message); + } + + [Fact] + public async Task ReadConfig() + { + await using var conn = Create(allowAdmin: true); + + Log("about to get config"); + var server = GetAnyPrimary(conn); + var all = server.ConfigGet(); + Assert.True(all.Length > 0, "any"); + + var pairs = all.ToDictionary(x => (string)x.Key, x => (string)x.Value, StringComparer.InvariantCultureIgnoreCase); + + Assert.Equal(all.Length, pairs.Count); + Assert.True(pairs.ContainsKey("timeout"), "timeout"); + var val = int.Parse(pairs["timeout"]); + + Assert.True(pairs.ContainsKey("port"), "port"); + val = int.Parse(pairs["port"]); + Assert.Equal(TestConfig.Current.PrimaryPort, val); + } + + [Fact] + public async Task GetTime() + { + await using var conn = Create(); + + var server = GetAnyPrimary(conn); + var serverTime = server.Time(); + var localTime = DateTime.UtcNow; + Log("Server: " + serverTime.ToString(CultureInfo.InvariantCulture)); + Log("Local: " + localTime.ToString(CultureInfo.InvariantCulture)); + Assert.Equal(localTime, serverTime, TimeSpan.FromSeconds(10)); + } + + [Fact] + public async Task DebugObject() + { + await using var conn = Create(allowAdmin: true); + + var db = conn.GetDatabase(); + RedisKey key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + db.StringIncrement(key, flags: CommandFlags.FireAndForget); + var debug = (string?)db.DebugObject(key); + Assert.NotNull(debug); + Assert.Contains("encoding:int serializedlength:2", 
debug); + } + + [Fact] + public async Task GetInfo() + { + await using var conn = Create(allowAdmin: true); + + var server = GetAnyPrimary(conn); + var info1 = server.Info(); + Assert.True(info1.Length > 5); + Log("All sections"); + foreach (var group in info1) + { + Log(group.Key); + } + var first = info1[0]; + Log("Full info for: " + first.Key); + foreach (var setting in first) + { + Log(" {0} ==> {1}", setting.Key, setting.Value); + } + + var info2 = server.Info("cpu"); + Assert.Single(info2); + var cpu = info2.Single(); + Log("Full info for: " + cpu.Key); + foreach (var setting in cpu) + { + Log(" {0} ==> {1}", setting.Key, setting.Value); + } + var cpuCount = cpu.Count(); + Assert.True(cpuCount > 2); + if (cpu.Key != "CPU") + { + // seem to be seeing this in logs; add lots of detail + var sb = new StringBuilder("Expected CPU, got ").AppendLine(cpu.Key); + foreach (var setting in cpu) + { + sb.Append(setting.Key).Append('=').AppendLine(setting.Value); + } + Assert.Fail(sb.ToString()); + } + Assert.Equal("CPU", cpu.Key); + Assert.Contains(cpu, x => x.Key == "used_cpu_sys"); + Assert.Contains(cpu, x => x.Key == "used_cpu_user"); + } + + [Fact] + public async Task GetInfoRaw() + { + await using var conn = Create(allowAdmin: true); + + var server = GetAnyPrimary(conn); + var info = server.InfoRaw(); + Assert.Contains("used_cpu_sys", info); + Assert.Contains("used_cpu_user", info); + } + + [Fact] + public async Task GetClients() + { + var name = Guid.NewGuid().ToString(); + await using var conn = Create(clientName: name, allowAdmin: true, shared: false); + + var server = GetAnyPrimary(conn); + var clients = server.ClientList(); + Assert.True(clients.Length > 0, "no clients"); // ourselves! 
+ Assert.True(clients.Any(x => x.Name == name), "expected: " + name); + + if (server.Features.ClientId) + { + var id = conn.GetConnectionId(server.EndPoint, ConnectionType.Interactive); + Log("client id: " + id); + Assert.NotNull(id); + Assert.True(clients.Any(x => x.Id == id), "expected: " + id); + id = conn.GetConnectionId(server.EndPoint, ConnectionType.Subscription); + Assert.NotNull(id); + Assert.True(clients.Any(x => x.Id == id), "expected: " + id); + + var self = clients.First(x => x.Id == id); + if (server.Version.Major >= 7) + { + Assert.Equal(TestContext.Current.GetProtocol(), self.Protocol); + } + else + { + Assert.Null(self.Protocol); + } + } + } + + [Fact] + public async Task SlowLog() + { + await using var conn = Create(allowAdmin: true); + + var server = GetAnyPrimary(conn); + server.SlowlogGet(); + server.SlowlogReset(); + } + + [Fact] + public void EndpointIteratorIsReliableOverChanges() + { + var eps = new EndPointCollection + { + { IPAddress.Loopback, 7999 }, + { IPAddress.Loopback, 8000 }, + }; + + using var iter = eps.GetEnumerator(); + Assert.True(iter.MoveNext()); + Assert.Equal(7999, ((IPEndPoint)iter.Current).Port); + eps[1] = new IPEndPoint(IPAddress.Loopback, 8001); // boom + Assert.True(iter.MoveNext()); + Assert.Equal(8001, ((IPEndPoint)iter.Current).Port); + Assert.False(iter.MoveNext()); + } + + [Fact] + public async Task ThreadPoolManagerIsDetected() + { + var config = new ConfigurationOptions + { + EndPoints = { { IPAddress.Loopback, 6379 } }, + SocketManager = SocketManager.ThreadPool, + }; + + await using var conn = ConnectionMultiplexer.Connect(config); + + Assert.Same(PipeScheduler.ThreadPool, conn.SocketManager?.Scheduler); + } + + [Fact] + public async Task DefaultThreadPoolManagerIsDetected() + { + var config = new ConfigurationOptions + { + EndPoints = { { IPAddress.Loopback, 6379 } }, + }; + + await using var conn = ConnectionMultiplexer.Connect(config); + + Assert.Same(SocketManager.Shared.Scheduler, 
conn.SocketManager?.Scheduler); + } + + [Theory] + [InlineData("myDNS:myPort,password=myPassword,connectRetry=3,connectTimeout=15000,syncTimeout=15000,defaultDatabase=0,abortConnect=false,ssl=true,sslProtocols=Tls12", SslProtocols.Tls12)] + [InlineData("myDNS:myPort,password=myPassword,abortConnect=false,ssl=true,sslProtocols=Tls12", SslProtocols.Tls12)] +#pragma warning disable CS0618 // Type or member is obsolete + [InlineData("myDNS:myPort,password=myPassword,abortConnect=false,ssl=true,sslProtocols=Ssl3", SslProtocols.Ssl3)] +#pragma warning restore CS0618 + [InlineData("myDNS:myPort,password=myPassword,abortConnect=false,ssl=true,sslProtocols=Tls12 ", SslProtocols.Tls12)] + public void ParseTlsWithoutTrailingComma(string configString, SslProtocols expected) + { + var config = ConfigurationOptions.Parse(configString); + Assert.Equal(expected, config.SslProtocols); + } + + [Theory] + [InlineData("foo,sslProtocols=NotAThing", "Keyword 'sslProtocols' requires an SslProtocol value (multiple values separated by '|'); the value 'NotAThing' is not recognised.", "sslProtocols")] + [InlineData("foo,SyncTimeout=ten", "Keyword 'SyncTimeout' requires an integer value; the value 'ten' is not recognised.", "SyncTimeout")] + [InlineData("foo,syncTimeout=-42", "Keyword 'syncTimeout' has a minimum value of '1'; the value '-42' is not permitted.", "syncTimeout")] + [InlineData("foo,AllowAdmin=maybe", "Keyword 'AllowAdmin' requires a boolean value; the value 'maybe' is not recognised.", "AllowAdmin")] + [InlineData("foo,Version=current", "Keyword 'Version' requires a version value; the value 'current' is not recognised.", "Version")] + [InlineData("foo,proxy=epoxy", "Keyword 'proxy' requires a proxy value; the value 'epoxy' is not recognised.", "proxy")] + public void ConfigStringErrorsGiveMeaningfulMessages(string configString, string expected, string paramName) + { + var ex = Assert.Throws(() => ConfigurationOptions.Parse(configString)); + Assert.StartsWith(expected, 
ex.Message); // param name gets concatenated sometimes + Assert.Equal(paramName, ex.ParamName); // param name gets concatenated sometimes + } + + [Fact] + public void ConfigStringInvalidOptionErrorGiveMeaningfulMessages() + { + var ex = Assert.Throws(() => ConfigurationOptions.Parse("foo,flibble=value")); + Assert.StartsWith("Keyword 'flibble' is not supported.", ex.Message); // param name gets concatenated sometimes + Assert.Equal("flibble", ex.ParamName); + } + + [Fact] + public void NullApply() + { + var options = ConfigurationOptions.Parse("127.0.0.1,name=FooApply"); + Assert.Equal("FooApply", options.ClientName); + + // Doesn't go boom + var result = options.Apply(null!); + Assert.Equal("FooApply", options.ClientName); + Assert.Equal(result, options); + } + + [Fact] + public void Apply() + { + var options = ConfigurationOptions.Parse("127.0.0.1,name=FooApply"); + Assert.Equal("FooApply", options.ClientName); + + var randomName = Guid.NewGuid().ToString(); + var result = options.Apply(options => options.ClientName = randomName); + + Assert.Equal(randomName, options.ClientName); + Assert.Equal(randomName, result.ClientName); + Assert.Equal(result, options); + } + + [Fact] + public async Task BeforeSocketConnect() + { + var options = ConfigurationOptions.Parse(TestConfig.Current.PrimaryServerAndPort); + int count = 0; + options.BeforeSocketConnect = (endpoint, connType, socket) => + { + Interlocked.Increment(ref count); + Log($"Endpoint: {endpoint}, ConnType: {connType}, Socket: {socket}"); + socket.DontFragment = true; + socket.Ttl = (short)(connType == ConnectionType.Interactive ? 
12 : 123); + }; + await using var conn = ConnectionMultiplexer.Connect(options); + Assert.True(conn.IsConnected); + Assert.Equal(2, count); + + var endpoint = conn.GetServerSnapshot()[0]; + var interactivePhysical = endpoint.GetBridge(ConnectionType.Interactive)?.TryConnect(null); + var subscriptionPhysical = endpoint.GetBridge(ConnectionType.Subscription)?.TryConnect(null); + Assert.NotNull(interactivePhysical); + Assert.NotNull(subscriptionPhysical); + + var interactiveSocket = interactivePhysical.VolatileSocket; + var subscriptionSocket = subscriptionPhysical.VolatileSocket; + Assert.NotNull(interactiveSocket); + Assert.NotNull(subscriptionSocket); + + Assert.Equal(12, interactiveSocket.Ttl); + Assert.Equal(123, subscriptionSocket.Ttl); + Assert.True(interactiveSocket.DontFragment); + Assert.True(subscriptionSocket.DontFragment); + } + + [Fact] + public async Task MutableOptions() + { + var options = ConfigurationOptions.Parse(TestConfig.Current.PrimaryServerAndPort + ",name=Details"); + options.LoggerFactory = NullLoggerFactory.Instance; + var originalConfigChannel = options.ConfigurationChannel = "originalConfig"; + var originalUser = options.User = "originalUser"; + var originalPassword = options.Password = "originalPassword"; + Assert.Equal("Details", options.ClientName); + await using var conn = await ConnectionMultiplexer.ConnectAsync(options); + + // Same instance + Assert.Same(options, conn.RawConfig); + // Copies + Assert.NotSame(options.EndPoints, conn.EndPoints); + + // Same until forked - it's not cloned + Assert.Same(options.CommandMap, conn.CommandMap); + options.CommandMap = CommandMap.Envoyproxy; + Assert.NotSame(options.CommandMap, conn.CommandMap); + +#pragma warning disable CS0618 // Type or member is obsolete + // Defaults true + Assert.True(options.IncludeDetailInExceptions); + Assert.True(conn.IncludeDetailInExceptions); + options.IncludeDetailInExceptions = false; + Assert.False(options.IncludeDetailInExceptions); + 
Assert.False(conn.IncludeDetailInExceptions); + + // Defaults false + Assert.False(options.IncludePerformanceCountersInExceptions); + Assert.False(conn.IncludePerformanceCountersInExceptions); + options.IncludePerformanceCountersInExceptions = true; + Assert.True(options.IncludePerformanceCountersInExceptions); + Assert.True(conn.IncludePerformanceCountersInExceptions); +#pragma warning restore CS0618 + + var newName = Guid.NewGuid().ToString(); + options.ClientName = newName; + Assert.Equal(newName, conn.ClientName); + + // TODO: This forks due to memoization of the byte[] for efficiency + // If we could cheaply detect change it'd be good to let this change + const string newConfigChannel = "newConfig"; + options.ConfigurationChannel = newConfigChannel; + Assert.Equal(newConfigChannel, options.ConfigurationChannel); + Assert.NotNull(conn.ConfigurationChangedChannel); + Assert.Equal(Encoding.UTF8.GetString(conn.ConfigurationChangedChannel), originalConfigChannel); + + Assert.Equal(originalUser, conn.RawConfig.User); + Assert.Equal(originalPassword, conn.RawConfig.Password); + var newPass = options.Password = "newPassword"; + Assert.Equal(newPass, conn.RawConfig.Password); + Assert.Equal(options.LoggerFactory, conn.RawConfig.LoggerFactory); + } + + [Theory] + [InlineData("http://somewhere:22", "http:somewhere:22")] + [InlineData("http:somewhere:22", "http:somewhere:22")] + public void HttpTunnelCanRoundtrip(string input, string expected) + { + var config = ConfigurationOptions.Parse($"127.0.0.1:6380,tunnel={input}"); + var ip = Assert.IsType(Assert.Single(config.EndPoints)); + Assert.Equal(6380, ip.Port); + Assert.Equal("127.0.0.1", ip.Address.ToString()); + + Assert.NotNull(config.Tunnel); + Assert.Equal(expected, config.Tunnel.ToString()); + + var cs = config.ToString(); + Assert.Equal($"127.0.0.1:6380,tunnel={expected}", cs); + } + + private sealed class CustomTunnel : Tunnel { } + + [Fact] + public void CustomTunnelCanRoundtripMinusTunnel() + { + // we don't 
expect to be able to parse custom tunnels, but we should still be able to round-trip + // the rest of the config, which means ignoring them *in both directions* (unless first party) + var options = ConfigurationOptions.Parse("127.0.0.1,Ssl=true"); + options.Tunnel = new CustomTunnel(); + var cs = options.ToString(); + Assert.Equal("127.0.0.1,ssl=True", cs); + options = ConfigurationOptions.Parse(cs); + Assert.Null(options.Tunnel); + } + + [Theory] + [InlineData("server:6379", true)] + [InlineData("server:6379,setlib=True", true)] + [InlineData("server:6379,setlib=False", false)] + public void DefaultConfigOptionsForSetLib(string configurationString, bool setlib) + { + var options = ConfigurationOptions.Parse(configurationString); + Assert.Equal(setlib, options.SetClientLibrary); + Assert.Equal(configurationString, options.ToString()); + options = options.Clone(); + Assert.Equal(setlib, options.SetClientLibrary); + Assert.Equal(configurationString, options.ToString()); + } + + [Theory] + [InlineData(null, false, "dummy")] + [InlineData(false, false, "dummy,highIntegrity=False")] + [InlineData(true, true, "dummy,highIntegrity=True")] + public void CheckHighIntegrity(bool? 
assigned, bool expected, string cs) + { + var options = ConfigurationOptions.Parse("dummy"); + if (assigned.HasValue) options.HighIntegrity = assigned.Value; + + Assert.Equal(expected, options.HighIntegrity); + Assert.Equal(cs, options.ToString()); + + var clone = options.Clone(); + Assert.Equal(expected, clone.HighIntegrity); + Assert.Equal(cs, clone.ToString()); + + var parsed = ConfigurationOptions.Parse(cs); + Assert.Equal(expected, parsed.HighIntegrity); + } +} diff --git a/tests/StackExchange.Redis.Tests/ConnectByIP.cs b/tests/StackExchange.Redis.Tests/ConnectByIP.cs deleted file mode 100644 index 4295e0f7d..000000000 --- a/tests/StackExchange.Redis.Tests/ConnectByIP.cs +++ /dev/null @@ -1,108 +0,0 @@ -using System.Collections.Generic; -using System.Linq; -using System.Net; -using System.Net.Sockets; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - public class ConnectByIP : TestBase - { - public ConnectByIP(ITestOutputHelper output) : base (output) { } - - [Fact] - public void ParseEndpoints() - { - var eps = new EndPointCollection - { - { "127.0.0.1", 1000 }, - { "::1", 1001 }, - { "localhost", 1002 } - }; - - Assert.Equal(AddressFamily.InterNetwork, eps[0].AddressFamily); - Assert.Equal(AddressFamily.InterNetworkV6, eps[1].AddressFamily); - Assert.Equal(AddressFamily.Unspecified, eps[2].AddressFamily); - - Assert.Equal("127.0.0.1:1000", eps[0].ToString()); - Assert.Equal("[::1]:1001", eps[1].ToString()); - Assert.Equal("Unspecified/localhost:1002", eps[2].ToString()); - } - - [Fact] - public void IPv4Connection() - { - var config = new ConfigurationOptions - { - EndPoints = { { TestConfig.Current.IPv4Server, TestConfig.Current.IPv4Port } } - }; - using (var conn = ConnectionMultiplexer.Connect(config)) - { - var server = conn.GetServer(config.EndPoints[0]); - Assert.Equal(AddressFamily.InterNetwork, server.EndPoint.AddressFamily); - server.Ping(); - } - } - - [Fact] - public void IPv6Connection() - { - var config = new 
ConfigurationOptions - { - EndPoints = { { TestConfig.Current.IPv6Server, TestConfig.Current.IPv6Port } } - }; - using (var conn = ConnectionMultiplexer.Connect(config)) - { - var server = conn.GetServer(config.EndPoints[0]); - Assert.Equal(AddressFamily.InterNetworkV6, server.EndPoint.AddressFamily); - server.Ping(); - } - } - - [Theory] - [MemberData(nameof(ConnectByVariousEndpointsData))] - public void ConnectByVariousEndpoints(EndPoint ep, AddressFamily expectedFamily) - { - Assert.Equal(expectedFamily, ep.AddressFamily); - var config = new ConfigurationOptions - { - EndPoints = { ep } - }; - if (ep.AddressFamily != AddressFamily.InterNetworkV6) // I don't have IPv6 servers - { - using (var conn = ConnectionMultiplexer.Connect(config)) - { - var actual = conn.GetEndPoints().Single(); - var server = conn.GetServer(actual); - server.Ping(); - } - } - } - - public static IEnumerable ConnectByVariousEndpointsData() - { - yield return new object[] { new IPEndPoint(IPAddress.Loopback, 6379), AddressFamily.InterNetwork }; - - yield return new object[] { new IPEndPoint(IPAddress.IPv6Loopback, 6379), AddressFamily.InterNetworkV6 }; - - yield return new object[] { new DnsEndPoint("localhost", 6379), AddressFamily.Unspecified }; - - yield return new object[] { new DnsEndPoint("localhost", 6379, AddressFamily.InterNetwork), AddressFamily.InterNetwork }; - - yield return new object[] { new DnsEndPoint("localhost", 6379, AddressFamily.InterNetworkV6), AddressFamily.InterNetworkV6 }; - - yield return new object[] { ConfigurationOptions.Parse("localhost:6379").EndPoints.Single(), AddressFamily.Unspecified }; - - yield return new object[] { ConfigurationOptions.Parse("localhost").EndPoints.Single(), AddressFamily.Unspecified }; - - yield return new object[] { ConfigurationOptions.Parse("127.0.0.1:6379").EndPoints.Single(), AddressFamily.InterNetwork }; - - yield return new object[] { ConfigurationOptions.Parse("127.0.0.1").EndPoints.Single(), AddressFamily.InterNetwork }; - - 
yield return new object[] { ConfigurationOptions.Parse("[::1]").EndPoints.Single(), AddressFamily.InterNetworkV6 }; - - yield return new object[] { ConfigurationOptions.Parse("[::1]:6379").EndPoints.Single(), AddressFamily.InterNetworkV6 }; - } - } -} diff --git a/tests/StackExchange.Redis.Tests/ConnectByIPTests.cs b/tests/StackExchange.Redis.Tests/ConnectByIPTests.cs new file mode 100644 index 000000000..3b6fc4d49 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/ConnectByIPTests.cs @@ -0,0 +1,103 @@ +using System.Collections.Generic; +using System.Linq; +using System.Net; +using System.Net.Sockets; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class ConnectByIPTests(ITestOutputHelper output) : TestBase(output) +{ + [Fact] + public void ParseEndpoints() + { + var eps = new EndPointCollection + { + { "127.0.0.1", 1000 }, + { "::1", 1001 }, + { "localhost", 1002 }, + }; + + Assert.Equal(AddressFamily.InterNetwork, eps[0].AddressFamily); + Assert.Equal(AddressFamily.InterNetworkV6, eps[1].AddressFamily); + Assert.Equal(AddressFamily.Unspecified, eps[2].AddressFamily); + + Assert.Equal("127.0.0.1:1000", eps[0].ToString()); + Assert.Equal("[::1]:1001", eps[1].ToString()); + Assert.Equal("Unspecified/localhost:1002", eps[2].ToString()); + } + + [Fact] + public async Task IPv4Connection() + { + var config = new ConfigurationOptions + { + EndPoints = { { TestConfig.Current.IPv4Server, TestConfig.Current.IPv4Port } }, + }; + await using var conn = ConnectionMultiplexer.Connect(config); + + var server = conn.GetServer(config.EndPoints[0]); + Assert.Equal(AddressFamily.InterNetwork, server.EndPoint.AddressFamily); + await server.PingAsync(); + } + + [Fact] + public async Task IPv6Connection() + { + var config = new ConfigurationOptions + { + EndPoints = { { TestConfig.Current.IPv6Server, TestConfig.Current.IPv6Port } }, + }; + await using var conn = ConnectionMultiplexer.Connect(config); + + var server = 
conn.GetServer(config.EndPoints[0]); + Assert.Equal(AddressFamily.InterNetworkV6, server.EndPoint.AddressFamily); + await server.PingAsync(); + } + + [Theory] + [MemberData(nameof(ConnectByVariousEndpointsData))] + public async Task ConnectByVariousEndpoints(EndPoint ep, AddressFamily expectedFamily) + { + Assert.Equal(expectedFamily, ep.AddressFamily); + var config = new ConfigurationOptions + { + EndPoints = { ep }, + }; + if (ep.AddressFamily != AddressFamily.InterNetworkV6) // I don't have IPv6 servers + { + await using (var conn = ConnectionMultiplexer.Connect(config)) + { + var actual = conn.GetEndPoints().Single(); + var server = conn.GetServer(actual); + await server.PingAsync(); + } + } + } + + public static IEnumerable ConnectByVariousEndpointsData() + { + yield return new object[] { new IPEndPoint(IPAddress.Loopback, 6379), AddressFamily.InterNetwork }; + + yield return new object[] { new IPEndPoint(IPAddress.IPv6Loopback, 6379), AddressFamily.InterNetworkV6 }; + + yield return new object[] { new DnsEndPoint("localhost", 6379), AddressFamily.Unspecified }; + + yield return new object[] { new DnsEndPoint("localhost", 6379, AddressFamily.InterNetwork), AddressFamily.InterNetwork }; + + yield return new object[] { new DnsEndPoint("localhost", 6379, AddressFamily.InterNetworkV6), AddressFamily.InterNetworkV6 }; + + yield return new object[] { ConfigurationOptions.Parse("localhost:6379").EndPoints.Single(), AddressFamily.Unspecified }; + + yield return new object[] { ConfigurationOptions.Parse("localhost").EndPoints.Single(), AddressFamily.Unspecified }; + + yield return new object[] { ConfigurationOptions.Parse("127.0.0.1:6379").EndPoints.Single(), AddressFamily.InterNetwork }; + + yield return new object[] { ConfigurationOptions.Parse("127.0.0.1").EndPoints.Single(), AddressFamily.InterNetwork }; + + yield return new object[] { ConfigurationOptions.Parse("[::1]").EndPoints.Single(), AddressFamily.InterNetworkV6 }; + + yield return new object[] { 
ConfigurationOptions.Parse("[::1]:6379").EndPoints.Single(), AddressFamily.InterNetworkV6 }; + } +} diff --git a/tests/StackExchange.Redis.Tests/ConnectCustomConfigTests.cs b/tests/StackExchange.Redis.Tests/ConnectCustomConfigTests.cs new file mode 100644 index 000000000..d0e67f35f --- /dev/null +++ b/tests/StackExchange.Redis.Tests/ConnectCustomConfigTests.cs @@ -0,0 +1,125 @@ +using System; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class ConnectCustomConfigTests(ITestOutputHelper output) : TestBase(output) +{ + // So we're triggering tiebreakers here + protected override string GetConfiguration() => TestConfig.Current.PrimaryServerAndPort + "," + TestConfig.Current.ReplicaServerAndPort; + + [Theory] + [InlineData("config")] + [InlineData("info")] + [InlineData("get")] + [InlineData("config,get")] + [InlineData("info,get")] + [InlineData("config,info,get")] + public async Task DisabledCommandsStillConnect(string disabledCommands) + { + await using var conn = Create(allowAdmin: true, disabledCommands: disabledCommands.Split(','), log: Writer); + + var db = conn.GetDatabase(); + await db.PingAsync(); + Assert.True(db.IsConnected(default(RedisKey))); + } + + [Theory] + [InlineData("config")] + [InlineData("info")] + [InlineData("get")] + [InlineData("cluster")] + [InlineData("config,get")] + [InlineData("info,get")] + [InlineData("config,info,get")] + [InlineData("config,info,get,cluster")] + public async Task DisabledCommandsStillConnectCluster(string disabledCommands) + { + await using var conn = Create(allowAdmin: true, configuration: TestConfig.Current.ClusterServersAndPorts, disabledCommands: disabledCommands.Split(','), log: Writer); + + var db = conn.GetDatabase(); + await db.PingAsync(); + Assert.True(db.IsConnected(default(RedisKey))); + } + + [Fact] + public async Task TieBreakerIntact() + { + await using var conn = Create(allowAdmin: true, log: Writer); + + var tiebreaker = 
conn.GetDatabase().StringGet(conn.RawConfig.TieBreaker); + Log($"Tiebreaker: {tiebreaker}"); + + foreach (var server in conn.GetServerSnapshot()) + { + Assert.Equal(tiebreaker, server.TieBreakerResult); + } + } + + [Fact] + public async Task TieBreakerSkips() + { + await using var conn = Create(allowAdmin: true, disabledCommands: ["get"], log: Writer); + Assert.Throws(() => conn.GetDatabase().StringGet(conn.RawConfig.TieBreaker)); + + foreach (var server in conn.GetServerSnapshot()) + { + Assert.True(server.IsConnected); + Assert.Null(server.TieBreakerResult); + } + } + + [Fact] + public async Task TiebreakerIncorrectType() + { + var tiebreakerKey = Me(); + await using var fubarConn = Create(allowAdmin: true, log: Writer); + // Store something nonsensical in the tiebreaker key: + fubarConn.GetDatabase().HashSet(tiebreakerKey, "foo", "bar"); + + // Ensure the next connection getting an invalid type still connects + await using var conn = Create(allowAdmin: true, tieBreaker: tiebreakerKey, log: Writer); + + var db = conn.GetDatabase(); + await db.PingAsync(); + Assert.True(db.IsConnected(default(RedisKey))); + + var ex = Assert.Throws(() => db.StringGet(tiebreakerKey)); + Assert.Contains("WRONGTYPE", ex.Message); + } + + [Theory] + [InlineData(true, 2, 15)] + [InlineData(false, 0, 0)] + public async Task HeartbeatConsistencyCheckPingsAsync(bool enableConsistencyChecks, int minExpected, int maxExpected) + { + var options = new ConfigurationOptions() + { + HeartbeatConsistencyChecks = enableConsistencyChecks, + HeartbeatInterval = TimeSpan.FromMilliseconds(100), + }; + options.EndPoints.Add(TestConfig.Current.PrimaryServerAndPort); + + await using var conn = await ConnectionMultiplexer.ConnectAsync(options, Writer); + + var db = conn.GetDatabase(); + await db.PingAsync(); + Assert.True(db.IsConnected(default)); + + var preCount = conn.OperationCount; + Log("OperationCount (pre-delay): " + preCount); + + // Allow several heartbeats to happen, but don't need to be strict 
here + // e.g. allow thread pool starvation flex with the test suite's load (just check for a few) + await Task.Delay(TimeSpan.FromSeconds(1)); + + var postCount = conn.OperationCount; + Log("OperationCount (post-delay): " + postCount); + + var opCount = postCount - preCount; + Log("OperationCount (diff): " + opCount); + + Assert.True(minExpected <= opCount && opCount <= maxExpected, $"Expected opcount ({opCount}) between {minExpected}-{maxExpected}"); + } +} diff --git a/tests/StackExchange.Redis.Tests/ConnectFailTimeout.cs b/tests/StackExchange.Redis.Tests/ConnectFailTimeout.cs deleted file mode 100644 index 6ca4ef63c..000000000 --- a/tests/StackExchange.Redis.Tests/ConnectFailTimeout.cs +++ /dev/null @@ -1,49 +0,0 @@ -using System; -using System.Threading.Tasks; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests - { - public class ConnectFailTimeout : TestBase - { - public ConnectFailTimeout(ITestOutputHelper output) : base (output) { } - - [Fact] - public async Task NoticesConnectFail() - { - SetExpectedAmbientFailureCount(-1); - using (var conn = Create(allowAdmin: true)) - { - var server = conn.GetServer(conn.GetEndPoints()[0]); - - await RunBlockingSynchronousWithExtraThreadAsync(innerScenario).ForAwait(); - void innerScenario() - { - conn.ConnectionFailed += (s, a) => - Log("Disconnected: " + EndPointCollection.ToString(a.EndPoint)); - conn.ConnectionRestored += (s, a) => - Log("Reconnected: " + EndPointCollection.ToString(a.EndPoint)); - - // No need to delay, we're going to try a disconnected connection immediately so it'll fail...
- conn.IgnoreConnect = true; - Log("simulating failure"); - server.SimulateConnectionFailure(); - Log("simulated failure"); - conn.IgnoreConnect = false; - Log("pinging - expect failure"); - Assert.Throws(() => server.Ping()); - Log("pinged"); - } - - // Heartbeat should reconnect by now - await UntilCondition(TimeSpan.FromSeconds(10), () => server.IsConnected); - - Log("pinging - expect success"); - var time = server.Ping(); - Log("pinged"); - Log(time.ToString()); - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/ConnectFailTimeoutTests.cs b/tests/StackExchange.Redis.Tests/ConnectFailTimeoutTests.cs new file mode 100644 index 000000000..6a7e253d7 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/ConnectFailTimeoutTests.cs @@ -0,0 +1,45 @@ +using System; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class ConnectFailTimeoutTests(ITestOutputHelper output) : TestBase(output) +{ + [Fact] + public async Task NoticesConnectFail() + { + SetExpectedAmbientFailureCount(-1); + await using var conn = Create(allowAdmin: true, shared: false, backlogPolicy: BacklogPolicy.FailFast); + + var server = conn.GetServer(conn.GetEndPoints()[0]); + + await RunBlockingSynchronousWithExtraThreadAsync(InnerScenario).ForAwait(); + + void InnerScenario() + { + conn.ConnectionFailed += (s, a) => + Log("Disconnected: " + EndPointCollection.ToString(a.EndPoint)); + conn.ConnectionRestored += (s, a) => + Log("Reconnected: " + EndPointCollection.ToString(a.EndPoint)); + + // No need to delay, we're going to try a disconnected connection immediately so it'll fail... 
+ conn.IgnoreConnect = true; + Log("simulating failure"); + server.SimulateConnectionFailure(SimulatedFailureType.All); + Log("simulated failure"); + conn.IgnoreConnect = false; + Log("pinging - expect failure"); + Assert.Throws(() => server.Ping()); + Log("pinged"); + } + + // Heartbeat should reconnect by now + await UntilConditionAsync(TimeSpan.FromSeconds(10), () => server.IsConnected); + + Log("pinging - expect success"); + var time = await server.PingAsync(); + Log("pinged"); + Log(time.ToString()); + } +} diff --git a/tests/StackExchange.Redis.Tests/ConnectToUnexistingHost.cs b/tests/StackExchange.Redis.Tests/ConnectToUnexistingHost.cs deleted file mode 100644 index e2c454a5c..000000000 --- a/tests/StackExchange.Redis.Tests/ConnectToUnexistingHost.cs +++ /dev/null @@ -1,95 +0,0 @@ -using System; -using System.Diagnostics; -using System.Linq; -using System.Threading.Tasks; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - public class ConnectToUnexistingHost : TestBase - { - public ConnectToUnexistingHost(ITestOutputHelper output) : base (output) { } - - [Fact] - public async Task FailsWithinTimeout() - { - const int timeout = 1000; - var sw = Stopwatch.StartNew(); - try - { - var config = new ConfigurationOptions - { - EndPoints = { { "invalid", 1234 } }, - ConnectTimeout = timeout - }; - - using (ConnectionMultiplexer.Connect(config, Writer)) - { - await Task.Delay(10000).ForAwait(); - } - - Assert.True(false, "Connect should fail with RedisConnectionException exception"); - } - catch (RedisConnectionException) - { - var elapsed = sw.ElapsedMilliseconds; - Log("Elapsed time: " + elapsed); - Log("Timeout: " + timeout); - Assert.True(elapsed < 9000, "Connect should fail within ConnectTimeout, ElapsedMs: " + elapsed); - } - } - - [Fact] - public async Task CanNotOpenNonsenseConnection_IP() - { - await RunBlockingSynchronousWithExtraThreadAsync(innerScenario).ForAwait(); - void innerScenario() - { - var ex = Assert.Throws(() 
=> - { - using (ConnectionMultiplexer.Connect(TestConfig.Current.MasterServer + ":6500,connectTimeout=1000", Writer)) { } - }); - Log(ex.ToString()); - } - } - - [Fact] - public async Task CanNotOpenNonsenseConnection_DNS() - { - var ex = await Assert.ThrowsAsync(async () => - { - using (await ConnectionMultiplexer.ConnectAsync($"doesnot.exist.ds.{Guid.NewGuid():N}.com:6500,connectTimeout=1000", Writer).ForAwait()) { } - }).ForAwait(); - Log(ex.ToString()); - } - - [Fact] - public async Task CreateDisconnectedNonsenseConnection_IP() - { - await RunBlockingSynchronousWithExtraThreadAsync(innerScenario).ForAwait(); - void innerScenario() - { - using (var conn = ConnectionMultiplexer.Connect(TestConfig.Current.MasterServer + ":6500,abortConnect=false,connectTimeout=1000", Writer)) - { - Assert.False(conn.GetServer(conn.GetEndPoints().Single()).IsConnected); - Assert.False(conn.GetDatabase().IsConnected(default(RedisKey))); - } - } - } - - [Fact] - public async Task CreateDisconnectedNonsenseConnection_DNS() - { - await RunBlockingSynchronousWithExtraThreadAsync(innerScenario).ForAwait(); - void innerScenario() - { - using (var conn = ConnectionMultiplexer.Connect($"doesnot.exist.ds.{Guid.NewGuid():N}.com:6500,abortConnect=false,connectTimeout=1000", Writer)) - { - Assert.False(conn.GetServer(conn.GetEndPoints().Single()).IsConnected); - Assert.False(conn.GetDatabase().IsConnected(default(RedisKey))); - } - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/ConnectToUnexistingHostTests.cs b/tests/StackExchange.Redis.Tests/ConnectToUnexistingHostTests.cs new file mode 100644 index 000000000..cc015c711 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/ConnectToUnexistingHostTests.cs @@ -0,0 +1,91 @@ +using System; +using System.Diagnostics; +using System.Linq; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class ConnectToUnexistingHostTests(ITestOutputHelper output) : TestBase(output) +{ + [Fact] + public async 
Task FailsWithinTimeout() + { + const int timeout = 1000; + var sw = Stopwatch.StartNew(); + try + { + var config = new ConfigurationOptions + { + EndPoints = { { "invalid", 1234 } }, + ConnectTimeout = timeout, + }; + + await using (ConnectionMultiplexer.Connect(config, Writer)) + { + await Task.Delay(10000).ForAwait(); + } + + Assert.Fail("Connect should fail with RedisConnectionException exception"); + } + catch (RedisConnectionException) + { + var elapsed = sw.ElapsedMilliseconds; + Log("Elapsed time: " + elapsed); + Log("Timeout: " + timeout); + Assert.True(elapsed < 9000, "Connect should fail within ConnectTimeout, ElapsedMs: " + elapsed); + } + } + + [Fact] + public async Task CanNotOpenNonsenseConnection_IP() + { + await RunBlockingSynchronousWithExtraThreadAsync(InnerScenario).ForAwait(); + void InnerScenario() + { + var ex = Assert.Throws(() => + { + using (ConnectionMultiplexer.Connect(TestConfig.Current.PrimaryServer + ":6500,connectTimeout=1000,connectRetry=0", Writer)) { } + }); + Log(ex.ToString()); + } + } + + [Fact] + public async Task CanNotOpenNonsenseConnection_DNS() + { + var ex = await Assert.ThrowsAsync(async () => + { + using (await ConnectionMultiplexer.ConnectAsync($"doesnot.exist.ds.{Guid.NewGuid():N}.com:6500,connectTimeout=1000,connectRetry=0", Writer).ForAwait()) { } + }).ForAwait(); + Log(ex.ToString()); + } + + [Fact] + public async Task CreateDisconnectedNonsenseConnection_IP() + { + await RunBlockingSynchronousWithExtraThreadAsync(InnerScenario).ForAwait(); + void InnerScenario() + { + using (var conn = ConnectionMultiplexer.Connect(TestConfig.Current.PrimaryServer + ":6500,abortConnect=false,connectTimeout=1000,connectRetry=0", Writer)) + { + Assert.False(conn.GetServer(conn.GetEndPoints().Single()).IsConnected); + Assert.False(conn.GetDatabase().IsConnected(default(RedisKey))); + } + } + } + + [Fact] + public async Task CreateDisconnectedNonsenseConnection_DNS() + { + await 
RunBlockingSynchronousWithExtraThreadAsync(InnerScenario).ForAwait(); + void InnerScenario() + { + using (var conn = ConnectionMultiplexer.Connect($"doesnot.exist.ds.{Guid.NewGuid():N}.com:6500,abortConnect=false,connectTimeout=1000,connectRetry=0", Writer)) + { + Assert.False(conn.GetServer(conn.GetEndPoints().Single()).IsConnected); + Assert.False(conn.GetDatabase().IsConnected(default(RedisKey))); + } + } + } +} diff --git a/tests/StackExchange.Redis.Tests/ConnectingFailDetection.cs b/tests/StackExchange.Redis.Tests/ConnectingFailDetection.cs deleted file mode 100644 index c78a1c4df..000000000 --- a/tests/StackExchange.Redis.Tests/ConnectingFailDetection.cs +++ /dev/null @@ -1,147 +0,0 @@ -using System; -using System.Threading; -using System.Threading.Tasks; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - public class ConnectingFailDetection : TestBase - { - public ConnectingFailDetection(ITestOutputHelper output) : base (output) { } - - protected override string GetConfiguration() => TestConfig.Current.MasterServerAndPort + "," + TestConfig.Current.ReplicaServerAndPort; - -#if DEBUG - [Fact] - public async Task FastNoticesFailOnConnectingSyncCompletion() - { - try - { - using (var muxer = Create(keepAlive: 1, connectTimeout: 10000, allowAdmin: true)) - { - var conn = muxer.GetDatabase(); - conn.Ping(); - - var server = muxer.GetServer(muxer.GetEndPoints()[0]); - var server2 = muxer.GetServer(muxer.GetEndPoints()[1]); - - muxer.AllowConnect = false; - - // muxer.IsConnected is true of *any* are connected, simulate failure for all cases. 
- server.SimulateConnectionFailure(); - Assert.False(server.IsConnected); - Assert.True(server2.IsConnected); - Assert.True(muxer.IsConnected); - - server2.SimulateConnectionFailure(); - Assert.False(server.IsConnected); - Assert.False(server2.IsConnected); - Assert.False(muxer.IsConnected); - - // should reconnect within 1 keepalive interval - muxer.AllowConnect = true; - Log("Waiting for reconnect"); - await Task.Delay(2000).ForAwait(); - - Assert.True(muxer.IsConnected); - } - } - finally - { - ClearAmbientFailures(); - } - } - - [Fact] - public async Task FastNoticesFailOnConnectingAsyncCompletion() - { - try - { - using (var muxer = Create(keepAlive: 1, connectTimeout: 10000, allowAdmin: true)) - { - var conn = muxer.GetDatabase(); - conn.Ping(); - - var server = muxer.GetServer(muxer.GetEndPoints()[0]); - var server2 = muxer.GetServer(muxer.GetEndPoints()[1]); - - muxer.AllowConnect = false; - - // muxer.IsConnected is true of *any* are connected, simulate failure for all cases. - server.SimulateConnectionFailure(); - Assert.False(server.IsConnected); - Assert.True(server2.IsConnected); - Assert.True(muxer.IsConnected); - - server2.SimulateConnectionFailure(); - Assert.False(server.IsConnected); - Assert.False(server2.IsConnected); - Assert.False(muxer.IsConnected); - - // should reconnect within 1 keepalive interval - muxer.AllowConnect = true; - Log("Waiting for reconnect"); - await Task.Delay(2000).ForAwait(); - - Assert.True(muxer.IsConnected); - } - } - finally - { - ClearAmbientFailures(); - } - } - - [Fact] - public async Task Issue922_ReconnectRaised() - { - var config = ConfigurationOptions.Parse(TestConfig.Current.MasterServerAndPort); - config.AbortOnConnectFail = true; - config.KeepAlive = 10; - config.SyncTimeout = 1000; - config.ReconnectRetryPolicy = new ExponentialRetry(5000); - config.AllowAdmin = true; - - int failCount = 0, restoreCount = 0; - - using (var muxer = ConnectionMultiplexer.Connect(config)) - { - muxer.ConnectionFailed += 
delegate { Interlocked.Increment(ref failCount); }; - muxer.ConnectionRestored += delegate { Interlocked.Increment(ref restoreCount); }; - - muxer.GetDatabase(); - Assert.Equal(0, Volatile.Read(ref failCount)); - Assert.Equal(0, Volatile.Read(ref restoreCount)); - - var server = muxer.GetServer(TestConfig.Current.MasterServerAndPort); - server.SimulateConnectionFailure(); - - await UntilCondition(TimeSpan.FromSeconds(10), () => Volatile.Read(ref failCount) + Volatile.Read(ref restoreCount) == 4); - // interactive+subscriber = 2 - Assert.Equal(2, Volatile.Read(ref failCount)); - Assert.Equal(2, Volatile.Read(ref restoreCount)); - } - } -#endif - - [Fact] - public void ConnectsWhenBeginConnectCompletesSynchronously() - { - try - { - using (var muxer = Create(keepAlive: 1, connectTimeout: 3000)) - { - var conn = muxer.GetDatabase(); - conn.Ping(); - - Assert.True(muxer.IsConnected); - } - } - finally - { - ClearAmbientFailures(); - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/ConnectingFailDetectionTests.cs b/tests/StackExchange.Redis.Tests/ConnectingFailDetectionTests.cs new file mode 100644 index 000000000..a905c613a --- /dev/null +++ b/tests/StackExchange.Redis.Tests/ConnectingFailDetectionTests.cs @@ -0,0 +1,169 @@ +using System; +using System.Threading; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class ConnectingFailDetectionTests(ITestOutputHelper output) : TestBase(output) +{ + protected override string GetConfiguration() => TestConfig.Current.PrimaryServerAndPort + "," + TestConfig.Current.ReplicaServerAndPort; + + [Fact] + public async Task FastNoticesFailOnConnectingSyncCompletion() + { + try + { + await using var conn = Create(keepAlive: 1, connectTimeout: 10000, allowAdmin: true, shared: false); + conn.RawConfig.ReconnectRetryPolicy = new LinearRetry(200); + + var db = conn.GetDatabase(); + await db.PingAsync(); + + var server = conn.GetServer(conn.GetEndPoints()[0]); + var server2 = 
conn.GetServer(conn.GetEndPoints()[1]); + + conn.AllowConnect = false; + + // muxer.IsConnected is true of *any* are connected, simulate failure for all cases. + server.SimulateConnectionFailure(SimulatedFailureType.All); + Assert.False(server.IsConnected); + Assert.True(server2.IsConnected); + Assert.True(conn.IsConnected); + + server2.SimulateConnectionFailure(SimulatedFailureType.All); + Assert.False(server.IsConnected); + Assert.False(server2.IsConnected); + Assert.False(conn.IsConnected); + + // should reconnect within 1 keepalive interval + conn.AllowConnect = true; + Log("Waiting for reconnect"); + await UntilConditionAsync(TimeSpan.FromSeconds(2), () => conn.IsConnected).ForAwait(); + + Assert.True(conn.IsConnected); + } + finally + { + ClearAmbientFailures(); + } + } + + [Fact] + public async Task FastNoticesFailOnConnectingAsyncCompletion() + { + try + { + await using var conn = Create(keepAlive: 1, connectTimeout: 10000, allowAdmin: true, shared: false); + conn.RawConfig.ReconnectRetryPolicy = new LinearRetry(200); + + var db = conn.GetDatabase(); + await db.PingAsync(); + + var server = conn.GetServer(conn.GetEndPoints()[0]); + var server2 = conn.GetServer(conn.GetEndPoints()[1]); + + conn.AllowConnect = false; + + // muxer.IsConnected is true of *any* are connected, simulate failure for all cases. 
+ server.SimulateConnectionFailure(SimulatedFailureType.All); + Assert.False(server.IsConnected); + Assert.True(server2.IsConnected); + Assert.True(conn.IsConnected); + + server2.SimulateConnectionFailure(SimulatedFailureType.All); + Assert.False(server.IsConnected); + Assert.False(server2.IsConnected); + Assert.False(conn.IsConnected); + + // should reconnect within 1 keepalive interval + conn.AllowConnect = true; + Log("Waiting for reconnect"); + await UntilConditionAsync(TimeSpan.FromSeconds(2), () => conn.IsConnected).ForAwait(); + + Assert.True(conn.IsConnected); + } + finally + { + ClearAmbientFailures(); + } + } + + [Fact] + public async Task Issue922_ReconnectRaised() + { + var config = ConfigurationOptions.Parse(TestConfig.Current.PrimaryServerAndPort); + config.AbortOnConnectFail = true; + config.KeepAlive = 1; + config.SyncTimeout = 1000; + config.AsyncTimeout = 1000; + config.ReconnectRetryPolicy = new ExponentialRetry(5000); + config.AllowAdmin = true; + config.BacklogPolicy = BacklogPolicy.FailFast; + + int failCount = 0, restoreCount = 0; + + await using var conn = await ConnectionMultiplexer.ConnectAsync(config); + + conn.ConnectionFailed += (s, e) => + { + Interlocked.Increment(ref failCount); + Log($"Connection Failed ({e.ConnectionType}, {e.FailureType}): {e.Exception}"); + }; + conn.ConnectionRestored += (s, e) => + { + Interlocked.Increment(ref restoreCount); + Log($"Connection Restored ({e.ConnectionType}, {e.FailureType})"); + }; + + conn.GetDatabase(); + Assert.Equal(0, Volatile.Read(ref failCount)); + Assert.Equal(0, Volatile.Read(ref restoreCount)); + + var server = conn.GetServer(TestConfig.Current.PrimaryServerAndPort); + server.SimulateConnectionFailure(SimulatedFailureType.All); + + await UntilConditionAsync(TimeSpan.FromSeconds(10), () => Volatile.Read(ref failCount) >= 2 && Volatile.Read(ref restoreCount) >= 2); + + // interactive+subscriber = 2 + var failCountSnapshot = Volatile.Read(ref failCount); + Assert.True(failCountSnapshot 
>= 2, $"failCount {failCountSnapshot} >= 2"); + + var restoreCountSnapshot = Volatile.Read(ref restoreCount); + Assert.True(restoreCountSnapshot >= 2, $"restoreCount ({restoreCountSnapshot}) >= 2"); + } + + [Fact] + public async Task ConnectsWhenBeginConnectCompletesSynchronously() + { + try + { + await using var conn = Create(keepAlive: 1, connectTimeout: 3000); + + var db = conn.GetDatabase(); + await db.PingAsync(); + + Assert.True(conn.IsConnected); + } + finally + { + ClearAmbientFailures(); + } + } + + [Fact] + public async Task ConnectIncludesSubscriber() + { + await using var conn = Create(keepAlive: 1, connectTimeout: 3000, shared: false); + + var db = conn.GetDatabase(); + await db.PingAsync(); + Assert.True(conn.IsConnected); + + foreach (var server in conn.GetServerSnapshot()) + { + Assert.Equal(PhysicalBridge.State.ConnectedEstablished, server.InteractiveConnectionState); + Assert.Equal(PhysicalBridge.State.ConnectedEstablished, server.SubscriptionConnectionState); + } + } +} diff --git a/tests/StackExchange.Redis.Tests/ConnectionFailedErrors.cs b/tests/StackExchange.Redis.Tests/ConnectionFailedErrors.cs deleted file mode 100644 index 1f280a95b..000000000 --- a/tests/StackExchange.Redis.Tests/ConnectionFailedErrors.cs +++ /dev/null @@ -1,206 +0,0 @@ -using System; -using System.Linq; -using System.Security.Authentication; -using System.Threading.Tasks; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - public class ConnectionFailedErrors : TestBase - { - public ConnectionFailedErrors(ITestOutputHelper output) : base (output) { } - - [Theory] - [InlineData(true)] - [InlineData(false)] - public async Task SSLCertificateValidationError(bool isCertValidationSucceeded) - { - Skip.IfNoConfig(nameof(TestConfig.Config.AzureCacheServer), TestConfig.Current.AzureCacheServer); - Skip.IfNoConfig(nameof(TestConfig.Config.AzureCachePassword), TestConfig.Current.AzureCachePassword); - - var options = new ConfigurationOptions(); - 
options.EndPoints.Add(TestConfig.Current.AzureCacheServer); - options.Ssl = true; - options.Password = TestConfig.Current.AzureCachePassword; - options.CertificateValidation += (sender, cert, chain, errors) => isCertValidationSucceeded; - options.AbortOnConnectFail = false; - - using (var connection = ConnectionMultiplexer.Connect(options)) - { - await RunBlockingSynchronousWithExtraThreadAsync(innerScenario).ForAwait(); - void innerScenario() - { - connection.ConnectionFailed += (sender, e) => - Assert.Equal(ConnectionFailureType.AuthenticationFailure, e.FailureType); - if (!isCertValidationSucceeded) - { - //validate that in this case it throws an certificatevalidation exception - var outer = Assert.Throws(() => connection.GetDatabase().Ping()); - Assert.Equal(ConnectionFailureType.UnableToResolvePhysicalConnection, outer.FailureType); - - Assert.NotNull(outer.InnerException); - var inner = Assert.IsType(outer.InnerException); - Assert.Equal(ConnectionFailureType.AuthenticationFailure, inner.FailureType); - - Assert.NotNull(inner.InnerException); - var innerMost = Assert.IsType(inner.InnerException); - Assert.Equal("The remote certificate is invalid according to the validation procedure.", innerMost.Message); - } - else - { - connection.GetDatabase().Ping(); - } - } - - // wait for a second for connectionfailed event to fire - await Task.Delay(1000).ForAwait(); - } - } - - [Fact] - public async Task AuthenticationFailureError() - { - Skip.IfNoConfig(nameof(TestConfig.Config.AzureCacheServer), TestConfig.Current.AzureCacheServer); - - var options = new ConfigurationOptions(); - options.EndPoints.Add(TestConfig.Current.AzureCacheServer); - options.Ssl = true; - options.Password = ""; - options.AbortOnConnectFail = false; - options.CertificateValidation += SSL.ShowCertFailures(Writer); - using (var muxer = ConnectionMultiplexer.Connect(options)) - { - await RunBlockingSynchronousWithExtraThreadAsync(innerScenario).ForAwait(); - void innerScenario() - { - 
muxer.ConnectionFailed += (sender, e) => - { - if (e.FailureType == ConnectionFailureType.SocketFailure) Skip.Inconclusive("socket fail"); // this is OK too - Assert.Equal(ConnectionFailureType.AuthenticationFailure, e.FailureType); - }; - var ex = Assert.Throws(() => muxer.GetDatabase().Ping()); - - Assert.NotNull(ex.InnerException); - var rde = Assert.IsType(ex.InnerException); - Assert.Equal(CommandStatus.WaitingToBeSent, ex.CommandStatus); - Assert.Equal(ConnectionFailureType.AuthenticationFailure, rde.FailureType); - Assert.Equal("Error: NOAUTH Authentication required. Verify if the Redis password provided is correct.", rde.InnerException.Message); - } - - //wait for a second for connectionfailed event to fire - await Task.Delay(1000).ForAwait(); - } - } - - [Fact] - public async Task SocketFailureError() - { - await RunBlockingSynchronousWithExtraThreadAsync(innerScenario).ForAwait(); - void innerScenario() - { - var options = new ConfigurationOptions(); - options.EndPoints.Add($"{Guid.NewGuid():N}.redis.cache.windows.net"); - options.Ssl = true; - options.Password = ""; - options.AbortOnConnectFail = false; - options.ConnectTimeout = 1000; - var outer = Assert.Throws(() => - { - using (var muxer = ConnectionMultiplexer.Connect(options)) - { - muxer.GetDatabase().Ping(); - } - }); - Assert.Equal(ConnectionFailureType.UnableToResolvePhysicalConnection, outer.FailureType); - - Assert.NotNull(outer.InnerException); - if (outer.InnerException is RedisConnectionException rce) - { - Assert.Equal(ConnectionFailureType.UnableToConnect, rce.FailureType); - } - else if (outer.InnerException is AggregateException ae - && ae.InnerExceptions.Any(e => e is RedisConnectionException rce2 - && rce2.FailureType == ConnectionFailureType.UnableToConnect)) - { - // fine; at least *one* of them is the one we were hoping to see - } - else - { - Writer.WriteLine(outer.InnerException.ToString()); - if (outer.InnerException is AggregateException inner) - { - foreach (var ex in 
inner.InnerExceptions) - { - Writer.WriteLine(ex.ToString()); - } - } - Assert.False(true); // force fail - } - } - } - - [Fact] - public async Task AbortOnConnectFailFalseConnectTimeoutError() - { - await RunBlockingSynchronousWithExtraThreadAsync(innerScenario).ForAwait(); - void innerScenario() - { - Skip.IfNoConfig(nameof(TestConfig.Config.AzureCacheServer), TestConfig.Current.AzureCacheServer); - Skip.IfNoConfig(nameof(TestConfig.Config.AzureCachePassword), TestConfig.Current.AzureCachePassword); - - var options = new ConfigurationOptions(); - options.EndPoints.Add(TestConfig.Current.AzureCacheServer); - options.Ssl = true; - options.ConnectTimeout = 0; - options.Password = TestConfig.Current.AzureCachePassword; - using (var muxer = ConnectionMultiplexer.Connect(options)) - { - var ex = Assert.Throws(() => muxer.GetDatabase().Ping()); - Assert.Contains("ConnectTimeout", ex.Message); - } - } - } - - [Fact] - public void TryGetAzureRoleInstanceIdNoThrow() - { - Assert.Null(ConnectionMultiplexer.TryGetAzureRoleInstanceIdNoThrow()); - } - -#if DEBUG - [Fact] - public async Task CheckFailureRecovered() - { - try - { - using (var muxer = Create(keepAlive: 1, connectTimeout: 10000, allowAdmin: true)) - { - await RunBlockingSynchronousWithExtraThreadAsync(innerScenario).ForAwait(); - void innerScenario() - { - muxer.GetDatabase(); - var server = muxer.GetServer(muxer.GetEndPoints()[0]); - - muxer.AllowConnect = false; - - server.SimulateConnectionFailure(); - - Assert.Equal(ConnectionFailureType.SocketFailure, ((RedisConnectionException)muxer.GetServerSnapshot()[0].LastException).FailureType); - - // should reconnect within 1 keepalive interval - muxer.AllowConnect = true; - } - await Task.Delay(2000).ForAwait(); - - Assert.Null(muxer.GetServerSnapshot()[0].LastException); - } - } - finally - { - ClearAmbientFailures(); - } - } -#endif - } -} diff --git a/tests/StackExchange.Redis.Tests/ConnectionFailedErrorsTests.cs 
b/tests/StackExchange.Redis.Tests/ConnectionFailedErrorsTests.cs new file mode 100644 index 000000000..ce1a31980 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/ConnectionFailedErrorsTests.cs @@ -0,0 +1,205 @@ +using System; +using System.Linq; +using System.Security.Authentication; +using System.Threading.Tasks; +using StackExchange.Redis.Configuration; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class ConnectionFailedErrorsTests(ITestOutputHelper output) : TestBase(output) +{ + [Theory] + [InlineData(true)] + [InlineData(false)] + public async Task SSLCertificateValidationError(bool isCertValidationSucceeded) + { + Skip.IfNoConfig(nameof(TestConfig.Config.AzureCacheServer), TestConfig.Current.AzureCacheServer); + Skip.IfNoConfig(nameof(TestConfig.Config.AzureCachePassword), TestConfig.Current.AzureCachePassword); + + var options = new ConfigurationOptions(); + options.EndPoints.Add(TestConfig.Current.AzureCacheServer); + options.Ssl = true; + options.Password = TestConfig.Current.AzureCachePassword; + options.CertificateValidation += (sender, cert, chain, errors) => isCertValidationSucceeded; + options.AbortOnConnectFail = false; + + await using var conn = await ConnectionMultiplexer.ConnectAsync(options); + + await RunBlockingSynchronousWithExtraThreadAsync(InnerScenario).ForAwait(); + + void InnerScenario() + { + conn.ConnectionFailed += (sender, e) => + Assert.Equal(ConnectionFailureType.AuthenticationFailure, e.FailureType); + if (!isCertValidationSucceeded) + { + // Validate that in this case it throws an certificatevalidation exception + var outer = Assert.Throws(() => conn.GetDatabase().Ping()); + Assert.Equal(ConnectionFailureType.UnableToResolvePhysicalConnection, outer.FailureType); + + Assert.NotNull(outer.InnerException); + var inner = Assert.IsType(outer.InnerException); + Assert.Equal(ConnectionFailureType.AuthenticationFailure, inner.FailureType); + + Assert.NotNull(inner.InnerException); + var innerMost = 
Assert.IsType(inner.InnerException); + Assert.Equal("The remote certificate is invalid according to the validation procedure.", innerMost.Message); + } + else + { + conn.GetDatabase().Ping(); + } + } + + // wait for a second for connectionfailed event to fire + await Task.Delay(1000).ForAwait(); + } + + [Fact] + public async Task AuthenticationFailureError() + { + Skip.IfNoConfig(nameof(TestConfig.Config.AzureCacheServer), TestConfig.Current.AzureCacheServer); + + var options = new ConfigurationOptions(); + options.EndPoints.Add(TestConfig.Current.AzureCacheServer); + options.Ssl = true; + options.Password = ""; + options.AbortOnConnectFail = false; + options.CertificateValidation += SSLTests.ShowCertFailures(Writer); + + await using var conn = await ConnectionMultiplexer.ConnectAsync(options); + + await RunBlockingSynchronousWithExtraThreadAsync(InnerScenario).ForAwait(); + void InnerScenario() + { + conn.ConnectionFailed += (sender, e) => + { + if (e.FailureType == ConnectionFailureType.SocketFailure) Assert.Skip("socket fail"); // this is OK too + Assert.Equal(ConnectionFailureType.AuthenticationFailure, e.FailureType); + }; + var ex = Assert.Throws(() => conn.GetDatabase().Ping()); + + Assert.NotNull(ex.InnerException); + var rde = Assert.IsType(ex.InnerException); + Assert.Equal(CommandStatus.WaitingToBeSent, ex.CommandStatus); + Assert.Equal(ConnectionFailureType.AuthenticationFailure, rde.FailureType); + Assert.NotNull(rde.InnerException); + Assert.Equal("Error: NOAUTH Authentication required. 
Verify if the Redis password provided is correct.", rde.InnerException.Message); + } + + // Wait for a second for connectionfailed event to fire + await Task.Delay(1000).ForAwait(); + } + + [Fact] + public async Task SocketFailureError() + { + await RunBlockingSynchronousWithExtraThreadAsync(InnerScenario).ForAwait(); + void InnerScenario() + { + var options = new ConfigurationOptions(); + options.EndPoints.Add($"{Guid.NewGuid():N}.redis.cache.windows.net"); + options.Ssl = true; + options.Password = ""; + options.AbortOnConnectFail = false; + options.ConnectTimeout = 1000; + options.BacklogPolicy = BacklogPolicy.FailFast; + var outer = Assert.Throws(() => + { + using var conn = ConnectionMultiplexer.Connect(options); + + conn.GetDatabase().Ping(); + }); + Assert.Equal(ConnectionFailureType.UnableToResolvePhysicalConnection, outer.FailureType); + + Assert.NotNull(outer.InnerException); + if (outer.InnerException is RedisConnectionException rce) + { + Assert.Equal(ConnectionFailureType.UnableToConnect, rce.FailureType); + } + else if (outer.InnerException is AggregateException ae + && ae.InnerExceptions.Any(e => e is RedisConnectionException rce2 + && rce2.FailureType == ConnectionFailureType.UnableToConnect)) + { + // fine; at least *one* of them is the one we were hoping to see + } + else + { + Log(outer.InnerException.ToString()); + if (outer.InnerException is AggregateException inner) + { + foreach (var ex in inner.InnerExceptions) + { + Log(ex.ToString()); + } + } + Assert.False(true); // force fail + } + } + } + + [Fact] + public async Task AbortOnConnectFailFalseConnectTimeoutError() + { + await RunBlockingSynchronousWithExtraThreadAsync(InnerScenario).ForAwait(); + void InnerScenario() + { + Skip.IfNoConfig(nameof(TestConfig.Config.AzureCacheServer), TestConfig.Current.AzureCacheServer); + Skip.IfNoConfig(nameof(TestConfig.Config.AzureCachePassword), TestConfig.Current.AzureCachePassword); + + var options = new ConfigurationOptions(); + 
options.EndPoints.Add(TestConfig.Current.AzureCacheServer); + options.Ssl = true; + options.ConnectTimeout = 0; + options.Password = TestConfig.Current.AzureCachePassword; + + using var conn = ConnectionMultiplexer.Connect(options); + + var ex = Assert.Throws(() => conn.GetDatabase().Ping()); + Assert.Contains("ConnectTimeout", ex.Message); + } + } + + [Fact] + public void TryGetAzureRoleInstanceIdNoThrow() + { + Assert.Null(DefaultOptionsProvider.TryGetAzureRoleInstanceIdNoThrow()); + } + +#if DEBUG + [Fact] + public async Task CheckFailureRecovered() + { + try + { + await using var conn = Create(keepAlive: 1, connectTimeout: 10000, allowAdmin: true, log: Writer, shared: false); + + await RunBlockingSynchronousWithExtraThreadAsync(InnerScenario).ForAwait(); + void InnerScenario() + { + conn.GetDatabase(); + var server = conn.GetServer(conn.GetEndPoints()[0]); + + conn.AllowConnect = false; + + server.SimulateConnectionFailure(SimulatedFailureType.All); + + var lastFailure = ((RedisConnectionException?)conn.GetServerSnapshot()[0].LastException)!.FailureType; + // Depending on heartbeat races, the last exception will be a socket failure or an internal (follow-up) failure + Assert.Contains(lastFailure, new[] { ConnectionFailureType.SocketFailure, ConnectionFailureType.InternalFailure }); + + // should reconnect within 1 keepalive interval + conn.AllowConnect = true; + } + await Task.Delay(2000).ForAwait(); + + Assert.Null(conn.GetServerSnapshot()[0].LastException); + } + finally + { + ClearAmbientFailures(); + } + } +#endif +} diff --git a/tests/StackExchange.Redis.Tests/ConnectionReconnectRetryPolicyTests.cs b/tests/StackExchange.Redis.Tests/ConnectionReconnectRetryPolicyTests.cs index a3d0e95bc..db5f541b3 100644 --- a/tests/StackExchange.Redis.Tests/ConnectionReconnectRetryPolicyTests.cs +++ b/tests/StackExchange.Redis.Tests/ConnectionReconnectRetryPolicyTests.cs @@ -1,37 +1,49 @@ using System; using Xunit; -using Xunit.Abstractions; -namespace 
StackExchange.Redis.Tests +namespace StackExchange.Redis.Tests; + +public class TransientErrorTests(ITestOutputHelper output) : TestBase(output) { - public class TransientErrorTests : TestBase + [Fact] + public void TestExponentialRetry() + { + IReconnectRetryPolicy exponentialRetry = new ExponentialRetry(5000); + Assert.False(exponentialRetry.ShouldRetry(0, 0)); + Assert.True(exponentialRetry.ShouldRetry(1, 5600)); + Assert.True(exponentialRetry.ShouldRetry(2, 6050)); + Assert.False(exponentialRetry.ShouldRetry(2, 4050)); + } + + [Fact] + public void TestExponentialMaxRetry() + { + IReconnectRetryPolicy exponentialRetry = new ExponentialRetry(5000); + Assert.True(exponentialRetry.ShouldRetry(long.MaxValue, (int)TimeSpan.FromSeconds(30).TotalMilliseconds)); + } + + [Fact] + public void TestExponentialRetryArgs() + { + _ = new ExponentialRetry(5000); + _ = new ExponentialRetry(5000, 10000); + + var ex = Assert.Throws(() => new ExponentialRetry(-1)); + Assert.Equal("deltaBackOffMilliseconds", ex.ParamName); + + ex = Assert.Throws(() => new ExponentialRetry(5000, -1)); + Assert.Equal("maxDeltaBackOffMilliseconds", ex.ParamName); + + ex = Assert.Throws(() => new ExponentialRetry(10000, 5000)); + Assert.Equal("maxDeltaBackOffMilliseconds", ex.ParamName); + } + + [Fact] + public void TestLinearRetry() { - public TransientErrorTests(ITestOutputHelper output) : base (output) { } - - [Fact] - public void TestExponentialRetry() - { - IReconnectRetryPolicy exponentialRetry = new ExponentialRetry(5000); - Assert.False(exponentialRetry.ShouldRetry(0, 0)); - Assert.True(exponentialRetry.ShouldRetry(1, 5600)); - Assert.True(exponentialRetry.ShouldRetry(2, 6050)); - Assert.False(exponentialRetry.ShouldRetry(2, 4050)); - } - - [Fact] - public void TestExponentialMaxRetry() - { - IReconnectRetryPolicy exponentialRetry = new ExponentialRetry(5000); - Assert.True(exponentialRetry.ShouldRetry(long.MaxValue, (int)TimeSpan.FromSeconds(30).TotalMilliseconds)); - } - - [Fact] - public void 
TestLinearRetry() - { - IReconnectRetryPolicy linearRetry = new LinearRetry(5000); - Assert.False(linearRetry.ShouldRetry(0, 0)); - Assert.False(linearRetry.ShouldRetry(2, 4999)); - Assert.True(linearRetry.ShouldRetry(1, 5000)); - } + IReconnectRetryPolicy linearRetry = new LinearRetry(5000); + Assert.False(linearRetry.ShouldRetry(0, 0)); + Assert.False(linearRetry.ShouldRetry(2, 4999)); + Assert.True(linearRetry.ShouldRetry(1, 5000)); } -} \ No newline at end of file +} diff --git a/tests/StackExchange.Redis.Tests/ConnectionShutdown.cs b/tests/StackExchange.Redis.Tests/ConnectionShutdown.cs deleted file mode 100644 index ee7dc0b5c..000000000 --- a/tests/StackExchange.Redis.Tests/ConnectionShutdown.cs +++ /dev/null @@ -1,56 +0,0 @@ -using System.Diagnostics; -using System.Threading; -using System.Threading.Tasks; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - public class ConnectionShutdown : TestBase - { - protected override string GetConfiguration() => TestConfig.Current.MasterServerAndPort; - public ConnectionShutdown(ITestOutputHelper output) : base(output) { } - - [Fact(Skip = "Unfriendly")] - public async Task ShutdownRaisesConnectionFailedAndRestore() - { - using (var conn = Create(allowAdmin: true)) - { - int failed = 0, restored = 0; - Stopwatch watch = Stopwatch.StartNew(); - conn.ConnectionFailed += (sender, args) => - { - Log(watch.Elapsed + ": failed: " + EndPointCollection.ToString(args.EndPoint) + "/" + args.ConnectionType + ": " + args); - Interlocked.Increment(ref failed); - }; - conn.ConnectionRestored += (sender, args) => - { - Log(watch.Elapsed + ": restored: " + EndPointCollection.ToString(args.EndPoint) + "/" + args.ConnectionType + ": " + args); - Interlocked.Increment(ref restored); - }; - var db = conn.GetDatabase(); - db.Ping(); - Assert.Equal(0, Interlocked.CompareExchange(ref failed, 0, 0)); - Assert.Equal(0, Interlocked.CompareExchange(ref restored, 0, 0)); - await Task.Delay(1).ForAwait(); // To 
make compiler happy in Release - - conn.AllowConnect = false; - var server = conn.GetServer(TestConfig.Current.MasterServer, TestConfig.Current.MasterPort); - - SetExpectedAmbientFailureCount(2); - server.SimulateConnectionFailure(); - - db.Ping(CommandFlags.FireAndForget); - await Task.Delay(250).ForAwait(); - Assert.Equal(2, Interlocked.CompareExchange(ref failed, 0, 0)); - Assert.Equal(0, Interlocked.CompareExchange(ref restored, 0, 0)); - conn.AllowConnect = true; - db.Ping(CommandFlags.FireAndForget); - await Task.Delay(1500).ForAwait(); - Assert.Equal(2, Interlocked.CompareExchange(ref failed, 0, 0)); - Assert.Equal(2, Interlocked.CompareExchange(ref restored, 0, 0)); - watch.Stop(); - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/ConnectionShutdownTests.cs b/tests/StackExchange.Redis.Tests/ConnectionShutdownTests.cs new file mode 100644 index 000000000..279d8bd23 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/ConnectionShutdownTests.cs @@ -0,0 +1,50 @@ +using System.Diagnostics; +using System.Threading; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class ConnectionShutdownTests(ITestOutputHelper output) : TestBase(output) +{ + [Fact(Skip = "Unfriendly")] + public async Task ShutdownRaisesConnectionFailedAndRestore() + { + await using var conn = Create(allowAdmin: true, shared: false); + + int failed = 0, restored = 0; + Stopwatch watch = Stopwatch.StartNew(); + conn.ConnectionFailed += (sender, args) => + { + Log(watch.Elapsed + ": failed: " + EndPointCollection.ToString(args.EndPoint) + "/" + args.ConnectionType + ": " + args); + Interlocked.Increment(ref failed); + }; + conn.ConnectionRestored += (sender, args) => + { + Log(watch.Elapsed + ": restored: " + EndPointCollection.ToString(args.EndPoint) + "/" + args.ConnectionType + ": " + args); + Interlocked.Increment(ref restored); + }; + var db = conn.GetDatabase(); + await db.PingAsync(); + Assert.Equal(0, Interlocked.CompareExchange(ref 
failed, 0, 0)); + Assert.Equal(0, Interlocked.CompareExchange(ref restored, 0, 0)); + await Task.Delay(1).ForAwait(); // To make compiler happy in Release + + conn.AllowConnect = false; + var server = conn.GetServer(TestConfig.Current.PrimaryServer, TestConfig.Current.PrimaryPort); + + SetExpectedAmbientFailureCount(2); + server.SimulateConnectionFailure(SimulatedFailureType.All); + + db.Ping(CommandFlags.FireAndForget); + await Task.Delay(250).ForAwait(); + Assert.Equal(2, Interlocked.CompareExchange(ref failed, 0, 0)); + Assert.Equal(0, Interlocked.CompareExchange(ref restored, 0, 0)); + conn.AllowConnect = true; + db.Ping(CommandFlags.FireAndForget); + await Task.Delay(1500).ForAwait(); + Assert.Equal(2, Interlocked.CompareExchange(ref failed, 0, 0)); + Assert.Equal(2, Interlocked.CompareExchange(ref restored, 0, 0)); + watch.Stop(); + } +} diff --git a/tests/StackExchange.Redis.Tests/Constraints.cs b/tests/StackExchange.Redis.Tests/Constraints.cs deleted file mode 100644 index de1388d6d..000000000 --- a/tests/StackExchange.Redis.Tests/Constraints.cs +++ /dev/null @@ -1,50 +0,0 @@ -using System.Threading.Tasks; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - [Collection(SharedConnectionFixture.Key)] - public class Constraints : TestBase - { - public Constraints(ITestOutputHelper output, SharedConnectionFixture fixture) : base(output, fixture) { } - - [Fact] - public void ValueEquals() - { - RedisValue x = 1, y = "1"; - Assert.True(x.Equals(y), "equals"); - Assert.True(x == y, "operator"); - } - - [Fact] - public async Task TestManualIncr() - { - using (var muxer = Create(syncTimeout: 120000)) // big timeout while debugging - { - var key = Me(); - var conn = muxer.GetDatabase(); - for (int i = 0; i < 10; i++) - { - conn.KeyDelete(key, CommandFlags.FireAndForget); - Assert.Equal(1, await ManualIncrAsync(conn, key).ForAwait()); - Assert.Equal(2, await ManualIncrAsync(conn, key).ForAwait()); - Assert.Equal(2, 
(long)conn.StringGet(key)); - } - } - } - - public async Task ManualIncrAsync(IDatabase connection, RedisKey key) - { - var oldVal = (long?)await connection.StringGetAsync(key).ForAwait(); - var newVal = (oldVal ?? 0) + 1; - var tran = connection.CreateTransaction(); - { // check hasn't changed - tran.AddCondition(Condition.StringEqual(key, oldVal)); - _ = tran.StringSetAsync(key, newVal); - if (!await tran.ExecuteAsync().ForAwait()) return null; // aborted - return newVal; - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/ConstraintsTests.cs b/tests/StackExchange.Redis.Tests/ConstraintsTests.cs new file mode 100644 index 000000000..6740fe2b3 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/ConstraintsTests.cs @@ -0,0 +1,44 @@ +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class ConstraintsTests(ITestOutputHelper output, SharedConnectionFixture fixture) : TestBase(output, fixture) +{ + [Fact] + public void ValueEquals() + { + RedisValue x = 1, y = "1"; + Assert.True(x.Equals(y), "equals"); + Assert.True(x == y, "operator"); + } + + [Fact] + public async Task TestManualIncr() + { + await using var conn = Create(syncTimeout: 120000); // big timeout while debugging + + var key = Me(); + var db = conn.GetDatabase(); + for (int i = 0; i < 10; i++) + { + db.KeyDelete(key, CommandFlags.FireAndForget); + Assert.Equal(1, await ManualIncrAsync(db, key).ForAwait()); + Assert.Equal(2, await ManualIncrAsync(db, key).ForAwait()); + Assert.Equal(2, (long)db.StringGet(key)); + } + } + + public static async Task ManualIncrAsync(IDatabase connection, RedisKey key) + { + var oldVal = (long?)await connection.StringGetAsync(key).ForAwait(); + var newVal = (oldVal ?? 
0) + 1; + var tran = connection.CreateTransaction(); + { // check hasn't changed + tran.AddCondition(Condition.StringEqual(key, oldVal)); + _ = tran.StringSetAsync(key, newVal); + if (!await tran.ExecuteAsync().ForAwait()) return null; // aborted + return newVal; + } + } +} diff --git a/tests/StackExchange.Redis.Tests/CopyTests.cs b/tests/StackExchange.Redis.Tests/CopyTests.cs new file mode 100644 index 000000000..e0003136c --- /dev/null +++ b/tests/StackExchange.Redis.Tests/CopyTests.cs @@ -0,0 +1,65 @@ +using System; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class CopyTests(ITestOutputHelper output, SharedConnectionFixture fixture) : TestBase(output, fixture) +{ + [Fact] + public async Task Basic() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var src = Me(); + var dest = Me() + "2"; + _ = db.KeyDelete(dest); + + _ = db.StringSetAsync(src, "Heyyyyy"); + var ke1 = db.KeyCopyAsync(src, dest).ForAwait(); + var ku1 = db.StringGet(dest); + Assert.True(await ke1); + Assert.True(ku1.Equals("Heyyyyy")); + } + + [Fact] + public async Task CrossDB() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var dbDestId = TestConfig.GetDedicatedDB(conn); + var dbDest = conn.GetDatabase(dbDestId); + + var src = Me(); + var dest = Me() + "2"; + dbDest.KeyDelete(dest); + + _ = db.StringSetAsync(src, "Heyyyyy"); + var ke1 = db.KeyCopyAsync(src, dest, dbDestId).ForAwait(); + var ku1 = dbDest.StringGet(dest); + Assert.True(await ke1); + Assert.True(ku1.Equals("Heyyyyy")); + + await Assert.ThrowsAsync(() => db.KeyCopyAsync(src, dest, destinationDatabase: -10)); + } + + [Fact] + public async Task WithReplace() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var src = Me(); + var dest = Me() + "2"; + _ = db.StringSetAsync(src, "foo1"); + _ = db.StringSetAsync(dest, "foo2"); + 
var ke1 = db.KeyCopyAsync(src, dest).ForAwait(); + var ke2 = db.KeyCopyAsync(src, dest, replace: true).ForAwait(); + var ku1 = db.StringGet(dest); + Assert.False(await ke1); // Should fail when not using replace and destination key exist + Assert.True(await ke2); + Assert.True(ku1.Equals("foo1")); + } +} diff --git a/tests/StackExchange.Redis.Tests/DatabaseTests.cs b/tests/StackExchange.Redis.Tests/DatabaseTests.cs new file mode 100644 index 000000000..bb134c4fd --- /dev/null +++ b/tests/StackExchange.Redis.Tests/DatabaseTests.cs @@ -0,0 +1,207 @@ +using System; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class DatabaseTests(ITestOutputHelper output, SharedConnectionFixture fixture) : TestBase(output, fixture) +{ + [Fact] + public async Task CommandCount() + { + await using var conn = Create(); + var server = GetAnyPrimary(conn); + var count = server.CommandCount(); + Assert.True(count > 100); + + count = await server.CommandCountAsync(); + Assert.True(count > 100); + } + + [Fact] + public async Task CommandGetKeys() + { + await using var conn = Create(); + var server = GetAnyPrimary(conn); + + RedisValue[] command = ["MSET", "a", "b", "c", "d", "e", "f"]; + + RedisKey[] keys = server.CommandGetKeys(command); + RedisKey[] expected = ["a", "c", "e"]; + Assert.Equal(keys, expected); + + keys = await server.CommandGetKeysAsync(command); + Assert.Equal(keys, expected); + } + + [Fact] + public async Task CommandList() + { + await using var conn = Create(require: RedisFeatures.v7_0_0_rc1); + var server = GetAnyPrimary(conn); + + var commands = server.CommandList(); + Assert.True(commands.Length > 100); + commands = await server.CommandListAsync(); + Assert.True(commands.Length > 100); + + commands = server.CommandList(moduleName: "JSON"); + Assert.Empty(commands); + commands = await server.CommandListAsync(moduleName: "JSON"); + Assert.Empty(commands); + + commands = server.CommandList(category: "admin"); + 
Assert.True(commands.Length > 10); + commands = await server.CommandListAsync(category: "admin"); + Assert.True(commands.Length > 10); + + commands = server.CommandList(pattern: "a*"); + Assert.True(commands.Length > 10); + commands = await server.CommandListAsync(pattern: "a*"); + Assert.True(commands.Length > 10); + + Assert.Throws(() => server.CommandList(moduleName: "JSON", pattern: "a*")); + await Assert.ThrowsAsync(() => server.CommandListAsync(moduleName: "JSON", pattern: "a*")); + } + + [Fact] + public async Task CountKeys() + { + var db1Id = TestConfig.GetDedicatedDB(); + var db2Id = TestConfig.GetDedicatedDB(); + await using (var conn = Create(allowAdmin: true)) + { + Skip.IfMissingDatabase(conn, db1Id); + Skip.IfMissingDatabase(conn, db2Id); + var server = GetAnyPrimary(conn); + server.FlushDatabase(db1Id, CommandFlags.FireAndForget); + server.FlushDatabase(db2Id, CommandFlags.FireAndForget); + } + await using (var conn = Create(defaultDatabase: db2Id)) + { + Skip.IfMissingDatabase(conn, db1Id); + Skip.IfMissingDatabase(conn, db2Id); + RedisKey key = Me(); + var dba = conn.GetDatabase(db1Id); + var dbb = conn.GetDatabase(db2Id); + dba.StringSet("abc", "def", flags: CommandFlags.FireAndForget); + dba.StringIncrement(key, flags: CommandFlags.FireAndForget); + dbb.StringIncrement(key, flags: CommandFlags.FireAndForget); + + var server = GetAnyPrimary(conn); + var c0 = server.DatabaseSizeAsync(db1Id); + var c1 = server.DatabaseSizeAsync(db2Id); + var c2 = server.DatabaseSizeAsync(); // using default DB, which is db2Id + + Assert.Equal(2, await c0); + Assert.Equal(1, await c1); + Assert.Equal(1, await c2); + } + } + + [Fact] + public async Task DatabaseCount() + { + await using var conn = Create(allowAdmin: true); + + var server = GetAnyPrimary(conn); + var count = server.DatabaseCount; + Log("Count: " + count); + var configVal = server.ConfigGet("databases")[0].Value; + Log("Config databases: " + configVal); + Assert.Equal(int.Parse(configVal), count); + } + 
+ [Fact] + public async Task MultiDatabases() + { + await using var conn = Create(); + + RedisKey key = Me(); + var db0 = conn.GetDatabase(TestConfig.GetDedicatedDB(conn)); + var db1 = conn.GetDatabase(TestConfig.GetDedicatedDB(conn)); + var db2 = conn.GetDatabase(TestConfig.GetDedicatedDB(conn)); + + db0.KeyDelete(key, CommandFlags.FireAndForget); + db1.KeyDelete(key, CommandFlags.FireAndForget); + db2.KeyDelete(key, CommandFlags.FireAndForget); + + db0.StringSet(key, "a", flags: CommandFlags.FireAndForget); + db1.StringSet(key, "b", flags: CommandFlags.FireAndForget); + db2.StringSet(key, "c", flags: CommandFlags.FireAndForget); + + var a = db0.StringGetAsync(key); + var b = db1.StringGetAsync(key); + var c = db2.StringGetAsync(key); + + Assert.Equal("a", await a); // db:0 + Assert.Equal("b", await b); // db:1 + Assert.Equal("c", await c); // db:2 + } + + [Fact] + public async Task SwapDatabases() + { + await using var conn = Create(allowAdmin: true, require: RedisFeatures.v4_0_0); + + RedisKey key = Me(); + var db0id = TestConfig.GetDedicatedDB(conn); + var db0 = conn.GetDatabase(db0id); + var db1id = TestConfig.GetDedicatedDB(conn); + var db1 = conn.GetDatabase(db1id); + + db0.KeyDelete(key, CommandFlags.FireAndForget); + db1.KeyDelete(key, CommandFlags.FireAndForget); + + db0.StringSet(key, "a", flags: CommandFlags.FireAndForget); + db1.StringSet(key, "b", flags: CommandFlags.FireAndForget); + + var a = db0.StringGetAsync(key); + var b = db1.StringGetAsync(key); + + Assert.Equal("a", await a); // db:0 + Assert.Equal("b", await b); // db:1 + + var server = GetServer(conn); + server.SwapDatabases(db0id, db1id); + + var aNew = db1.StringGetAsync(key); + var bNew = db0.StringGetAsync(key); + + Assert.Equal("a", await aNew); // db:1 + Assert.Equal("b", await bNew); // db:0 + } + + [Fact] + public async Task SwapDatabasesAsync() + { + await using var conn = Create(allowAdmin: true, require: RedisFeatures.v4_0_0); + + RedisKey key = Me(); + var db0id = 
TestConfig.GetDedicatedDB(conn); + var db0 = conn.GetDatabase(db0id); + var db1id = TestConfig.GetDedicatedDB(conn); + var db1 = conn.GetDatabase(db1id); + + db0.KeyDelete(key, CommandFlags.FireAndForget); + db1.KeyDelete(key, CommandFlags.FireAndForget); + + db0.StringSet(key, "a", flags: CommandFlags.FireAndForget); + db1.StringSet(key, "b", flags: CommandFlags.FireAndForget); + + var a = db0.StringGetAsync(key); + var b = db1.StringGetAsync(key); + + Assert.Equal("a", await a); // db:0 + Assert.Equal("b", await b); // db:1 + + var server = GetServer(conn); + _ = server.SwapDatabasesAsync(db0id, db1id).ForAwait(); + + var aNew = db1.StringGetAsync(key); + var bNew = db0.StringGetAsync(key); + + Assert.Equal("a", await aNew); // db:1 + Assert.Equal("b", await bNew); // db:0 + } +} diff --git a/tests/StackExchange.Redis.Tests/DatabaseWrapperTests.cs b/tests/StackExchange.Redis.Tests/DatabaseWrapperTests.cs deleted file mode 100644 index f9b539394..000000000 --- a/tests/StackExchange.Redis.Tests/DatabaseWrapperTests.cs +++ /dev/null @@ -1,1166 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Linq.Expressions; -using System.Net; -using System.Text; -using Moq; -using StackExchange.Redis.KeyspaceIsolation; -using Xunit; - -namespace StackExchange.Redis.Tests -{ - - [CollectionDefinition(nameof(MoqDependentCollection), DisableParallelization = true)] - public class MoqDependentCollection { } - - [Collection(nameof(MoqDependentCollection))] - public sealed class DatabaseWrapperTests - { - private readonly Mock mock; - private readonly IDatabase wrapper; - - public DatabaseWrapperTests() - { - mock = new Mock(); - wrapper = new DatabaseWrapper(mock.Object, Encoding.UTF8.GetBytes("prefix:")); - } - - [Fact] - public void CreateBatch() - { - object asyncState = new object(); - IBatch innerBatch = new Mock().Object; - mock.Setup(_ => _.CreateBatch(asyncState)).Returns(innerBatch); - IBatch wrappedBatch = wrapper.CreateBatch(asyncState); - 
mock.Verify(_ => _.CreateBatch(asyncState)); - Assert.IsType(wrappedBatch); - Assert.Same(innerBatch, ((BatchWrapper)wrappedBatch).Inner); - } - - [Fact] - public void CreateTransaction() - { - object asyncState = new object(); - ITransaction innerTransaction = new Mock().Object; - mock.Setup(_ => _.CreateTransaction(asyncState)).Returns(innerTransaction); - ITransaction wrappedTransaction = wrapper.CreateTransaction(asyncState); - mock.Verify(_ => _.CreateTransaction(asyncState)); - Assert.IsType(wrappedTransaction); - Assert.Same(innerTransaction, ((TransactionWrapper)wrappedTransaction).Inner); - } - - [Fact] - public void DebugObject() - { - wrapper.DebugObject("key", CommandFlags.None); - mock.Verify(_ => _.DebugObject("prefix:key", CommandFlags.None)); - } - - [Fact] - public void Get_Database() - { - mock.SetupGet(_ => _.Database).Returns(123); - Assert.Equal(123, wrapper.Database); - } - - [Fact] - public void HashDecrement_1() - { - wrapper.HashDecrement("key", "hashField", 123, CommandFlags.None); - mock.Verify(_ => _.HashDecrement("prefix:key", "hashField", 123, CommandFlags.None)); - } - - [Fact] - public void HashDecrement_2() - { - wrapper.HashDecrement("key", "hashField", 1.23, CommandFlags.None); - mock.Verify(_ => _.HashDecrement("prefix:key", "hashField", 1.23, CommandFlags.None)); - } - - [Fact] - public void HashDelete_1() - { - wrapper.HashDelete("key", "hashField", CommandFlags.None); - mock.Verify(_ => _.HashDelete("prefix:key", "hashField", CommandFlags.None)); - } - - [Fact] - public void HashDelete_2() - { - RedisValue[] hashFields = new RedisValue[0]; - wrapper.HashDelete("key", hashFields, CommandFlags.None); - mock.Verify(_ => _.HashDelete("prefix:key", hashFields, CommandFlags.None)); - } - - [Fact] - public void HashExists() - { - wrapper.HashExists("key", "hashField", CommandFlags.None); - mock.Verify(_ => _.HashExists("prefix:key", "hashField", CommandFlags.None)); - } - - [Fact] - public void HashGet_1() - { - 
wrapper.HashGet("key", "hashField", CommandFlags.None); - mock.Verify(_ => _.HashGet("prefix:key", "hashField", CommandFlags.None)); - } - - [Fact] - public void HashGet_2() - { - RedisValue[] hashFields = new RedisValue[0]; - wrapper.HashGet("key", hashFields, CommandFlags.None); - mock.Verify(_ => _.HashGet("prefix:key", hashFields, CommandFlags.None)); - } - - [Fact] - public void HashGetAll() - { - wrapper.HashGetAll("key", CommandFlags.None); - mock.Verify(_ => _.HashGetAll("prefix:key", CommandFlags.None)); - } - - [Fact] - public void HashIncrement_1() - { - wrapper.HashIncrement("key", "hashField", 123, CommandFlags.None); - mock.Verify(_ => _.HashIncrement("prefix:key", "hashField", 123, CommandFlags.None)); - } - - [Fact] - public void HashIncrement_2() - { - wrapper.HashIncrement("key", "hashField", 1.23, CommandFlags.None); - mock.Verify(_ => _.HashIncrement("prefix:key", "hashField", 1.23, CommandFlags.None)); - } - - [Fact] - public void HashKeys() - { - wrapper.HashKeys("key", CommandFlags.None); - mock.Verify(_ => _.HashKeys("prefix:key", CommandFlags.None)); - } - - [Fact] - public void HashLength() - { - wrapper.HashLength("key", CommandFlags.None); - mock.Verify(_ => _.HashLength("prefix:key", CommandFlags.None)); - } - - [Fact] - public void HashScan() - { - wrapper.HashScan("key", "pattern", 123, flags: CommandFlags.None); - mock.Verify(_ => _.HashScan("prefix:key", "pattern", 123, CommandFlags.None)); - } - - [Fact] - public void HashScan_Full() - { - wrapper.HashScan("key", "pattern", 123, 42, 64, flags: CommandFlags.None); - mock.Verify(_ => _.HashScan("prefix:key", "pattern", 123, 42, 64, CommandFlags.None)); - } - - [Fact] - public void HashSet_1() - { - HashEntry[] hashFields = new HashEntry[0]; - wrapper.HashSet("key", hashFields, CommandFlags.None); - mock.Verify(_ => _.HashSet("prefix:key", hashFields, CommandFlags.None)); - } - - [Fact] - public void HashSet_2() - { - wrapper.HashSet("key", "hashField", "value", When.Exists, 
CommandFlags.None); - mock.Verify(_ => _.HashSet("prefix:key", "hashField", "value", When.Exists, CommandFlags.None)); - } - - [Fact] - public void HashStringLength() - { - wrapper.HashStringLength("key","field", CommandFlags.None); - mock.Verify(_ => _.HashStringLength("prefix:key", "field", CommandFlags.None)); - } - - [Fact] - public void HashValues() - { - wrapper.HashValues("key", CommandFlags.None); - mock.Verify(_ => _.HashValues("prefix:key", CommandFlags.None)); - } - - [Fact] - public void HyperLogLogAdd_1() - { - wrapper.HyperLogLogAdd("key", "value", CommandFlags.None); - mock.Verify(_ => _.HyperLogLogAdd("prefix:key", "value", CommandFlags.None)); - } - - [Fact] - public void HyperLogLogAdd_2() - { - RedisValue[] values = new RedisValue[0]; - wrapper.HyperLogLogAdd("key", values, CommandFlags.None); - mock.Verify(_ => _.HyperLogLogAdd("prefix:key", values, CommandFlags.None)); - } - - [Fact] - public void HyperLogLogLength() - { - wrapper.HyperLogLogLength("key", CommandFlags.None); - mock.Verify(_ => _.HyperLogLogLength("prefix:key", CommandFlags.None)); - } - - [Fact] - public void HyperLogLogMerge_1() - { - wrapper.HyperLogLogMerge("destination", "first", "second", CommandFlags.None); - mock.Verify(_ => _.HyperLogLogMerge("prefix:destination", "prefix:first", "prefix:second", CommandFlags.None)); - } - - [Fact] - public void HyperLogLogMerge_2() - { - RedisKey[] keys = new RedisKey[] { "a", "b" }; - Expression> valid = _ => _.Length == 2 && _[0] == "prefix:a" && _[1] == "prefix:b"; - wrapper.HyperLogLogMerge("destination", keys, CommandFlags.None); - mock.Verify(_ => _.HyperLogLogMerge("prefix:destination", It.Is(valid), CommandFlags.None)); - } - - [Fact] - public void IdentifyEndpoint() - { - wrapper.IdentifyEndpoint("key", CommandFlags.None); - mock.Verify(_ => _.IdentifyEndpoint("prefix:key", CommandFlags.None)); - } - - [Fact] - public void KeyDelete_1() - { - wrapper.KeyDelete("key", CommandFlags.None); - mock.Verify(_ => 
_.KeyDelete("prefix:key", CommandFlags.None)); - } - - [Fact] - public void KeyDelete_2() - { - RedisKey[] keys = new RedisKey[] { "a", "b" }; - Expression> valid = _ => _.Length == 2 && _[0] == "prefix:a" && _[1] == "prefix:b"; - wrapper.KeyDelete(keys, CommandFlags.None); - mock.Verify(_ => _.KeyDelete(It.Is(valid), CommandFlags.None)); - } - - [Fact] - public void KeyDump() - { - wrapper.KeyDump("key", CommandFlags.None); - mock.Verify(_ => _.KeyDump("prefix:key", CommandFlags.None)); - } - - [Fact] - public void KeyExists() - { - wrapper.KeyExists("key", CommandFlags.None); - mock.Verify(_ => _.KeyExists("prefix:key", CommandFlags.None)); - } - - [Fact] - public void KeyExpire_1() - { - TimeSpan expiry = TimeSpan.FromSeconds(123); - wrapper.KeyExpire("key", expiry, CommandFlags.None); - mock.Verify(_ => _.KeyExpire("prefix:key", expiry, CommandFlags.None)); - } - - [Fact] - public void KeyExpire_2() - { - DateTime expiry = DateTime.Now; - wrapper.KeyExpire("key", expiry, CommandFlags.None); - mock.Verify(_ => _.KeyExpire("prefix:key", expiry, CommandFlags.None)); - } - - [Fact] - public void KeyMigrate() - { - EndPoint toServer = new IPEndPoint(IPAddress.Loopback, 123); - wrapper.KeyMigrate("key", toServer, 123, 456, MigrateOptions.Copy, CommandFlags.None); - mock.Verify(_ => _.KeyMigrate("prefix:key", toServer, 123, 456, MigrateOptions.Copy, CommandFlags.None)); - } - - [Fact] - public void KeyMove() - { - wrapper.KeyMove("key", 123, CommandFlags.None); - mock.Verify(_ => _.KeyMove("prefix:key", 123, CommandFlags.None)); - } - - [Fact] - public void KeyPersist() - { - wrapper.KeyPersist("key", CommandFlags.None); - mock.Verify(_ => _.KeyPersist("prefix:key", CommandFlags.None)); - } - - [Fact] - public void KeyRandom() - { - Assert.Throws(() => wrapper.KeyRandom()); - } - - [Fact] - public void KeyRename() - { - wrapper.KeyRename("key", "newKey", When.Exists, CommandFlags.None); - mock.Verify(_ => _.KeyRename("prefix:key", "prefix:newKey", When.Exists, 
CommandFlags.None)); - } - - [Fact] - public void KeyRestore() - { - byte[] value = new byte[0]; - TimeSpan expiry = TimeSpan.FromSeconds(123); - wrapper.KeyRestore("key", value, expiry, CommandFlags.None); - mock.Verify(_ => _.KeyRestore("prefix:key", value, expiry, CommandFlags.None)); - } - - [Fact] - public void KeyTimeToLive() - { - wrapper.KeyTimeToLive("key", CommandFlags.None); - mock.Verify(_ => _.KeyTimeToLive("prefix:key", CommandFlags.None)); - } - - [Fact] - public void KeyType() - { - wrapper.KeyType("key", CommandFlags.None); - mock.Verify(_ => _.KeyType("prefix:key", CommandFlags.None)); - } - - [Fact] - public void ListGetByIndex() - { - wrapper.ListGetByIndex("key", 123, CommandFlags.None); - mock.Verify(_ => _.ListGetByIndex("prefix:key", 123, CommandFlags.None)); - } - - [Fact] - public void ListInsertAfter() - { - wrapper.ListInsertAfter("key", "pivot", "value", CommandFlags.None); - mock.Verify(_ => _.ListInsertAfter("prefix:key", "pivot", "value", CommandFlags.None)); - } - - [Fact] - public void ListInsertBefore() - { - wrapper.ListInsertBefore("key", "pivot", "value", CommandFlags.None); - mock.Verify(_ => _.ListInsertBefore("prefix:key", "pivot", "value", CommandFlags.None)); - } - - [Fact] - public void ListLeftPop() - { - wrapper.ListLeftPop("key", CommandFlags.None); - mock.Verify(_ => _.ListLeftPop("prefix:key", CommandFlags.None)); - } - - [Fact] - public void ListLeftPush_1() - { - wrapper.ListLeftPush("key", "value", When.Exists, CommandFlags.None); - mock.Verify(_ => _.ListLeftPush("prefix:key", "value", When.Exists, CommandFlags.None)); - } - - [Fact] - public void ListLeftPush_2() - { - RedisValue[] values = new RedisValue[0]; - wrapper.ListLeftPush("key", values, CommandFlags.None); - mock.Verify(_ => _.ListLeftPush("prefix:key", values, CommandFlags.None)); - } - - [Fact] - public void ListLeftPush_3() - { - RedisValue[] values = new RedisValue[] { "value1", "value2" }; - wrapper.ListLeftPush("key", values, When.Exists, 
CommandFlags.None); - mock.Verify(_ => _.ListLeftPush("prefix:key", values, When.Exists, CommandFlags.None)); - } - - [Fact] - public void ListLength() - { - wrapper.ListLength("key", CommandFlags.None); - mock.Verify(_ => _.ListLength("prefix:key", CommandFlags.None)); - } - - [Fact] - public void ListRange() - { - wrapper.ListRange("key", 123, 456, CommandFlags.None); - mock.Verify(_ => _.ListRange("prefix:key", 123, 456, CommandFlags.None)); - } - - [Fact] - public void ListRemove() - { - wrapper.ListRemove("key", "value", 123, CommandFlags.None); - mock.Verify(_ => _.ListRemove("prefix:key", "value", 123, CommandFlags.None)); - } - - [Fact] - public void ListRightPop() - { - wrapper.ListRightPop("key", CommandFlags.None); - mock.Verify(_ => _.ListRightPop("prefix:key", CommandFlags.None)); - } - - [Fact] - public void ListRightPopLeftPush() - { - wrapper.ListRightPopLeftPush("source", "destination", CommandFlags.None); - mock.Verify(_ => _.ListRightPopLeftPush("prefix:source", "prefix:destination", CommandFlags.None)); - } - - [Fact] - public void ListRightPush_1() - { - wrapper.ListRightPush("key", "value", When.Exists, CommandFlags.None); - mock.Verify(_ => _.ListRightPush("prefix:key", "value", When.Exists, CommandFlags.None)); - } - - [Fact] - public void ListRightPush_2() - { - RedisValue[] values = new RedisValue[0]; - wrapper.ListRightPush("key", values, CommandFlags.None); - mock.Verify(_ => _.ListRightPush("prefix:key", values, CommandFlags.None)); - } - - [Fact] - public void ListRightPush_3() - { - RedisValue[] values = new RedisValue[] { "value1", "value2" }; - wrapper.ListRightPush("key", values, When.Exists, CommandFlags.None); - mock.Verify(_ => _.ListRightPush("prefix:key", values, When.Exists, CommandFlags.None)); - } - - [Fact] - public void ListSetByIndex() - { - wrapper.ListSetByIndex("key", 123, "value", CommandFlags.None); - mock.Verify(_ => _.ListSetByIndex("prefix:key", 123, "value", CommandFlags.None)); - } - - [Fact] - public void 
ListTrim() - { - wrapper.ListTrim("key", 123, 456, CommandFlags.None); - mock.Verify(_ => _.ListTrim("prefix:key", 123, 456, CommandFlags.None)); - } - - [Fact] - public void LockExtend() - { - TimeSpan expiry = TimeSpan.FromSeconds(123); - wrapper.LockExtend("key", "value", expiry, CommandFlags.None); - mock.Verify(_ => _.LockExtend("prefix:key", "value", expiry, CommandFlags.None)); - } - - [Fact] - public void LockQuery() - { - wrapper.LockQuery("key", CommandFlags.None); - mock.Verify(_ => _.LockQuery("prefix:key", CommandFlags.None)); - } - - [Fact] - public void LockRelease() - { - wrapper.LockRelease("key", "value", CommandFlags.None); - mock.Verify(_ => _.LockRelease("prefix:key", "value", CommandFlags.None)); - } - - [Fact] - public void LockTake() - { - TimeSpan expiry = TimeSpan.FromSeconds(123); - wrapper.LockTake("key", "value", expiry, CommandFlags.None); - mock.Verify(_ => _.LockTake("prefix:key", "value", expiry, CommandFlags.None)); - } - - [Fact] - public void Publish() - { - wrapper.Publish("channel", "message", CommandFlags.None); - mock.Verify(_ => _.Publish("prefix:channel", "message", CommandFlags.None)); - } - - [Fact] - public void ScriptEvaluate_1() - { - byte[] hash = new byte[0]; - RedisValue[] values = new RedisValue[0]; - RedisKey[] keys = new RedisKey[] { "a", "b" }; - Expression> valid = _ => _.Length == 2 && _[0] == "prefix:a" && _[1] == "prefix:b"; - wrapper.ScriptEvaluate(hash, keys, values, CommandFlags.None); - mock.Verify(_ => _.ScriptEvaluate(hash, It.Is(valid), values, CommandFlags.None)); - } - - [Fact] - public void ScriptEvaluate_2() - { - RedisValue[] values = new RedisValue[0]; - RedisKey[] keys = new RedisKey[] { "a", "b" }; - Expression> valid = _ => _.Length == 2 && _[0] == "prefix:a" && _[1] == "prefix:b"; - wrapper.ScriptEvaluate("script", keys, values, CommandFlags.None); - mock.Verify(_ => _.ScriptEvaluate("script", It.Is(valid), values, CommandFlags.None)); - } - - [Fact] - public void SetAdd_1() - { - 
wrapper.SetAdd("key", "value", CommandFlags.None); - mock.Verify(_ => _.SetAdd("prefix:key", "value", CommandFlags.None)); - } - - [Fact] - public void SetAdd_2() - { - RedisValue[] values = new RedisValue[0]; - wrapper.SetAdd("key", values, CommandFlags.None); - mock.Verify(_ => _.SetAdd("prefix:key", values, CommandFlags.None)); - } - - [Fact] - public void SetCombine_1() - { - wrapper.SetCombine(SetOperation.Intersect, "first", "second", CommandFlags.None); - mock.Verify(_ => _.SetCombine(SetOperation.Intersect, "prefix:first", "prefix:second", CommandFlags.None)); - } - - [Fact] - public void SetCombine_2() - { - RedisKey[] keys = new RedisKey[] { "a", "b" }; - Expression> valid = _ => _.Length == 2 && _[0] == "prefix:a" && _[1] == "prefix:b"; - wrapper.SetCombine(SetOperation.Intersect, keys, CommandFlags.None); - mock.Verify(_ => _.SetCombine(SetOperation.Intersect, It.Is(valid), CommandFlags.None)); - } - - [Fact] - public void SetCombineAndStore_1() - { - wrapper.SetCombineAndStore(SetOperation.Intersect, "destination", "first", "second", CommandFlags.None); - mock.Verify(_ => _.SetCombineAndStore(SetOperation.Intersect, "prefix:destination", "prefix:first", "prefix:second", CommandFlags.None)); - } - - [Fact] - public void SetCombineAndStore_2() - { - RedisKey[] keys = new RedisKey[] { "a", "b" }; - Expression> valid = _ => _.Length == 2 && _[0] == "prefix:a" && _[1] == "prefix:b"; - wrapper.SetCombineAndStore(SetOperation.Intersect, "destination", keys, CommandFlags.None); - mock.Verify(_ => _.SetCombineAndStore(SetOperation.Intersect, "prefix:destination", It.Is(valid), CommandFlags.None)); - } - - [Fact] - public void SetContains() - { - wrapper.SetContains("key", "value", CommandFlags.None); - mock.Verify(_ => _.SetContains("prefix:key", "value", CommandFlags.None)); - } - - [Fact] - public void SetLength() - { - wrapper.SetLength("key", CommandFlags.None); - mock.Verify(_ => _.SetLength("prefix:key", CommandFlags.None)); - } - - [Fact] - public void 
SetMembers() - { - wrapper.SetMembers("key", CommandFlags.None); - mock.Verify(_ => _.SetMembers("prefix:key", CommandFlags.None)); - } - - [Fact] - public void SetMove() - { - wrapper.SetMove("source", "destination", "value", CommandFlags.None); - mock.Verify(_ => _.SetMove("prefix:source", "prefix:destination", "value", CommandFlags.None)); - } - - [Fact] - public void SetPop_1() - { - wrapper.SetPop("key", CommandFlags.None); - mock.Verify(_ => _.SetPop("prefix:key", CommandFlags.None)); - - wrapper.SetPop("key", 5, CommandFlags.None); - mock.Verify(_ => _.SetPop("prefix:key", 5, CommandFlags.None)); - } - - [Fact] - public void SetPop_2() - { - wrapper.SetPop("key", 5, CommandFlags.None); - mock.Verify(_ => _.SetPop("prefix:key", 5, CommandFlags.None)); - } - - [Fact] - public void SetRandomMember() - { - wrapper.SetRandomMember("key", CommandFlags.None); - mock.Verify(_ => _.SetRandomMember("prefix:key", CommandFlags.None)); - } - - [Fact] - public void SetRandomMembers() - { - wrapper.SetRandomMembers("key", 123, CommandFlags.None); - mock.Verify(_ => _.SetRandomMembers("prefix:key", 123, CommandFlags.None)); - } - - [Fact] - public void SetRemove_1() - { - wrapper.SetRemove("key", "value", CommandFlags.None); - mock.Verify(_ => _.SetRemove("prefix:key", "value", CommandFlags.None)); - } - - [Fact] - public void SetRemove_2() - { - RedisValue[] values = new RedisValue[0]; - wrapper.SetRemove("key", values, CommandFlags.None); - mock.Verify(_ => _.SetRemove("prefix:key", values, CommandFlags.None)); - } - - [Fact] - public void SetScan() - { - wrapper.SetScan("key", "pattern", 123, flags: CommandFlags.None); - mock.Verify(_ => _.SetScan("prefix:key", "pattern", 123, CommandFlags.None)); - } - - [Fact] - public void SetScan_Full() - { - wrapper.SetScan("key", "pattern", 123, 42, 64, flags: CommandFlags.None); - mock.Verify(_ => _.SetScan("prefix:key", "pattern", 123, 42, 64, CommandFlags.None)); - } - - [Fact] - public void Sort() - { - RedisValue[] get = new 
RedisValue[] { "a", "#" }; - Expression> valid = _ => _.Length == 2 && _[0] == "prefix:a" && _[1] == "#"; - - wrapper.Sort("key", 123, 456, Order.Descending, SortType.Alphabetic, "nosort", get, CommandFlags.None); - wrapper.Sort("key", 123, 456, Order.Descending, SortType.Alphabetic, "by", get, CommandFlags.None); - - mock.Verify(_ => _.Sort("prefix:key", 123, 456, Order.Descending, SortType.Alphabetic, "nosort", It.Is(valid), CommandFlags.None)); - mock.Verify(_ => _.Sort("prefix:key", 123, 456, Order.Descending, SortType.Alphabetic, "prefix:by", It.Is(valid), CommandFlags.None)); - } - - [Fact] - public void SortAndStore() - { - RedisValue[] get = new RedisValue[] { "a", "#" }; - Expression> valid = _ => _.Length == 2 && _[0] == "prefix:a" && _[1] == "#"; - - wrapper.SortAndStore("destination", "key", 123, 456, Order.Descending, SortType.Alphabetic, "nosort", get, CommandFlags.None); - wrapper.SortAndStore("destination", "key", 123, 456, Order.Descending, SortType.Alphabetic, "by", get, CommandFlags.None); - - mock.Verify(_ => _.SortAndStore("prefix:destination", "prefix:key", 123, 456, Order.Descending, SortType.Alphabetic, "nosort", It.Is(valid), CommandFlags.None)); - mock.Verify(_ => _.SortAndStore("prefix:destination", "prefix:key", 123, 456, Order.Descending, SortType.Alphabetic, "prefix:by", It.Is(valid), CommandFlags.None)); - } - - [Fact] - public void SortedSetAdd_1() - { - wrapper.SortedSetAdd("key", "member", 1.23, When.Exists, CommandFlags.None); - mock.Verify(_ => _.SortedSetAdd("prefix:key", "member", 1.23, When.Exists, CommandFlags.None)); - } - - [Fact] - public void SortedSetAdd_2() - { - SortedSetEntry[] values = new SortedSetEntry[0]; - wrapper.SortedSetAdd("key", values, When.Exists, CommandFlags.None); - mock.Verify(_ => _.SortedSetAdd("prefix:key", values, When.Exists, CommandFlags.None)); - } - - [Fact] - public void SortedSetCombineAndStore_1() - { - wrapper.SortedSetCombineAndStore(SetOperation.Intersect, "destination", "first", 
"second", Aggregate.Max, CommandFlags.None); - mock.Verify(_ => _.SortedSetCombineAndStore(SetOperation.Intersect, "prefix:destination", "prefix:first", "prefix:second", Aggregate.Max, CommandFlags.None)); - } - - [Fact] - public void SortedSetCombineAndStore_2() - { - RedisKey[] keys = new RedisKey[] { "a", "b" }; - Expression> valid = _ => _.Length == 2 && _[0] == "prefix:a" && _[1] == "prefix:b"; - wrapper.SetCombineAndStore(SetOperation.Intersect, "destination", keys, CommandFlags.None); - mock.Verify(_ => _.SetCombineAndStore(SetOperation.Intersect, "prefix:destination", It.Is(valid), CommandFlags.None)); - } - - [Fact] - public void SortedSetDecrement() - { - wrapper.SortedSetDecrement("key", "member", 1.23, CommandFlags.None); - mock.Verify(_ => _.SortedSetDecrement("prefix:key", "member", 1.23, CommandFlags.None)); - } - - [Fact] - public void SortedSetIncrement() - { - wrapper.SortedSetIncrement("key", "member", 1.23, CommandFlags.None); - mock.Verify(_ => _.SortedSetIncrement("prefix:key", "member", 1.23, CommandFlags.None)); - } - - [Fact] - public void SortedSetLength() - { - wrapper.SortedSetLength("key", 1.23, 1.23, Exclude.Start, CommandFlags.None); - mock.Verify(_ => _.SortedSetLength("prefix:key", 1.23, 1.23, Exclude.Start, CommandFlags.None)); - } - - [Fact] - public void SortedSetLengthByValue() - { - wrapper.SortedSetLengthByValue("key", "min", "max", Exclude.Start, CommandFlags.None); - mock.Verify(_ => _.SortedSetLengthByValue("prefix:key", "min", "max", Exclude.Start, CommandFlags.None)); - } - - [Fact] - public void SortedSetRangeByRank() - { - wrapper.SortedSetRangeByRank("key", 123, 456, Order.Descending, CommandFlags.None); - mock.Verify(_ => _.SortedSetRangeByRank("prefix:key", 123, 456, Order.Descending, CommandFlags.None)); - } - - [Fact] - public void SortedSetRangeByRankWithScores() - { - wrapper.SortedSetRangeByRankWithScores("key", 123, 456, Order.Descending, CommandFlags.None); - mock.Verify(_ => 
_.SortedSetRangeByRankWithScores("prefix:key", 123, 456, Order.Descending, CommandFlags.None)); - } - - [Fact] - public void SortedSetRangeByScore() - { - wrapper.SortedSetRangeByScore("key", 1.23, 1.23, Exclude.Start, Order.Descending, 123, 456, CommandFlags.None); - mock.Verify(_ => _.SortedSetRangeByScore("prefix:key", 1.23, 1.23, Exclude.Start, Order.Descending, 123, 456, CommandFlags.None)); - } - - [Fact] - public void SortedSetRangeByScoreWithScores() - { - wrapper.SortedSetRangeByScoreWithScores("key", 1.23, 1.23, Exclude.Start, Order.Descending, 123, 456, CommandFlags.None); - mock.Verify(_ => _.SortedSetRangeByScoreWithScores("prefix:key", 1.23, 1.23, Exclude.Start, Order.Descending, 123, 456, CommandFlags.None)); - } - - [Fact] - public void SortedSetRangeByValue() - { - wrapper.SortedSetRangeByValue("key", "min", "max", Exclude.Start, 123, 456, CommandFlags.None); - mock.Verify(_ => _.SortedSetRangeByValue("prefix:key", "min", "max", Exclude.Start, Order.Ascending, 123, 456, CommandFlags.None)); - } - - [Fact] - public void SortedSetRangeByValueDesc() - { - wrapper.SortedSetRangeByValue("key", "min", "max", Exclude.Start, Order.Descending, 123, 456, CommandFlags.None); - mock.Verify(_ => _.SortedSetRangeByValue("prefix:key", "min", "max", Exclude.Start, Order.Descending, 123, 456, CommandFlags.None)); - } - - [Fact] - public void SortedSetRank() - { - wrapper.SortedSetRank("key", "member", Order.Descending, CommandFlags.None); - mock.Verify(_ => _.SortedSetRank("prefix:key", "member", Order.Descending, CommandFlags.None)); - } - - [Fact] - public void SortedSetRemove_1() - { - wrapper.SortedSetRemove("key", "member", CommandFlags.None); - mock.Verify(_ => _.SortedSetRemove("prefix:key", "member", CommandFlags.None)); - } - - [Fact] - public void SortedSetRemove_2() - { - RedisValue[] members = new RedisValue[0]; - wrapper.SortedSetRemove("key", members, CommandFlags.None); - mock.Verify(_ => _.SortedSetRemove("prefix:key", members, CommandFlags.None)); 
- } - - [Fact] - public void SortedSetRemoveRangeByRank() - { - wrapper.SortedSetRemoveRangeByRank("key", 123, 456, CommandFlags.None); - mock.Verify(_ => _.SortedSetRemoveRangeByRank("prefix:key", 123, 456, CommandFlags.None)); - } - - [Fact] - public void SortedSetRemoveRangeByScore() - { - wrapper.SortedSetRemoveRangeByScore("key", 1.23, 1.23, Exclude.Start, CommandFlags.None); - mock.Verify(_ => _.SortedSetRemoveRangeByScore("prefix:key", 1.23, 1.23, Exclude.Start, CommandFlags.None)); - } - - [Fact] - public void SortedSetRemoveRangeByValue() - { - wrapper.SortedSetRemoveRangeByValue("key", "min", "max", Exclude.Start, CommandFlags.None); - mock.Verify(_ => _.SortedSetRemoveRangeByValue("prefix:key", "min", "max", Exclude.Start, CommandFlags.None)); - } - - [Fact] - public void SortedSetScan() - { - wrapper.SortedSetScan("key", "pattern", 123, flags: CommandFlags.None); - mock.Verify(_ => _.SortedSetScan("prefix:key", "pattern", 123, CommandFlags.None)); - } - - [Fact] - public void SortedSetScan_Full() - { - wrapper.SortedSetScan("key", "pattern", 123, 42, 64, flags: CommandFlags.None); - mock.Verify(_ => _.SortedSetScan("prefix:key", "pattern", 123, 42, 64, CommandFlags.None)); - } - - [Fact] - public void SortedSetScore() - { - wrapper.SortedSetScore("key", "member", CommandFlags.None); - mock.Verify(_ => _.SortedSetScore("prefix:key", "member", CommandFlags.None)); - } - - [Fact] - public void StreamAcknowledge_1() - { - wrapper.StreamAcknowledge("key", "group", "0-0", CommandFlags.None); - mock.Verify(_ => _.StreamAcknowledge("prefix:key", "group", "0-0", CommandFlags.None)); - } - - [Fact] - public void StreamAcknowledge_2() - { - var messageIds = new RedisValue[] { "0-0", "0-1", "0-2" }; - wrapper.StreamAcknowledge("key", "group", messageIds, CommandFlags.None); - mock.Verify(_ => _.StreamAcknowledge("prefix:key", "group", messageIds, CommandFlags.None)); - } - - [Fact] - public void StreamAdd_1() - { - wrapper.StreamAdd("key", "field1", "value1", "*", 
1000, true, CommandFlags.None); - mock.Verify(_ => _.StreamAdd("prefix:key", "field1", "value1", "*", 1000, true, CommandFlags.None)); - } - - [Fact] - public void StreamAdd_2() - { - var fields = new NameValueEntry[0]; - wrapper.StreamAdd("key", fields, "*", 1000, true, CommandFlags.None); - mock.Verify(_ => _.StreamAdd("prefix:key", fields, "*", 1000, true, CommandFlags.None)); - } - - [Fact] - public void StreamClaimMessages() - { - var messageIds = new RedisValue[0]; - wrapper.StreamClaim("key", "group", "consumer", 1000, messageIds, CommandFlags.None); - mock.Verify(_ => _.StreamClaim("prefix:key", "group", "consumer", 1000, messageIds, CommandFlags.None)); - } - - [Fact] - public void StreamClaimMessagesReturningIds() - { - var messageIds = new RedisValue[0]; - wrapper.StreamClaimIdsOnly("key", "group", "consumer", 1000, messageIds, CommandFlags.None); - mock.Verify(_ => _.StreamClaimIdsOnly("prefix:key", "group", "consumer", 1000, messageIds, CommandFlags.None)); - } - - [Fact] - public void StreamConsumerGroupSetPosition() - { - wrapper.StreamConsumerGroupSetPosition("key", "group", StreamPosition.Beginning, CommandFlags.None); - mock.Verify(_ => _.StreamConsumerGroupSetPosition("prefix:key", "group", StreamPosition.Beginning, CommandFlags.None)); - } - - [Fact] - public void StreamConsumerInfoGet() - { - wrapper.StreamConsumerInfo("key", "group", CommandFlags.None); - mock.Verify(_ => _.StreamConsumerInfo("prefix:key", "group", CommandFlags.None)); - } - - [Fact] - public void StreamCreateConsumerGroup() - { - wrapper.StreamCreateConsumerGroup("key", "group", StreamPosition.Beginning, false, CommandFlags.None); - mock.Verify(_ => _.StreamCreateConsumerGroup("prefix:key", "group", StreamPosition.Beginning, false, CommandFlags.None)); - } - - [Fact] - public void StreamGroupInfoGet() - { - wrapper.StreamGroupInfo("key", CommandFlags.None); - mock.Verify(_ => _.StreamGroupInfo("prefix:key", CommandFlags.None)); - } - - [Fact] - public void StreamInfoGet() - { 
- wrapper.StreamInfo("key", CommandFlags.None); - mock.Verify(_ => _.StreamInfo("prefix:key", CommandFlags.None)); - } - - [Fact] - public void StreamLength() - { - wrapper.StreamLength("key", CommandFlags.None); - mock.Verify(_ => _.StreamLength("prefix:key", CommandFlags.None)); - } - - [Fact] - public void StreamMessagesDelete() - { - var messageIds = new RedisValue[] { }; - wrapper.StreamDelete("key", messageIds, CommandFlags.None); - mock.Verify(_ => _.StreamDelete("prefix:key", messageIds, CommandFlags.None)); - } - - [Fact] - public void StreamDeleteConsumer() - { - wrapper.StreamDeleteConsumer("key", "group", "consumer", CommandFlags.None); - mock.Verify(_ => _.StreamDeleteConsumer("prefix:key", "group", "consumer", CommandFlags.None)); - } - - [Fact] - public void StreamDeleteConsumerGroup() - { - wrapper.StreamDeleteConsumerGroup("key", "group", CommandFlags.None); - mock.Verify(_ => _.StreamDeleteConsumerGroup("prefix:key", "group", CommandFlags.None)); - } - - [Fact] - public void StreamPendingInfoGet() - { - wrapper.StreamPending("key", "group", CommandFlags.None); - mock.Verify(_ => _.StreamPending("prefix:key", "group", CommandFlags.None)); - } - - [Fact] - public void StreamPendingMessageInfoGet() - { - wrapper.StreamPendingMessages("key", "group", 10, RedisValue.Null, "-", "+", CommandFlags.None); - mock.Verify(_ => _.StreamPendingMessages("prefix:key", "group", 10, RedisValue.Null, "-", "+", CommandFlags.None)); - } - - [Fact] - public void StreamRange() - { - wrapper.StreamRange("key", "-", "+", null, Order.Ascending, CommandFlags.None); - mock.Verify(_ => _.StreamRange("prefix:key", "-", "+", null, Order.Ascending, CommandFlags.None)); - } - - [Fact] - public void StreamRead_1() - { - var streamPositions = new StreamPosition[] { }; - wrapper.StreamRead(streamPositions, null, CommandFlags.None); - mock.Verify(_ => _.StreamRead(streamPositions, null, CommandFlags.None)); - } - - [Fact] - public void StreamRead_2() - { - wrapper.StreamRead("key", 
"0-0", null, CommandFlags.None); - mock.Verify(_ => _.StreamRead("prefix:key", "0-0", null, CommandFlags.None)); - } - - [Fact] - public void StreamStreamReadGroup_1() - { - wrapper.StreamReadGroup("key", "group", "consumer", "0-0", 10, false, CommandFlags.None); - mock.Verify(_ => _.StreamReadGroup("prefix:key", "group", "consumer", "0-0", 10, false, CommandFlags.None)); - } - - [Fact] - public void StreamStreamReadGroup_2() - { - var streamPositions = new StreamPosition[] { }; - wrapper.StreamReadGroup(streamPositions, "group", "consumer", 10, false, CommandFlags.None); - mock.Verify(_ => _.StreamReadGroup(streamPositions, "group", "consumer", 10, false, CommandFlags.None)); - } - - [Fact] - public void StreamTrim() - { - wrapper.StreamTrim("key", 1000, true, CommandFlags.None); - mock.Verify(_ => _.StreamTrim("prefix:key", 1000, true, CommandFlags.None)); - } - - [Fact] - public void StringAppend() - { - wrapper.StringAppend("key", "value", CommandFlags.None); - mock.Verify(_ => _.StringAppend("prefix:key", "value", CommandFlags.None)); - } - - [Fact] - public void StringBitCount() - { - wrapper.StringBitCount("key", 123, 456, CommandFlags.None); - mock.Verify(_ => _.StringBitCount("prefix:key", 123, 456, CommandFlags.None)); - } - - [Fact] - public void StringBitOperation_1() - { - wrapper.StringBitOperation(Bitwise.Xor, "destination", "first", "second", CommandFlags.None); - mock.Verify(_ => _.StringBitOperation(Bitwise.Xor, "prefix:destination", "prefix:first", "prefix:second", CommandFlags.None)); - } - - [Fact] - public void StringBitOperation_2() - { - RedisKey[] keys = new RedisKey[] { "a", "b" }; - Expression> valid = _ => _.Length == 2 && _[0] == "prefix:a" && _[1] == "prefix:b"; - wrapper.StringBitOperation(Bitwise.Xor, "destination", keys, CommandFlags.None); - mock.Verify(_ => _.StringBitOperation(Bitwise.Xor, "prefix:destination", It.Is(valid), CommandFlags.None)); - } - - [Fact] - public void StringBitPosition() - { - 
wrapper.StringBitPosition("key", true, 123, 456, CommandFlags.None); - mock.Verify(_ => _.StringBitPosition("prefix:key", true, 123, 456, CommandFlags.None)); - } - - [Fact] - public void StringDecrement_1() - { - wrapper.StringDecrement("key", 123, CommandFlags.None); - mock.Verify(_ => _.StringDecrement("prefix:key", 123, CommandFlags.None)); - } - - [Fact] - public void StringDecrement_2() - { - wrapper.StringDecrement("key", 1.23, CommandFlags.None); - mock.Verify(_ => _.StringDecrement("prefix:key", 1.23, CommandFlags.None)); - } - - [Fact] - public void StringGet_1() - { - wrapper.StringGet("key", CommandFlags.None); - mock.Verify(_ => _.StringGet("prefix:key", CommandFlags.None)); - } - - [Fact] - public void StringGet_2() - { - RedisKey[] keys = new RedisKey[] { "a", "b" }; - Expression> valid = _ => _.Length == 2 && _[0] == "prefix:a" && _[1] == "prefix:b"; - wrapper.StringGet(keys, CommandFlags.None); - mock.Verify(_ => _.StringGet(It.Is(valid), CommandFlags.None)); - } - - [Fact] - public void StringGetBit() - { - wrapper.StringGetBit("key", 123, CommandFlags.None); - mock.Verify(_ => _.StringGetBit("prefix:key", 123, CommandFlags.None)); - } - - [Fact] - public void StringGetRange() - { - wrapper.StringGetRange("key", 123, 456, CommandFlags.None); - mock.Verify(_ => _.StringGetRange("prefix:key", 123, 456, CommandFlags.None)); - } - - [Fact] - public void StringGetSet() - { - wrapper.StringGetSet("key", "value", CommandFlags.None); - mock.Verify(_ => _.StringGetSet("prefix:key", "value", CommandFlags.None)); - } - - [Fact] - public void StringGetWithExpiry() - { - wrapper.StringGetWithExpiry("key", CommandFlags.None); - mock.Verify(_ => _.StringGetWithExpiry("prefix:key", CommandFlags.None)); - } - - [Fact] - public void StringIncrement_1() - { - wrapper.StringIncrement("key", 123, CommandFlags.None); - mock.Verify(_ => _.StringIncrement("prefix:key", 123, CommandFlags.None)); - } - - [Fact] - public void StringIncrement_2() - { - 
wrapper.StringIncrement("key", 1.23, CommandFlags.None); - mock.Verify(_ => _.StringIncrement("prefix:key", 1.23, CommandFlags.None)); - } - - [Fact] - public void StringLength() - { - wrapper.StringLength("key", CommandFlags.None); - mock.Verify(_ => _.StringLength("prefix:key", CommandFlags.None)); - } - - [Fact] - public void StringSet_1() - { - TimeSpan expiry = TimeSpan.FromSeconds(123); - wrapper.StringSet("key", "value", expiry, When.Exists, CommandFlags.None); - mock.Verify(_ => _.StringSet("prefix:key", "value", expiry, When.Exists, CommandFlags.None)); - } - - [Fact] - public void StringSet_2() - { - KeyValuePair[] values = new KeyValuePair[] { new KeyValuePair("a", "x"), new KeyValuePair("b", "y") }; - Expression[], bool>> valid = _ => _.Length == 2 && _[0].Key == "prefix:a" && _[0].Value == "x" && _[1].Key == "prefix:b" && _[1].Value == "y"; - wrapper.StringSet(values, When.Exists, CommandFlags.None); - mock.Verify(_ => _.StringSet(It.Is(valid), When.Exists, CommandFlags.None)); - } - - [Fact] - public void StringSetBit() - { - wrapper.StringSetBit("key", 123, true, CommandFlags.None); - mock.Verify(_ => _.StringSetBit("prefix:key", 123, true, CommandFlags.None)); - } - - [Fact] - public void StringSetRange() - { - wrapper.StringSetRange("key", 123, "value", CommandFlags.None); - mock.Verify(_ => _.StringSetRange("prefix:key", 123, "value", CommandFlags.None)); - } - } -} diff --git a/tests/StackExchange.Redis.Tests/Databases.cs b/tests/StackExchange.Redis.Tests/Databases.cs deleted file mode 100644 index 13ee2c25b..000000000 --- a/tests/StackExchange.Redis.Tests/Databases.cs +++ /dev/null @@ -1,161 +0,0 @@ -using System.Threading.Tasks; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - [Collection(SharedConnectionFixture.Key)] - public class Databases : TestBase - { - public Databases(ITestOutputHelper output, SharedConnectionFixture fixture) : base (output, fixture) { } - - [Fact] - public async Task CountKeys() - { 
- var db1Id = TestConfig.GetDedicatedDB(); - var db2Id = TestConfig.GetDedicatedDB(); - using (var muxer = Create(allowAdmin: true)) - { - Skip.IfMissingDatabase(muxer, db1Id); - Skip.IfMissingDatabase(muxer, db2Id); - var server = GetAnyMaster(muxer); - server.FlushDatabase(db1Id, CommandFlags.FireAndForget); - server.FlushDatabase(db2Id, CommandFlags.FireAndForget); - } - using (var muxer = Create(defaultDatabase: db2Id)) - { - Skip.IfMissingDatabase(muxer, db1Id); - Skip.IfMissingDatabase(muxer, db2Id); - RedisKey key = Me(); - var dba = muxer.GetDatabase(db1Id); - var dbb = muxer.GetDatabase(db2Id); - dba.StringSet("abc", "def", flags: CommandFlags.FireAndForget); - dba.StringIncrement(key, flags: CommandFlags.FireAndForget); - dbb.StringIncrement(key, flags: CommandFlags.FireAndForget); - - var server = GetAnyMaster(muxer); - var c0 = server.DatabaseSizeAsync(db1Id); - var c1 = server.DatabaseSizeAsync(db2Id); - var c2 = server.DatabaseSizeAsync(); // using default DB, which is db2Id - - Assert.Equal(2, await c0); - Assert.Equal(1, await c1); - Assert.Equal(1, await c2); - } - } - - [Fact] - public void DatabaseCount() - { - using (var muxer = Create(allowAdmin: true)) - { - var server = GetAnyMaster(muxer); - var count = server.DatabaseCount; - Log("Count: " + count); - var configVal = server.ConfigGet("databases")[0].Value; - Log("Config databases: " + configVal); - Assert.Equal(int.Parse(configVal), count); - } - } - - [Fact] - public async Task MultiDatabases() - { - using (var muxer = Create()) - { - RedisKey key = Me(); - var db0 = muxer.GetDatabase(TestConfig.GetDedicatedDB(muxer)); - var db1 = muxer.GetDatabase(TestConfig.GetDedicatedDB(muxer)); - var db2 = muxer.GetDatabase(TestConfig.GetDedicatedDB(muxer)); - - db0.KeyDelete(key, CommandFlags.FireAndForget); - db1.KeyDelete(key, CommandFlags.FireAndForget); - db2.KeyDelete(key, CommandFlags.FireAndForget); - - db0.StringSet(key, "a", flags: CommandFlags.FireAndForget); - db1.StringSet(key, "b", 
flags: CommandFlags.FireAndForget); - db2.StringSet(key, "c", flags: CommandFlags.FireAndForget); - - var a = db0.StringGetAsync(key); - var b = db1.StringGetAsync(key); - var c = db2.StringGetAsync(key); - - Assert.Equal("a", await a); // db:0 - Assert.Equal("b", await b); // db:1 - Assert.Equal("c", await c); // db:2 - } - } - - [Fact] - public async Task SwapDatabases() - { - using (var muxer = Create(allowAdmin: true)) - { - Skip.IfMissingFeature(muxer, nameof(RedisFeatures.SwapDB), r => r.SwapDB); - - RedisKey key = Me(); - var db0id = TestConfig.GetDedicatedDB(muxer); - var db0 = muxer.GetDatabase(db0id); - var db1id = TestConfig.GetDedicatedDB(muxer); - var db1 = muxer.GetDatabase(db1id); - - db0.KeyDelete(key, CommandFlags.FireAndForget); - db1.KeyDelete(key, CommandFlags.FireAndForget); - - db0.StringSet(key, "a", flags: CommandFlags.FireAndForget); - db1.StringSet(key, "b", flags: CommandFlags.FireAndForget); - - var a = db0.StringGetAsync(key); - var b = db1.StringGetAsync(key); - - Assert.Equal("a", await a); // db:0 - Assert.Equal("b", await b); // db:1 - - var server = GetServer(muxer); - server.SwapDatabases(db0id, db1id); - - var aNew = db1.StringGetAsync(key); - var bNew = db0.StringGetAsync(key); - - Assert.Equal("a", await aNew); // db:1 - Assert.Equal("b", await bNew); // db:0 - } - } - - [Fact] - public async Task SwapDatabasesAsync() - { - using (var muxer = Create(allowAdmin: true)) - { - Skip.IfMissingFeature(muxer, nameof(RedisFeatures.SwapDB), r => r.SwapDB); - - RedisKey key = Me(); - var db0id = TestConfig.GetDedicatedDB(muxer); - var db0 = muxer.GetDatabase(db0id); - var db1id = TestConfig.GetDedicatedDB(muxer); - var db1 = muxer.GetDatabase(db1id); - - db0.KeyDelete(key, CommandFlags.FireAndForget); - db1.KeyDelete(key, CommandFlags.FireAndForget); - - db0.StringSet(key, "a", flags: CommandFlags.FireAndForget); - db1.StringSet(key, "b", flags: CommandFlags.FireAndForget); - - var a = db0.StringGetAsync(key); - var b = 
db1.StringGetAsync(key); - - Assert.Equal("a", await a); // db:0 - Assert.Equal("b", await b); // db:1 - - var server = GetServer(muxer); - _ = server.SwapDatabasesAsync(db0id, db1id).ForAwait(); - - var aNew = db1.StringGetAsync(key); - var bNew = db0.StringGetAsync(key); - - Assert.Equal("a", await aNew); // db:1 - Assert.Equal("b", await bNew); // db:0 - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/DefaultOptionsTests.cs b/tests/StackExchange.Redis.Tests/DefaultOptionsTests.cs new file mode 100644 index 000000000..a01e845da --- /dev/null +++ b/tests/StackExchange.Redis.Tests/DefaultOptionsTests.cs @@ -0,0 +1,220 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Net; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Logging.Abstractions; +using StackExchange.Redis.Configuration; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class DefaultOptionsTests(ITestOutputHelper output) : TestBase(output) +{ + public class TestOptionsProvider(string domainSuffix) : DefaultOptionsProvider + { + private readonly string _domainSuffix = domainSuffix; + + public override bool AbortOnConnectFail => true; + public override TimeSpan? 
ConnectTimeout => TimeSpan.FromSeconds(123); + public override bool AllowAdmin => true; + public override BacklogPolicy BacklogPolicy => BacklogPolicy.FailFast; + public override bool CheckCertificateRevocation => true; + public override CommandMap CommandMap => CommandMap.Create(new HashSet() { "SELECT" }); + public override TimeSpan ConfigCheckInterval => TimeSpan.FromSeconds(124); + public override string ConfigurationChannel => "TestConfigChannel"; + public override int ConnectRetry => 123; + public override Version DefaultVersion => new Version(1, 2, 3, 4); + protected override string GetDefaultClientName() => "TestPrefix-" + base.GetDefaultClientName(); + public override bool HeartbeatConsistencyChecks => true; + public override TimeSpan HeartbeatInterval => TimeSpan.FromMilliseconds(500); + public override bool IsMatch(EndPoint endpoint) => endpoint is DnsEndPoint dnsep && dnsep.Host.EndsWith(_domainSuffix); + public override TimeSpan KeepAliveInterval => TimeSpan.FromSeconds(125); + public override ILoggerFactory? LoggerFactory => NullLoggerFactory.Instance; + public override Proxy Proxy => Proxy.Twemproxy; + public override IReconnectRetryPolicy ReconnectRetryPolicy => new TestRetryPolicy(); + public override bool ResolveDns => true; + public override TimeSpan SyncTimeout => TimeSpan.FromSeconds(126); + public override string TieBreaker => "TestTiebreaker"; + public override string? User => "TestUser"; + public override string? 
Password => "TestPassword"; + } + + public class TestRetryPolicy : IReconnectRetryPolicy + { + public bool ShouldRetry(long currentRetryCount, int timeElapsedMillisecondsSinceLastRetry) => false; + } + + [Fact] + public void IsMatchOnDomain() + { + DefaultOptionsProvider.AddProvider(new TestOptionsProvider(".testdomain")); + + var epc = new EndPointCollection(new List() { new DnsEndPoint("local.testdomain", 0) }); + var provider = DefaultOptionsProvider.GetProvider(epc); + Assert.IsType(provider); + + epc = new EndPointCollection(new List() { new DnsEndPoint("local.nottestdomain", 0) }); + provider = DefaultOptionsProvider.GetProvider(epc); + Assert.IsType(provider); + } + + [Theory] + [InlineData("contoso.redis.cache.windows.net")] + [InlineData("contoso.REDIS.CACHE.chinacloudapi.cn")] // added a few upper case chars to validate comparison + [InlineData("contoso.redis.cache.usgovcloudapi.net")] + [InlineData("contoso.redis.cache.sovcloud-api.de")] + [InlineData("contoso.redis.cache.sovcloud-api.fr")] + public void IsMatchOnAzureDomain(string hostName) + { + var epc = new EndPointCollection(new List() { new DnsEndPoint(hostName, 0) }); + var provider = DefaultOptionsProvider.GetProvider(epc); + Assert.IsType(provider); + } + + [Theory] + [InlineData("contoso.redis.azure.net")] + [InlineData("contoso.redis.chinacloudapi.cn")] + [InlineData("contoso.redis.usgovcloudapi.net")] + [InlineData("contoso.redisenterprise.cache.azure.net")] + public void IsMatchOnAzureManagedRedisDomain(string hostName) + { + var epc = new EndPointCollection(new List() { new DnsEndPoint(hostName, 0) }); + var provider = DefaultOptionsProvider.GetProvider(epc); + Assert.IsType(provider); + } + + [Fact] + public void AllOverridesFromDefaultsProp() + { + var options = ConfigurationOptions.Parse("localhost"); + Assert.IsType(options.Defaults); + options.Defaults = new TestOptionsProvider(""); + Assert.IsType(options.Defaults); + AssertAllOverrides(options); + } + + [Fact] + public void 
AllOverridesFromEndpointsParse() + { + DefaultOptionsProvider.AddProvider(new TestOptionsProvider(".parse")); + var options = ConfigurationOptions.Parse("localhost.parse:6379"); + Assert.IsType(options.Defaults); + AssertAllOverrides(options); + } + + private static void AssertAllOverrides(ConfigurationOptions options) + { + Assert.True(options.AbortOnConnectFail); + Assert.Equal(TimeSpan.FromSeconds(123), TimeSpan.FromMilliseconds(options.ConnectTimeout)); + + Assert.True(options.AllowAdmin); + Assert.Equal(BacklogPolicy.FailFast, options.BacklogPolicy); + Assert.True(options.CheckCertificateRevocation); + + Assert.True(options.CommandMap.IsAvailable(RedisCommand.SELECT)); + Assert.False(options.CommandMap.IsAvailable(RedisCommand.GET)); + + Assert.Equal(TimeSpan.FromSeconds(124), TimeSpan.FromSeconds(options.ConfigCheckSeconds)); + Assert.Equal("TestConfigChannel", options.ConfigurationChannel); + Assert.Equal(123, options.ConnectRetry); + Assert.Equal(new Version(1, 2, 3, 4), options.DefaultVersion); + + Assert.True(options.HeartbeatConsistencyChecks); + Assert.Equal(TimeSpan.FromMilliseconds(500), options.HeartbeatInterval); + + Assert.Equal(TimeSpan.FromSeconds(125), TimeSpan.FromSeconds(options.KeepAlive)); + Assert.Equal(NullLoggerFactory.Instance, options.LoggerFactory); + Assert.Equal(Proxy.Twemproxy, options.Proxy); + Assert.IsType(options.ReconnectRetryPolicy); + Assert.True(options.ResolveDns); + Assert.Equal(TimeSpan.FromSeconds(126), TimeSpan.FromMilliseconds(options.SyncTimeout)); + Assert.Equal("TestTiebreaker", options.TieBreaker); + Assert.Equal("TestUser", options.User); + Assert.Equal("TestPassword", options.Password); + } + + public class TestAfterConnectOptionsProvider : DefaultOptionsProvider + { + public int Calls; + + public override Task AfterConnectAsync(ConnectionMultiplexer muxer, Action log) + { + Interlocked.Increment(ref Calls); + log("TestAfterConnectOptionsProvider.AfterConnectAsync!"); + return Task.CompletedTask; + } + } + + 
[Fact] + public async Task AfterConnectAsyncHandler() + { + var options = ConfigurationOptions.Parse(GetConfiguration()); + var provider = new TestAfterConnectOptionsProvider(); + options.Defaults = provider; + + await using var conn = await ConnectionMultiplexer.ConnectAsync(options, Writer); + + Assert.True(conn.IsConnected); + Assert.Equal(1, provider.Calls); + } + + public class TestClientNameOptionsProvider : DefaultOptionsProvider + { + protected override string GetDefaultClientName() => "Hey there"; + } + + [Fact] + public async Task ClientNameOverride() + { + var options = ConfigurationOptions.Parse(GetConfiguration()); + options.Defaults = new TestClientNameOptionsProvider(); + + await using var conn = await ConnectionMultiplexer.ConnectAsync(options, Writer); + + Assert.True(conn.IsConnected); + Assert.Equal("Hey there", conn.ClientName); + } + + [Fact] + public async Task ClientNameExplicitWins() + { + var options = ConfigurationOptions.Parse(GetConfiguration() + ",name=FooBar"); + options.Defaults = new TestClientNameOptionsProvider(); + + await using var conn = await ConnectionMultiplexer.ConnectAsync(options, Writer); + + Assert.True(conn.IsConnected); + Assert.Equal("FooBar", conn.ClientName); + } + + public class TestLibraryNameOptionsProvider : DefaultOptionsProvider + { + public string Id { get; } = Guid.NewGuid().ToString(); + public override string LibraryName => Id; + } + + [Fact] + public async Task LibraryNameOverride() + { + var options = ConfigurationOptions.Parse(GetConfiguration()); + var defaults = new TestLibraryNameOptionsProvider(); + options.AllowAdmin = true; + options.Defaults = defaults; + + await using var conn = await ConnectionMultiplexer.ConnectAsync(options, Writer); + // CLIENT SETINFO is in 7.2.0+ + TestBase.ThrowIfBelowMinVersion(conn, RedisFeatures.v7_2_0_rc1); + + var clients = await GetServer(conn).ClientListAsync(); + foreach (var client in clients) + { + Log("Library name: " + client.LibraryName); + } + + 
Assert.True(conn.IsConnected); + Assert.True(clients.Any(c => c.LibraryName == defaults.LibraryName), "Did not find client with name: " + defaults.Id); + } +} diff --git a/tests/StackExchange.Redis.Tests/DefaultPorts.cs b/tests/StackExchange.Redis.Tests/DefaultPorts.cs deleted file mode 100644 index 0e5b9cbcd..000000000 --- a/tests/StackExchange.Redis.Tests/DefaultPorts.cs +++ /dev/null @@ -1,58 +0,0 @@ -using System.Linq; -using System.Net; -using Xunit; - -namespace StackExchange.Redis.Tests -{ - public class DefaultPorts - { - [Theory] - [InlineData("foo", 6379)] - [InlineData("foo:6379", 6379)] - [InlineData("foo:6380", 6380)] - [InlineData("foo,ssl=false", 6379)] - [InlineData("foo:6379,ssl=false", 6379)] - [InlineData("foo:6380,ssl=false", 6380)] - - [InlineData("foo,ssl=true", 6380)] - [InlineData("foo:6379,ssl=true", 6379)] - [InlineData("foo:6380,ssl=true", 6380)] - [InlineData("foo:6381,ssl=true", 6381)] - public void ConfigStringRoundTripWithDefaultPorts(string config, int expectedPort) - { - var options = ConfigurationOptions.Parse(config); - string backAgain = options.ToString(); - Assert.Equal(config, backAgain.Replace("=True", "=true").Replace("=False", "=false")); - - options.SetDefaultPorts(); // normally it is the multiplexer that calls this, not us - Assert.Equal(expectedPort, ((DnsEndPoint)options.EndPoints.Single()).Port); - } - - [Theory] - [InlineData("foo", 0, false, 6379)] - [InlineData("foo", 6379, false, 6379)] - [InlineData("foo", 6380, false, 6380)] - - [InlineData("foo", 0, true, 6380)] - [InlineData("foo", 6379, true, 6379)] - [InlineData("foo", 6380, true, 6380)] - [InlineData("foo", 6381, true, 6381)] - - public void ConfigManualWithDefaultPorts(string host, int port, bool useSsl, int expectedPort) - { - var options = new ConfigurationOptions(); - if (port == 0) - { - options.EndPoints.Add(host); - } - else - { - options.EndPoints.Add(host, port); - } - if (useSsl) options.Ssl = true; - - options.SetDefaultPorts(); // normally it is 
the multiplexer that calls this, not us - Assert.Equal(expectedPort, ((DnsEndPoint)options.EndPoints.Single()).Port); - } - } -} \ No newline at end of file diff --git a/tests/StackExchange.Redis.Tests/DefaultPortsTests.cs b/tests/StackExchange.Redis.Tests/DefaultPortsTests.cs new file mode 100644 index 000000000..965bc6ef1 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/DefaultPortsTests.cs @@ -0,0 +1,57 @@ +using System.Linq; +using System.Net; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class DefaultPortsTests +{ + [Theory] + [InlineData("foo", 6379)] + [InlineData("foo:6379", 6379)] + [InlineData("foo:6380", 6380)] + [InlineData("foo,ssl=false", 6379)] + [InlineData("foo:6379,ssl=false", 6379)] + [InlineData("foo:6380,ssl=false", 6380)] + + [InlineData("foo,ssl=true", 6380)] + [InlineData("foo:6379,ssl=true", 6379)] + [InlineData("foo:6380,ssl=true", 6380)] + [InlineData("foo:6381,ssl=true", 6381)] + public void ConfigStringRoundTripWithDefaultPorts(string config, int expectedPort) + { + var options = ConfigurationOptions.Parse(config); + string backAgain = options.ToString(); + Assert.Equal(config, backAgain.Replace("=True", "=true").Replace("=False", "=false")); + + options.SetDefaultPorts(); // normally it is the multiplexer that calls this, not us + Assert.Equal(expectedPort, ((DnsEndPoint)options.EndPoints.Single()).Port); + } + + [Theory] + [InlineData("foo", 0, false, 6379)] + [InlineData("foo", 6379, false, 6379)] + [InlineData("foo", 6380, false, 6380)] + + [InlineData("foo", 0, true, 6380)] + [InlineData("foo", 6379, true, 6379)] + [InlineData("foo", 6380, true, 6380)] + [InlineData("foo", 6381, true, 6381)] + + public void ConfigManualWithDefaultPorts(string host, int port, bool useSsl, int expectedPort) + { + var options = new ConfigurationOptions(); + if (port == 0) + { + options.EndPoints.Add(host); + } + else + { + options.EndPoints.Add(host, port); + } + if (useSsl) options.Ssl = true; + + options.SetDefaultPorts(); // 
normally it is the multiplexer that calls this, not us + Assert.Equal(expectedPort, ((DnsEndPoint)options.EndPoints.Single()).Port); + } +} diff --git a/tests/StackExchange.Redis.Tests/Deprecated.cs b/tests/StackExchange.Redis.Tests/Deprecated.cs deleted file mode 100644 index 68c76bea3..000000000 --- a/tests/StackExchange.Redis.Tests/Deprecated.cs +++ /dev/null @@ -1,57 +0,0 @@ -using System; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - /// - /// Testing that things we depcreate still parse, but are otherwise defaults. - /// - public class Deprecated : TestBase - { - public Deprecated(ITestOutputHelper output) : base(output) { } - -#pragma warning disable CS0618 // Type or member is obsolete - [Fact] - public void PreserveAsyncOrder() - { - Assert.True(Attribute.IsDefined(typeof(ConfigurationOptions).GetProperty(nameof(ConfigurationOptions.PreserveAsyncOrder)), typeof(ObsoleteAttribute))); - - var options = ConfigurationOptions.Parse("name=Hello"); - Assert.False(options.PreserveAsyncOrder); - - options = ConfigurationOptions.Parse("preserveAsyncOrder=true"); - Assert.Equal("", options.ToString()); - Assert.False(options.PreserveAsyncOrder); - - options = ConfigurationOptions.Parse("preserveAsyncOrder=false"); - Assert.Equal("", options.ToString()); - Assert.False(options.PreserveAsyncOrder); - } - - [Fact] - public void WriteBufferParse() - { - Assert.True(Attribute.IsDefined(typeof(ConfigurationOptions).GetProperty(nameof(ConfigurationOptions.WriteBuffer)), typeof(ObsoleteAttribute))); - - var options = ConfigurationOptions.Parse("name=Hello"); - Assert.Equal(0, options.WriteBuffer); - - options = ConfigurationOptions.Parse("writeBuffer=8092"); - Assert.Equal(0, options.WriteBuffer); - } - - [Fact] - public void ResponseTimeout() - { - Assert.True(Attribute.IsDefined(typeof(ConfigurationOptions).GetProperty(nameof(ConfigurationOptions.ResponseTimeout)), typeof(ObsoleteAttribute))); - - var options = 
ConfigurationOptions.Parse("name=Hello"); - Assert.Equal(0, options.ResponseTimeout); - - options = ConfigurationOptions.Parse("responseTimeout=1000"); - Assert.Equal(0, options.ResponseTimeout); - } -#pragma warning restore CS0618 // Type or member is obsolete - } -} diff --git a/tests/StackExchange.Redis.Tests/DeprecatedTests.cs b/tests/StackExchange.Redis.Tests/DeprecatedTests.cs new file mode 100644 index 000000000..ab909ea16 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/DeprecatedTests.cs @@ -0,0 +1,70 @@ +using System; +using Xunit; + +namespace StackExchange.Redis.Tests; + +/// +/// Testing that things we deprecate still parse, but are otherwise defaults. +/// +public class DeprecatedTests(ITestOutputHelper output) : TestBase(output) +{ +#pragma warning disable CS0618 // Type or member is obsolete + [Fact] + public void HighPrioritySocketThreads() + { + Assert.True(Attribute.IsDefined(typeof(ConfigurationOptions).GetProperty(nameof(ConfigurationOptions.HighPrioritySocketThreads))!, typeof(ObsoleteAttribute))); + + var options = ConfigurationOptions.Parse("name=Hello"); + Assert.False(options.HighPrioritySocketThreads); + + options = ConfigurationOptions.Parse("highPriorityThreads=true"); + Assert.Equal("", options.ToString()); + Assert.False(options.HighPrioritySocketThreads); + + options = ConfigurationOptions.Parse("highPriorityThreads=false"); + Assert.Equal("", options.ToString()); + Assert.False(options.HighPrioritySocketThreads); + } + + [Fact] + public void PreserveAsyncOrder() + { + Assert.True(Attribute.IsDefined(typeof(ConfigurationOptions).GetProperty(nameof(ConfigurationOptions.PreserveAsyncOrder))!, typeof(ObsoleteAttribute))); + + var options = ConfigurationOptions.Parse("name=Hello"); + Assert.False(options.PreserveAsyncOrder); + + options = ConfigurationOptions.Parse("preserveAsyncOrder=true"); + Assert.Equal("", options.ToString()); + Assert.False(options.PreserveAsyncOrder); + + options = 
ConfigurationOptions.Parse("preserveAsyncOrder=false"); + Assert.Equal("", options.ToString()); + Assert.False(options.PreserveAsyncOrder); + } + + [Fact] + public void WriteBufferParse() + { + Assert.True(Attribute.IsDefined(typeof(ConfigurationOptions).GetProperty(nameof(ConfigurationOptions.WriteBuffer))!, typeof(ObsoleteAttribute))); + + var options = ConfigurationOptions.Parse("name=Hello"); + Assert.Equal(0, options.WriteBuffer); + + options = ConfigurationOptions.Parse("writeBuffer=8092"); + Assert.Equal(0, options.WriteBuffer); + } + + [Fact] + public void ResponseTimeout() + { + Assert.True(Attribute.IsDefined(typeof(ConfigurationOptions).GetProperty(nameof(ConfigurationOptions.ResponseTimeout))!, typeof(ObsoleteAttribute))); + + var options = ConfigurationOptions.Parse("name=Hello"); + Assert.Equal(0, options.ResponseTimeout); + + options = ConfigurationOptions.Parse("responseTimeout=1000"); + Assert.Equal(0, options.ResponseTimeout); + } +#pragma warning restore CS0618 +} diff --git a/tests/StackExchange.Redis.Tests/DigestIntegrationTests.cs b/tests/StackExchange.Redis.Tests/DigestIntegrationTests.cs new file mode 100644 index 000000000..ec5171075 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/DigestIntegrationTests.cs @@ -0,0 +1,155 @@ +using System; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class DigestIntegrationTests(ITestOutputHelper output, SharedConnectionFixture fixture) + : TestBase(output, fixture) +{ + [Fact] + public async Task ReadDigest() + { + await using var conn = Create(require: RedisFeatures.v8_4_0_rc1); + byte[] blob = new byte[1024]; + new Random().NextBytes(blob); + var local = ValueCondition.CalculateDigest(blob); + Assert.Equal(ValueCondition.ConditionKind.DigestEquals, local.Kind); + Assert.Equal(RedisValue.StorageType.Int64, local.Value.Type); + Log("Local digest: " + local); + + var key = Me(); + var db = conn.GetDatabase(); + await db.KeyDeleteAsync(key, flags: 
CommandFlags.FireAndForget); + + // test without a value + var digest = await db.StringDigestAsync(key); + Assert.Null(digest); + + // test with a value + await db.StringSetAsync(key, blob, flags: CommandFlags.FireAndForget); + digest = await db.StringDigestAsync(key); + Assert.NotNull(digest); + Assert.Equal(ValueCondition.ConditionKind.DigestEquals, digest.Value.Kind); + Assert.Equal(RedisValue.StorageType.Int64, digest.Value.Value.Type); + Log("Server digest: " + digest); + Assert.Equal(local, digest.Value); + } + + [Theory] + [InlineData(null, (int)ValueCondition.ConditionKind.NotExists)] + [InlineData("new value", (int)ValueCondition.ConditionKind.NotExists)] + [InlineData(null, (int)ValueCondition.ConditionKind.ValueEquals)] + [InlineData(null, (int)ValueCondition.ConditionKind.DigestEquals)] + public async Task InvalidConditionalDelete(string? testValue, int rawKind) + { + await using var conn = Create(); // no server requirement, since fails locally + var key = Me(); + var db = conn.GetDatabase(); + var condition = CreateCondition(testValue, rawKind); + + var ex = await Assert.ThrowsAsync(async () => + { + await db.StringDeleteAsync(key, when: condition); + }); + Assert.StartsWith("StringDeleteAsync cannot be used with a NotExists condition.", ex.Message); + } + + [Theory] + [InlineData(null, null, (int)ValueCondition.ConditionKind.Always)] + [InlineData(null, "new value", (int)ValueCondition.ConditionKind.Always)] + [InlineData("old value", "new value", (int)ValueCondition.ConditionKind.Always, true)] + [InlineData("new value", "new value", (int)ValueCondition.ConditionKind.Always, true)] + + [InlineData(null, null, (int)ValueCondition.ConditionKind.Exists)] + [InlineData(null, "new value", (int)ValueCondition.ConditionKind.Exists)] + [InlineData("old value", "new value", (int)ValueCondition.ConditionKind.Exists, true)] + [InlineData("new value", "new value", (int)ValueCondition.ConditionKind.Exists, true)] + + [InlineData(null, "new value", 
(int)ValueCondition.ConditionKind.DigestEquals)] + [InlineData("old value", "new value", (int)ValueCondition.ConditionKind.DigestEquals)] + [InlineData("new value", "new value", (int)ValueCondition.ConditionKind.DigestEquals, true)] + + [InlineData(null, "new value", (int)ValueCondition.ConditionKind.ValueEquals)] + [InlineData("old value", "new value", (int)ValueCondition.ConditionKind.ValueEquals)] + [InlineData("new value", "new value", (int)ValueCondition.ConditionKind.ValueEquals, true)] + + [InlineData(null, null, (int)ValueCondition.ConditionKind.DigestNotEquals)] + [InlineData(null, "new value", (int)ValueCondition.ConditionKind.DigestNotEquals)] + [InlineData("old value", "new value", (int)ValueCondition.ConditionKind.DigestNotEquals, true)] + [InlineData("new value", "new value", (int)ValueCondition.ConditionKind.DigestNotEquals)] + + [InlineData(null, null, (int)ValueCondition.ConditionKind.ValueNotEquals)] + [InlineData(null, "new value", (int)ValueCondition.ConditionKind.ValueNotEquals)] + [InlineData("old value", "new value", (int)ValueCondition.ConditionKind.ValueNotEquals, true)] + [InlineData("new value", "new value", (int)ValueCondition.ConditionKind.ValueNotEquals)] + public async Task ConditionalDelete(string? dbValue, string? 
testValue, int rawKind, bool expectDelete = false) + { + await using var conn = Create(require: RedisFeatures.v8_4_0_rc1); + var key = Me(); + var db = conn.GetDatabase(); + await db.KeyDeleteAsync(key, flags: CommandFlags.FireAndForget); + if (dbValue != null) await db.StringSetAsync(key, dbValue, flags: CommandFlags.FireAndForget); + + var condition = CreateCondition(testValue, rawKind); + + var pendingDelete = db.StringDeleteAsync(key, when: condition); + var exists = await db.KeyExistsAsync(key); + var deleted = await pendingDelete; + + if (dbValue is null) + { + // didn't exist to be deleted + Assert.False(expectDelete); + Assert.False(exists); + Assert.False(deleted); + } + else + { + Assert.Equal(expectDelete, deleted); + Assert.Equal(!expectDelete, exists); + } + } + + private ValueCondition CreateCondition(string? testValue, int rawKind) + { + var condition = (ValueCondition.ConditionKind)rawKind switch + { + ValueCondition.ConditionKind.Always => ValueCondition.Always, + ValueCondition.ConditionKind.Exists => ValueCondition.Exists, + ValueCondition.ConditionKind.NotExists => ValueCondition.NotExists, + ValueCondition.ConditionKind.ValueEquals => ValueCondition.Equal(testValue), + ValueCondition.ConditionKind.ValueNotEquals => ValueCondition.NotEqual(testValue), + ValueCondition.ConditionKind.DigestEquals => ValueCondition.DigestEqual(testValue), + ValueCondition.ConditionKind.DigestNotEquals => ValueCondition.DigestNotEqual(testValue), + _ => throw new ArgumentOutOfRangeException(nameof(rawKind)), + }; + Log($"Condition: {condition}"); + return condition; + } + + [Fact] + public async Task LeadingZeroFormatting() + { + // Example generated that hashes to 0x00006c38adf31777; see https://github.com/redis/redis/issues/14496 + var localDigest = + ValueCondition.CalculateDigest("v8lf0c11xh8ymlqztfd3eeq16kfn4sspw7fqmnuuq3k3t75em5wdizgcdw7uc26nnf961u2jkfzkjytls2kwlj7626sd"u8); + Log($"local: {localDigest}"); + Assert.Equal("IFDEQ 00006c38adf31777", 
localDigest.ToString()); + + await using var conn = Create(require: RedisFeatures.v8_4_0_rc1); + var key = Me(); + var db = conn.GetDatabase(); + await db.KeyDeleteAsync(key, flags: CommandFlags.FireAndForget); + await db.StringSetAsync(key, "v8lf0c11xh8ymlqztfd3eeq16kfn4sspw7fqmnuuq3k3t75em5wdizgcdw7uc26nnf961u2jkfzkjytls2kwlj7626sd", flags: CommandFlags.FireAndForget); + var pendingDigest = db.StringDigestAsync(key); + var pendingDeleted = db.StringDeleteAsync(key, when: localDigest); + var existsAfter = await db.KeyExistsAsync(key); + + var serverDigest = await pendingDigest; + Log($"server: {serverDigest}"); + Assert.Equal(localDigest, serverDigest); + Assert.True(await pendingDeleted); + Assert.False(existsAfter); + } +} diff --git a/tests/StackExchange.Redis.Tests/DigestUnitTests.cs b/tests/StackExchange.Redis.Tests/DigestUnitTests.cs new file mode 100644 index 000000000..e1883c13b --- /dev/null +++ b/tests/StackExchange.Redis.Tests/DigestUnitTests.cs @@ -0,0 +1,186 @@ +using System; +using System.Buffers; +using System.Collections.Generic; +using System.IO.Hashing; +using System.Text; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class DigestUnitTests(ITestOutputHelper output) : TestBase(output) +{ + [Theory] + [MemberData(nameof(SimpleDigestTestValues))] + public void RedisValue_Digest(string equivalentValue, RedisValue value) + { + // first, use pure XxHash3 to see what we expect + var hashHex = GetXxh3Hex(equivalentValue); + + var digest = value.Digest(); + Assert.Equal(ValueCondition.ConditionKind.DigestEquals, digest.Kind); + + Assert.Equal($"IFDEQ {hashHex}", digest.ToString()); + } + + public static IEnumerable SimpleDigestTestValues() + { + yield return ["Hello World", (RedisValue)"Hello World"]; + yield return ["42", (RedisValue)"42"]; + yield return ["42", (RedisValue)42]; + } + + [Theory] + [InlineData("Hello World", "e34615aade2e6333")] + [InlineData("42", "1217cb28c0ef2191")] + public void ValueCondition_CalculateDigest(string 
source, string expected) + { + var digest = ValueCondition.CalculateDigest(Encoding.UTF8.GetBytes(source)); + Assert.Equal($"IFDEQ {expected}", digest.ToString()); + } + + [Theory] + [InlineData("e34615aade2e6333")] + [InlineData("1217cb28c0ef2191")] + public void ValueCondition_ParseDigest(string value) + { + // parse from hex chars + var digest = ValueCondition.ParseDigest(value.AsSpan()); + Assert.Equal($"IFDEQ {value}", digest.ToString()); + + // and the same, from hex bytes + digest = ValueCondition.ParseDigest(Encoding.UTF8.GetBytes(value).AsSpan()); + Assert.Equal($"IFDEQ {value}", digest.ToString()); + } + + [Theory] + [InlineData("Hello World", "e34615aade2e6333")] + [InlineData("42", "1217cb28c0ef2191")] + [InlineData("", "2d06800538d394c2")] + [InlineData("a", "e6c632b61e964e1f")] + public void KnownXxh3Values(string source, string expected) + => Assert.Equal(expected, GetXxh3Hex(source)); + + private static string GetXxh3Hex(string source) + { + var len = Encoding.UTF8.GetMaxByteCount(source.Length); + var oversized = ArrayPool.Shared.Rent(len); + #if NET + var bytes = Encoding.UTF8.GetBytes(source, oversized); + #else + int bytes; + unsafe + { + fixed (byte* bPtr = oversized) + { + fixed (char* cPtr = source) + { + bytes = Encoding.UTF8.GetBytes(cPtr, source.Length, bPtr, len); + } + } + } + #endif + var result = GetXxh3Hex(oversized.AsSpan(0, bytes)); + ArrayPool.Shared.Return(oversized); + return result; + } + + private static string GetXxh3Hex(ReadOnlySpan source) + { + byte[] targetBytes = new byte[8]; + XxHash3.Hash(source, targetBytes); + return BitConverter.ToString(targetBytes).Replace("-", string.Empty).ToLowerInvariant(); + } + + [Fact] + public void ValueCondition_Mutations() + { + const string InputValue = + "Meantime we shall express our darker purpose.\nGive me the map there. 
Know we have divided\nIn three our kingdom; and 'tis our fast intent\nTo shake all cares and business from our age,\nConferring them on younger strengths while we\nUnburthen'd crawl toward death. Our son of Cornwall,\nAnd you, our no less loving son of Albany,\nWe have this hour a constant will to publish\nOur daughters' several dowers, that future strife\nMay be prevented now. The princes, France and Burgundy,\nGreat rivals in our youngest daughter's love,\nLong in our court have made their amorous sojourn,\nAnd here are to be answer'd."; + + var condition = ValueCondition.Equal(InputValue); + Assert.Equal($"IFEQ {InputValue}", condition.ToString()); + Assert.True(condition.IsValueTest); + Assert.False(condition.IsDigestTest); + Assert.False(condition.IsNegated); + Assert.False(condition.IsExistenceTest); + + var negCondition = !condition; + Assert.NotEqual(condition, negCondition); + Assert.Equal($"IFNE {InputValue}", negCondition.ToString()); + Assert.True(negCondition.IsValueTest); + Assert.False(negCondition.IsDigestTest); + Assert.True(negCondition.IsNegated); + Assert.False(negCondition.IsExistenceTest); + + var negNegCondition = !negCondition; + Assert.Equal(condition, negNegCondition); + + var digest = condition.AsDigest(); + Assert.NotEqual(condition, digest); + Assert.Equal($"IFDEQ {GetXxh3Hex(InputValue)}", digest.ToString()); + Assert.False(digest.IsValueTest); + Assert.True(digest.IsDigestTest); + Assert.False(digest.IsNegated); + Assert.False(digest.IsExistenceTest); + + var negDigest = !digest; + Assert.NotEqual(digest, negDigest); + Assert.Equal($"IFDNE {GetXxh3Hex(InputValue)}", negDigest.ToString()); + Assert.False(negDigest.IsValueTest); + Assert.True(negDigest.IsDigestTest); + Assert.True(negDigest.IsNegated); + Assert.False(negDigest.IsExistenceTest); + + var negNegDigest = !negDigest; + Assert.Equal(digest, negNegDigest); + + var @default = default(ValueCondition); + Assert.False(@default.IsValueTest); + Assert.False(@default.IsDigestTest); + 
Assert.False(@default.IsNegated); + Assert.False(@default.IsExistenceTest); + Assert.Equal("", @default.ToString()); + Assert.Equal(ValueCondition.Always, @default); + + var ex = Assert.Throws(() => !@default); + Assert.Equal("operator ! cannot be used with a Always condition.", ex.Message); + + var exists = ValueCondition.Exists; + Assert.False(exists.IsValueTest); + Assert.False(exists.IsDigestTest); + Assert.False(exists.IsNegated); + Assert.True(exists.IsExistenceTest); + Assert.Equal("XX", exists.ToString()); + + var notExists = ValueCondition.NotExists; + Assert.False(notExists.IsValueTest); + Assert.False(notExists.IsDigestTest); + Assert.True(notExists.IsNegated); + Assert.True(notExists.IsExistenceTest); + Assert.Equal("NX", notExists.ToString()); + + Assert.NotEqual(exists, notExists); + Assert.Equal(exists, !notExists); + Assert.Equal(notExists, !exists); + } + + [Fact] + public void RandomBytes() + { + byte[] buffer = ArrayPool.Shared.Rent(8000); + var rand = new Random(); + + for (int i = 0; i < 100; i++) + { + var len = rand.Next(1, buffer.Length); + var span = buffer.AsSpan(0, len); +#if NET + rand.NextBytes(span); +#else + rand.NextBytes(buffer); +#endif + var digest = ValueCondition.CalculateDigest(span); + Assert.Equal($"IFDEQ {GetXxh3Hex(span)}", digest.ToString()); + } + } +} diff --git a/tests/StackExchange.Redis.Tests/DuplexStream.cs b/tests/StackExchange.Redis.Tests/DuplexStream.cs new file mode 100644 index 000000000..8a9b8c737 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/DuplexStream.cs @@ -0,0 +1,131 @@ +using System; +using System.IO; +using System.Threading; +using System.Threading.Tasks; + +namespace StackExchange.Redis.Tests; + +/// +/// Combines separate input and output streams into a single duplex stream. +/// +internal sealed class DuplexStream(Stream inputStream, Stream outputStream) : Stream +{ + private readonly Stream _inputStream = inputStream ?? 
throw new ArgumentNullException(nameof(inputStream)); + private readonly Stream _outputStream = outputStream ?? throw new ArgumentNullException(nameof(outputStream)); + + public override bool CanRead => _inputStream.CanRead; + public override bool CanWrite => _outputStream.CanWrite; + public override bool CanSeek => false; + public override bool CanTimeout => _inputStream.CanTimeout || _outputStream.CanTimeout; + + public override int ReadTimeout + { + get => _inputStream.ReadTimeout; + set => _inputStream.ReadTimeout = value; + } + + public override int WriteTimeout + { + get => _outputStream.WriteTimeout; + set => _outputStream.WriteTimeout = value; + } + + public override long Length => throw new NotSupportedException($"{nameof(DuplexStream)} does not support seeking."); + public override long Position + { + get => throw new NotSupportedException($"{nameof(DuplexStream)} does not support seeking."); + set => throw new NotSupportedException($"{nameof(DuplexStream)} does not support seeking."); + } + + public override int Read(byte[] buffer, int offset, int count) + => _inputStream.Read(buffer, offset, count); + + public override Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) + => _inputStream.ReadAsync(buffer, offset, count, cancellationToken); + +#if NET + public override ValueTask ReadAsync(Memory buffer, CancellationToken cancellationToken = default) + => _inputStream.ReadAsync(buffer, cancellationToken); + + public override int Read(Span buffer) + => _inputStream.Read(buffer); +#endif + + public override int ReadByte() + => _inputStream.ReadByte(); + + public override void Write(byte[] buffer, int offset, int count) + => _outputStream.Write(buffer, offset, count); + + public override Task WriteAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) + => _outputStream.WriteAsync(buffer, offset, count, cancellationToken); + +#if NET + public override ValueTask WriteAsync(ReadOnlyMemory buffer, 
CancellationToken cancellationToken = default) + => _outputStream.WriteAsync(buffer, cancellationToken); + + public override void Write(ReadOnlySpan buffer) + => _outputStream.Write(buffer); +#endif + + public override void WriteByte(byte value) + => _outputStream.WriteByte(value); + + public override void Flush() + => _outputStream.Flush(); + + public override Task FlushAsync(CancellationToken cancellationToken) + => _outputStream.FlushAsync(cancellationToken); + + public override long Seek(long offset, SeekOrigin origin) + => throw new NotSupportedException($"{nameof(DuplexStream)} does not support seeking."); + + public override void SetLength(long value) + => throw new NotSupportedException($"{nameof(DuplexStream)} does not support seeking."); + + public override void Close() + { + _inputStream.Close(); + _outputStream.Close(); + base.Close(); + } + + protected override void Dispose(bool disposing) + { + if (disposing) + { + _inputStream.Dispose(); + _outputStream.Dispose(); + } + base.Dispose(disposing); + } + +#if NET + public override async ValueTask DisposeAsync() + { + await _inputStream.DisposeAsync().ConfigureAwait(false); + await _outputStream.DisposeAsync().ConfigureAwait(false); + await base.DisposeAsync().ConfigureAwait(false); + } +#endif + + public override IAsyncResult BeginRead(byte[] buffer, int offset, int count, AsyncCallback? callback, object? state) + => _inputStream.BeginRead(buffer, offset, count, callback, state); + + public override int EndRead(IAsyncResult asyncResult) + => _inputStream.EndRead(asyncResult); + + public override IAsyncResult BeginWrite(byte[] buffer, int offset, int count, AsyncCallback? callback, object? 
state) + => _outputStream.BeginWrite(buffer, offset, count, callback, state); + + public override void EndWrite(IAsyncResult asyncResult) + => _outputStream.EndWrite(asyncResult); + +#if NET + public override void CopyTo(Stream destination, int bufferSize) + => _inputStream.CopyTo(destination, bufferSize); +#endif + + public override Task CopyToAsync(Stream destination, int bufferSize, CancellationToken cancellationToken) + => _inputStream.CopyToAsync(destination, bufferSize, cancellationToken); +} diff --git a/tests/StackExchange.Redis.Tests/EnvoyTests.cs b/tests/StackExchange.Redis.Tests/EnvoyTests.cs new file mode 100644 index 000000000..d9d22c801 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/EnvoyTests.cs @@ -0,0 +1,47 @@ +using System; +using System.Text; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class EnvoyTests(ITestOutputHelper output) : TestBase(output) +{ + protected override string GetConfiguration() => TestConfig.Current.ProxyServerAndPort; + + /// + /// Tests basic envoy connection with the ability to set and get a key. 
+ /// + [Fact] + public async Task TestBasicEnvoyConnection() + { + var sb = new StringBuilder(); + Writer.EchoTo(sb); + try + { + await using var conn = Create(configuration: GetConfiguration(), keepAlive: 1, connectTimeout: 2000, allowAdmin: true, shared: false, proxy: Proxy.Envoyproxy, log: Writer); + + var db = conn.GetDatabase(); + + var key = Me() + "foobar"; + const string value = "barfoo"; + db.StringSet(key, value); + + var expectedVal = db.StringGet(key); + + Assert.Equal(value, expectedVal); + } + catch (TimeoutException ex) when (ex.Message == "Connect timeout" || sb.ToString().Contains("Returned, but incorrectly")) + { + Assert.Skip($"Envoy server not found: {ex}."); + } + catch (AggregateException ex) + { + Assert.Skip($"Envoy server not found: {ex}."); + } + catch (RedisConnectionException ex) when (sb.ToString().Contains("It was not possible to connect to the redis server(s)")) + { + Assert.Skip($"Envoy server not found: {ex}."); + } + } +} diff --git a/tests/StackExchange.Redis.Tests/EventArgsTests.cs b/tests/StackExchange.Redis.Tests/EventArgsTests.cs index 5d7a18a43..74b5e369a 100644 --- a/tests/StackExchange.Redis.Tests/EventArgsTests.cs +++ b/tests/StackExchange.Redis.Tests/EventArgsTests.cs @@ -2,170 +2,80 @@ using NSubstitute; using Xunit; -namespace StackExchange.Redis.Tests +namespace StackExchange.Redis.Tests; + +public class EventArgsTests { - public class EventArgsTests + [Fact] + public void EventArgsCanBeSubstituted() { - [Fact] - public void EventArgsCanBeSubstituted() - { - EndPointEventArgs endpointArgsMock - = Substitute.For(default, default); + EndPointEventArgs endpointArgsMock + = Substitute.For(default, default); - RedisErrorEventArgs redisErrorArgsMock - = Substitute.For(default, default, default); + RedisErrorEventArgs redisErrorArgsMock + = Substitute.For(default, default, default); - ConnectionFailedEventArgs connectionFailedArgsMock - = Substitute.For( - default, default, default, default, default, default); + 
ConnectionFailedEventArgs connectionFailedArgsMock + = Substitute.For(default, default, default, default, default, default); - InternalErrorEventArgs internalErrorArgsMock - = Substitute.For( - default, default, default, default, default); + InternalErrorEventArgs internalErrorArgsMock + = Substitute.For(default, default, default, default, default); - HashSlotMovedEventArgs hashSlotMovedArgsMock - = Substitute.For( - default, default, default, default); + HashSlotMovedEventArgs hashSlotMovedArgsMock + = Substitute.For(default, default, default, default); - DiagnosticStub stub = DiagnosticStub.Create(); + DiagnosticStub stub = new DiagnosticStub(); - stub.ConfigurationChangedBroadcastHandler(default, endpointArgsMock); - Assert.Equal(stub.Message,DiagnosticStub.ConfigurationChangedBroadcastHandlerMessage); + stub.ConfigurationChangedBroadcastHandler(default, endpointArgsMock); + Assert.Equal(DiagnosticStub.ConfigurationChangedBroadcastHandlerMessage, stub.Message); - stub.ErrorMessageHandler(default, redisErrorArgsMock); - Assert.Equal(stub.Message, DiagnosticStub.ErrorMessageHandlerMessage); + stub.ErrorMessageHandler(default, redisErrorArgsMock); + Assert.Equal(DiagnosticStub.ErrorMessageHandlerMessage, stub.Message); - stub.ConnectionFailedHandler(default, connectionFailedArgsMock); - Assert.Equal(stub.Message, DiagnosticStub.ConnectionFailedHandlerMessage); + stub.ConnectionFailedHandler(default, connectionFailedArgsMock); + Assert.Equal(DiagnosticStub.ConnectionFailedHandlerMessage, stub.Message); - stub.InternalErrorHandler(default, internalErrorArgsMock); - Assert.Equal(stub.Message, DiagnosticStub.InternalErrorHandlerMessage); + stub.InternalErrorHandler(default, internalErrorArgsMock); + Assert.Equal(DiagnosticStub.InternalErrorHandlerMessage, stub.Message); - stub.ConnectionRestoredHandler(default, connectionFailedArgsMock); - Assert.Equal(stub.Message, DiagnosticStub.ConnectionRestoredHandlerMessage); + stub.ConnectionRestoredHandler(default, 
connectionFailedArgsMock); + Assert.Equal(DiagnosticStub.ConnectionRestoredHandlerMessage, stub.Message); - stub.ConfigurationChangedHandler(default, endpointArgsMock); - Assert.Equal(stub.Message, DiagnosticStub.ConfigurationChangedHandlerMessage); + stub.ConfigurationChangedHandler(default, endpointArgsMock); + Assert.Equal(DiagnosticStub.ConfigurationChangedHandlerMessage, stub.Message); - stub.HashSlotMovedHandler(default, hashSlotMovedArgsMock); - Assert.Equal(stub.Message, DiagnosticStub.HashSlotMovedHandlerMessage); - } + stub.HashSlotMovedHandler(default, hashSlotMovedArgsMock); + Assert.Equal(DiagnosticStub.HashSlotMovedHandlerMessage, stub.Message); + } - public class DiagnosticStub + public class DiagnosticStub + { + public const string ConfigurationChangedBroadcastHandlerMessage = "ConfigurationChangedBroadcastHandler invoked"; + public const string ErrorMessageHandlerMessage = "ErrorMessageHandler invoked"; + public const string ConnectionFailedHandlerMessage = "ConnectionFailedHandler invoked"; + public const string InternalErrorHandlerMessage = "InternalErrorHandler invoked"; + public const string ConnectionRestoredHandlerMessage = "ConnectionRestoredHandler invoked"; + public const string ConfigurationChangedHandlerMessage = "ConfigurationChangedHandler invoked"; + public const string HashSlotMovedHandlerMessage = "HashSlotMovedHandler invoked"; + + public DiagnosticStub() { - public const string ConfigurationChangedBroadcastHandlerMessage - = "ConfigurationChangedBroadcastHandler invoked"; - - public const string ErrorMessageHandlerMessage - = "ErrorMessageHandler invoked"; - - public const string ConnectionFailedHandlerMessage - = "ConnectionFailedHandler invoked"; - - public const string InternalErrorHandlerMessage - = "InternalErrorHandler invoked"; - - public const string ConnectionRestoredHandlerMessage - = "ConnectionRestoredHandler invoked"; - - public const string ConfigurationChangedHandlerMessage - = "ConfigurationChangedHandler invoked"; 
- - public const string HashSlotMovedHandlerMessage - = "HashSlotMovedHandler invoked"; - - public static DiagnosticStub Create() - { - DiagnosticStub stub = new DiagnosticStub(); - - stub.ConfigurationChangedBroadcastHandler - = (obj, args) => - { - stub.Message = ConfigurationChangedBroadcastHandlerMessage; - }; - - stub.ErrorMessageHandler - = (obj, args) => - { - stub.Message = ErrorMessageHandlerMessage; - }; - - stub.ConnectionFailedHandler - = (obj, args) => - { - stub.Message = ConnectionFailedHandlerMessage; - }; - - stub.InternalErrorHandler - = (obj, args) => - { - stub.Message = InternalErrorHandlerMessage; - }; - - stub.ConnectionRestoredHandler - = (obj, args) => - { - stub.Message = ConnectionRestoredHandlerMessage; - }; - - stub.ConfigurationChangedHandler - = (obj, args) => - { - stub.Message = ConfigurationChangedHandlerMessage; - }; - - stub.HashSlotMovedHandler - = (obj, args) => - { - stub.Message = HashSlotMovedHandlerMessage; - }; - - return stub; - } - - public string Message { get; private set; } - - public Action ConfigurationChangedBroadcastHandler - { - get; - private set; - } - - public Action ErrorMessageHandler - { - get; - private set; - } - - public Action ConnectionFailedHandler - { - get; - private set; - } - - public Action InternalErrorHandler - { - get; - private set; - } - - public Action ConnectionRestoredHandler - { - get; - private set; - } - - public Action ConfigurationChangedHandler - { - get; - private set; - } - - public Action HashSlotMovedHandler - { - get; - private set; - } + ConfigurationChangedBroadcastHandler = (obj, args) => Message = ConfigurationChangedBroadcastHandlerMessage; + ErrorMessageHandler = (obj, args) => Message = ErrorMessageHandlerMessage; + ConnectionFailedHandler = (obj, args) => Message = ConnectionFailedHandlerMessage; + InternalErrorHandler = (obj, args) => Message = InternalErrorHandlerMessage; + ConnectionRestoredHandler = (obj, args) => Message = ConnectionRestoredHandlerMessage; + 
ConfigurationChangedHandler = (obj, args) => Message = ConfigurationChangedHandlerMessage; + HashSlotMovedHandler = (obj, args) => Message = HashSlotMovedHandlerMessage; } + + public string? Message { get; private set; } + public Action ConfigurationChangedBroadcastHandler { get; } + public Action ErrorMessageHandler { get; } + public Action ConnectionFailedHandler { get; } + public Action InternalErrorHandler { get; } + public Action ConnectionRestoredHandler { get; } + public Action ConfigurationChangedHandler { get; } + public Action HashSlotMovedHandler { get; } } } diff --git a/tests/StackExchange.Redis.Tests/ExceptionFactoryTests.cs b/tests/StackExchange.Redis.Tests/ExceptionFactoryTests.cs index fcce9bcca..65d6946dc 100644 --- a/tests/StackExchange.Redis.Tests/ExceptionFactoryTests.cs +++ b/tests/StackExchange.Redis.Tests/ExceptionFactoryTests.cs @@ -1,210 +1,255 @@ using System; +using System.Threading.Tasks; using Xunit; -using Xunit.Abstractions; -namespace StackExchange.Redis.Tests +namespace StackExchange.Redis.Tests; + +public class ExceptionFactoryTests(ITestOutputHelper output) : TestBase(output) { - public class ExceptionFactoryTests : TestBase + [Fact] + public async Task NullLastException() { - public ExceptionFactoryTests(ITestOutputHelper output) : base (output) { } + await using var conn = Create(keepAlive: 1, connectTimeout: 10000, allowAdmin: true); - [Fact] - public void NullLastException() - { - using (var muxer = Create(keepAlive: 1, connectTimeout: 10000, allowAdmin: true)) - { - muxer.GetDatabase(); - Assert.Null(muxer.GetServerSnapshot()[0].LastException); - var ex = ExceptionFactory.NoConnectionAvailable(muxer as ConnectionMultiplexer, null, null); - Assert.Null(ex.InnerException); - } - } + conn.GetDatabase(); + Assert.Null(conn.GetServerSnapshot()[0].LastException); + var ex = ExceptionFactory.NoConnectionAvailable(conn.UnderlyingMultiplexer, null, null); + Assert.Null(ex.InnerException); + } - [Fact] - public void CanGetVersion() - 
{ - var libVer = ExceptionFactory.GetLibVersion(); - Assert.Matches(@"2\.[0-9]+\.[0-9]+(\.[0-9]+)?", libVer); - } + [Fact] + public void CanGetVersion() + { + var libVer = Utils.GetLibVersion(); + Assert.Matches(@"2\.[0-9]+\.[0-9]+(\.[0-9]+)?", libVer); + } #if DEBUG - [Fact] - public void MultipleEndpointsThrowConnectionException() + [Fact] + public async Task MultipleEndpointsThrowConnectionException() + { + try { - try - { - using (var muxer = Create(keepAlive: 1, connectTimeout: 10000, allowAdmin: true, shared: false)) - { - muxer.GetDatabase(); - muxer.AllowConnect = false; - - foreach (var endpoint in muxer.GetEndPoints()) - { - muxer.GetServer(endpoint).SimulateConnectionFailure(); - } - - var ex = ExceptionFactory.NoConnectionAvailable(muxer as ConnectionMultiplexer, null, null); - var outer = Assert.IsType(ex); - Assert.Equal(ConnectionFailureType.UnableToResolvePhysicalConnection, outer.FailureType); - var inner = Assert.IsType(outer.InnerException); - Assert.True(inner.FailureType == ConnectionFailureType.SocketFailure - || inner.FailureType == ConnectionFailureType.InternalFailure); - } - } - finally + await using var conn = Create(keepAlive: 1, connectTimeout: 10000, allowAdmin: true, shared: false); + + conn.GetDatabase(); + conn.AllowConnect = false; + + foreach (var endpoint in conn.GetEndPoints()) { - ClearAmbientFailures(); + conn.GetServer(endpoint).SimulateConnectionFailure(SimulatedFailureType.All); } + + var ex = ExceptionFactory.NoConnectionAvailable(conn.UnderlyingMultiplexer, null, null); + var outer = Assert.IsType(ex); + Assert.Equal(ConnectionFailureType.UnableToResolvePhysicalConnection, outer.FailureType); + var inner = Assert.IsType(outer.InnerException); + Assert.True(inner.FailureType == ConnectionFailureType.SocketFailure + || inner.FailureType == ConnectionFailureType.InternalFailure); + } + finally + { + ClearAmbientFailures(); } + } #endif - [Fact] - public void ServerTakesPrecendenceOverSnapshot() + [Fact] + public async Task 
ServerTakesPrecendenceOverSnapshot() + { + try { - try - { - using (var muxer = Create(keepAlive: 1, connectTimeout: 10000, allowAdmin: true, shared: false)) - { - muxer.GetDatabase(); - muxer.AllowConnect = false; + await using var conn = Create(keepAlive: 1, connectTimeout: 10000, allowAdmin: true, shared: false, backlogPolicy: BacklogPolicy.FailFast); - muxer.GetServer(muxer.GetEndPoints()[0]).SimulateConnectionFailure(); + conn.GetDatabase(); + conn.AllowConnect = false; - var ex = ExceptionFactory.NoConnectionAvailable(muxer as ConnectionMultiplexer, null, muxer.GetServerSnapshot()[0]); - Assert.IsType(ex); - Assert.IsType(ex.InnerException); - Assert.Equal(ex.InnerException, muxer.GetServerSnapshot()[0].LastException); - } - } - finally - { - ClearAmbientFailures(); - } + conn.GetServer(conn.GetEndPoints()[0]).SimulateConnectionFailure(SimulatedFailureType.All); + + var ex = ExceptionFactory.NoConnectionAvailable(conn.UnderlyingMultiplexer, null, conn.GetServerSnapshot()[0]); + Assert.IsType(ex); + Assert.IsType(ex.InnerException); + Assert.Equal(ex.InnerException, conn.GetServerSnapshot()[0].LastException); } + finally + { + ClearAmbientFailures(); + } + } - [Fact] - public void NullInnerExceptionForMultipleEndpointsWithNoLastException() + [Fact] + public async Task NullInnerExceptionForMultipleEndpointsWithNoLastException() + { + try { - try - { - using (var muxer = Create(keepAlive: 1, connectTimeout: 10000, allowAdmin: true)) - { - muxer.GetDatabase(); - muxer.AllowConnect = false; - var ex = ExceptionFactory.NoConnectionAvailable(muxer as ConnectionMultiplexer, null, null); - Assert.IsType(ex); - Assert.Null(ex.InnerException); - } - } - finally - { - ClearAmbientFailures(); - } + await using var conn = Create(keepAlive: 1, connectTimeout: 10000, allowAdmin: true); + + conn.GetDatabase(); + conn.AllowConnect = false; + var ex = ExceptionFactory.NoConnectionAvailable(conn.UnderlyingMultiplexer, null, null); + Assert.IsType(ex); + 
Assert.Null(ex.InnerException); + } + finally + { + ClearAmbientFailures(); + } + } + + [Fact] + public async Task TimeoutException() + { + try + { + await using var conn = Create(keepAlive: 1, connectTimeout: 10000, allowAdmin: true, shared: false); + + var server = GetServer(conn); + conn.AllowConnect = false; + var msg = Message.Create(-1, CommandFlags.None, RedisCommand.PING); + var rawEx = ExceptionFactory.Timeout(conn.UnderlyingMultiplexer, "Test Timeout", msg, new ServerEndPoint(conn.UnderlyingMultiplexer, server.EndPoint)); + var ex = Assert.IsType(rawEx); + Log("Exception: " + ex.Message); + + // Example format: "Test Timeout, command=PING, inst: 0, qu: 0, qs: 0, aw: False, in: 0, in-pipe: 0, out-pipe: 0, last-in: 0, cur-in: 0, serverEndpoint: 127.0.0.1:6379, mgr: 10 of 10 available, clientName: TimeoutException, IOCP: (Busy=0,Free=1000,Min=8,Max=1000), WORKER: (Busy=2,Free=2045,Min=8,Max=2047), v: 2.1.0 (Please take a look at this article for some common client-side issues that can cause timeouts: https://stackexchange.github.io/StackExchange.Redis/Timeouts)"; + Assert.StartsWith("Test Timeout, command=PING", ex.Message); + Assert.Contains("clientName: " + nameof(TimeoutException), ex.Message); + // Ensure our pipe numbers are in place + Assert.Contains("inst: 0, qu: 0, qs: 0, aw: False, bw: Inactive, in: 0, in-pipe: 0, out-pipe: 0, last-in: 0, cur-in: 0", ex.Message); + Assert.Contains("mc: 1/1/0", ex.Message); + Assert.Contains("serverEndpoint: " + server.EndPoint, ex.Message); + Assert.Contains("IOCP: ", ex.Message); + Assert.Contains("WORKER: ", ex.Message); + Assert.Contains("sync-ops: ", ex.Message); + Assert.Contains("async-ops: ", ex.Message); + Assert.Contains("conn-sec: n/a", ex.Message); + Assert.Contains("aoc: 1", ex.Message); +#if NET + // ...POOL: (Threads=33,QueuedItems=0,CompletedItems=5547,Timers=60)... 
+ Assert.Contains("POOL: ", ex.Message); + Assert.Contains("Threads=", ex.Message); + Assert.Contains("QueuedItems=", ex.Message); + Assert.Contains("CompletedItems=", ex.Message); + Assert.Contains("Timers=", ex.Message); +#endif + Assert.DoesNotContain("Unspecified/", ex.Message); + Assert.EndsWith(" (Please take a look at this article for some common client-side issues that can cause timeouts: https://stackexchange.github.io/StackExchange.Redis/Timeouts)", ex.Message); + Assert.Null(ex.InnerException); + } + finally + { + ClearAmbientFailures(); } + } - [Fact] - public void TimeoutException() + [Theory] + [InlineData(false, 0, 0, true, "Connection to Redis never succeeded (attempts: 0 - connection likely in-progress), unable to service operation: PING")] + [InlineData(false, 1, 0, true, "Connection to Redis never succeeded (attempts: 1 - connection likely in-progress), unable to service operation: PING")] + [InlineData(false, 12, 0, true, "Connection to Redis never succeeded (attempts: 12 - check your config), unable to service operation: PING")] + [InlineData(false, 0, 0, false, "Connection to Redis never succeeded (attempts: 0 - connection likely in-progress), unable to service operation: PING")] + [InlineData(false, 1, 0, false, "Connection to Redis never succeeded (attempts: 1 - connection likely in-progress), unable to service operation: PING")] + [InlineData(false, 12, 0, false, "Connection to Redis never succeeded (attempts: 12 - check your config), unable to service operation: PING")] + [InlineData(true, 0, 0, true, "No connection is active/available to service this operation: PING")] + [InlineData(true, 1, 0, true, "No connection is active/available to service this operation: PING")] + [InlineData(true, 12, 0, true, "No connection is active/available to service this operation: PING")] + public async Task NoConnectionException(bool abortOnConnect, int connCount, int completeCount, bool hasDetail, string messageStart) + { + try { - try + var options = new 
ConfigurationOptions() { - using (var muxer = Create(keepAlive: 1, connectTimeout: 10000, allowAdmin: true, shared: false) as ConnectionMultiplexer) - { - var server = GetServer(muxer); - muxer.AllowConnect = false; - var msg = Message.Create(-1, CommandFlags.None, RedisCommand.PING); - var rawEx = ExceptionFactory.Timeout(muxer, "Test Timeout", msg, new ServerEndPoint(muxer, server.EndPoint)); - var ex = Assert.IsType(rawEx); - Writer.WriteLine("Exception: " + ex.Message); - - // Example format: "Test Timeout, command=PING, inst: 0, qu: 0, qs: 0, aw: False, in: 0, in-pipe: 0, out-pipe: 0, serverEndpoint: 127.0.0.1:6379, mgr: 10 of 10 available, clientName: TimeoutException, IOCP: (Busy=0,Free=1000,Min=8,Max=1000), WORKER: (Busy=2,Free=2045,Min=8,Max=2047), v: 2.1.0 (Please take a look at this article for some common client-side issues that can cause timeouts: https://stackexchange.github.io/StackExchange.Redis/Timeouts)"; - Assert.StartsWith("Test Timeout, command=PING", ex.Message); - Assert.Contains("clientName: " + nameof(TimeoutException), ex.Message); - // Ensure our pipe numbers are in place - Assert.Contains("inst: 0, qu: 0, qs: 0, aw: False, in: 0, in-pipe: 0, out-pipe: 0", ex.Message); - Assert.Contains("mc: 1/1/0", ex.Message); - Assert.Contains("serverEndpoint: " + server.EndPoint, ex.Message); - Assert.DoesNotContain("Unspecified/", ex.Message); - Assert.EndsWith(" (Please take a look at this article for some common client-side issues that can cause timeouts: https://stackexchange.github.io/StackExchange.Redis/Timeouts)", ex.Message); - Assert.Null(ex.InnerException); - } + AbortOnConnectFail = abortOnConnect, + BacklogPolicy = BacklogPolicy.FailFast, + ConnectTimeout = 1000, + SyncTimeout = 500, + KeepAlive = 5000, + }; + + ConnectionMultiplexer conn; + if (abortOnConnect) + { + options.EndPoints.Add(TestConfig.Current.PrimaryServerAndPort); + conn = ConnectionMultiplexer.Connect(options, Writer); } - finally + else { - ClearAmbientFailures(); + 
options.EndPoints.Add($"doesnot.exist.{Guid.NewGuid():N}:6379"); + conn = ConnectionMultiplexer.Connect(options, Writer); } - } - [Theory] - [InlineData(false, 0, 0, true, "Connection to Redis never succeeded (attempts: 0 - connection likely in-progress), unable to service operation: PING")] - [InlineData(false, 1, 0, true, "Connection to Redis never succeeded (attempts: 1 - connection likely in-progress), unable to service operation: PING")] - [InlineData(false, 12, 0, true, "Connection to Redis never succeeded (attempts: 12 - check your config), unable to service operation: PING")] - [InlineData(false, 0, 0, false, "Connection to Redis never succeeded (attempts: 0 - connection likely in-progress), unable to service operation: PING")] - [InlineData(false, 1, 0, false, "Connection to Redis never succeeded (attempts: 1 - connection likely in-progress), unable to service operation: PING")] - [InlineData(false, 12, 0, false, "Connection to Redis never succeeded (attempts: 12 - check your config), unable to service operation: PING")] - [InlineData(true, 0, 0, true, "No connection is active/available to service this operation: PING")] - [InlineData(true, 1, 0, true, "No connection is active/available to service this operation: PING")] - [InlineData(true, 12, 0, true, "No connection is active/available to service this operation: PING")] - public void NoConnectionException(bool abortOnConnect, int connCount, int completeCount, bool hasDetail, string messageStart) - { - try + await using (conn) { - var options = new ConfigurationOptions() + var server = conn.GetServer(conn.GetEndPoints()[0]); + conn.AllowConnect = false; + conn._connectAttemptCount = connCount; + conn._connectCompletedCount = completeCount; + options.IncludeDetailInExceptions = hasDetail; + options.IncludePerformanceCountersInExceptions = hasDetail; + + var msg = Message.Create(-1, CommandFlags.None, RedisCommand.PING); + var rawEx = ExceptionFactory.NoConnectionAvailable(conn, msg, new 
ServerEndPoint(conn, server.EndPoint)); + var ex = Assert.IsType(rawEx); + Log("Exception: " + ex.Message); + + // Example format: "Exception: No connection is active/available to service this operation: PING, inst: 0, qu: 0, qs: 0, aw: False, in: 0, in-pipe: 0, out-pipe: 0, last-in: 0, cur-in: 0, serverEndpoint: 127.0.0.1:6379, mc: 1/1/0, mgr: 10 of 10 available, clientName: NoConnectionException, IOCP: (Busy=0,Free=1000,Min=8,Max=1000), WORKER: (Busy=2,Free=2045,Min=8,Max=2047), Local-CPU: 100%, v: 2.1.0.5"; + Assert.StartsWith(messageStart, ex.Message); + + // Ensure our pipe numbers are in place if they should be + if (hasDetail) { - AbortOnConnectFail = abortOnConnect, - ConnectTimeout = 500, - SyncTimeout = 500, - KeepAlive = 5000 - }; - - ConnectionMultiplexer muxer; - if (abortOnConnect) - { - options.EndPoints.Add(TestConfig.Current.MasterServerAndPort); - muxer = ConnectionMultiplexer.Connect(options); + Assert.Contains("inst: 0, qu: 0, qs: 0, aw: False, bw: Inactive, in: 0, in-pipe: 0, out-pipe: 0, last-in: 0, cur-in: 0", ex.Message); + Assert.Contains($"mc: {connCount}/{completeCount}/0", ex.Message); + Assert.Contains("serverEndpoint: " + server.EndPoint.ToString()?.Replace("Unspecified/", ""), ex.Message); } else { - options.EndPoints.Add($"doesnot.exist.{Guid.NewGuid():N}:6379"); - muxer = ConnectionMultiplexer.Connect(options); - } - - using (muxer) - { - var server = muxer.GetServer(muxer.GetEndPoints()[0]); - muxer.AllowConnect = false; - muxer._connectAttemptCount = connCount; - muxer._connectCompletedCount = completeCount; - muxer.IncludeDetailInExceptions = hasDetail; - muxer.IncludePerformanceCountersInExceptions = hasDetail; - - var msg = Message.Create(-1, CommandFlags.None, RedisCommand.PING); - var rawEx = ExceptionFactory.NoConnectionAvailable(muxer, msg, new ServerEndPoint(muxer, server.EndPoint)); - var ex = Assert.IsType(rawEx); - Writer.WriteLine("Exception: " + ex.Message); - - // Example format: "Exception: No connection is 
active/available to service this operation: PING, inst: 0, qu: 0, qs: 0, aw: False, in: 0, in-pipe: 0, out-pipe: 0, serverEndpoint: 127.0.0.1:6379, mc: 1/1/0, mgr: 10 of 10 available, clientName: NoConnectionException, IOCP: (Busy=0,Free=1000,Min=8,Max=1000), WORKER: (Busy=2,Free=2045,Min=8,Max=2047), Local-CPU: 100%, v: 2.1.0.5"; - Assert.StartsWith(messageStart, ex.Message); - - // Ensure our pipe numbers are in place if they should be - if (hasDetail) - { - Assert.Contains("inst: 0, qu: 0, qs: 0, aw: False, in: 0, in-pipe: 0, out-pipe: 0", ex.Message); - Assert.Contains($"mc: {connCount}/{completeCount}/0", ex.Message); - Assert.Contains("serverEndpoint: " + server.EndPoint.ToString().Replace("Unspecified/", ""), ex.Message); - } - else - { - Assert.DoesNotContain("inst: 0, qu: 0, qs: 0, aw: False, in: 0, in-pipe: 0, out-pipe: 0", ex.Message); - Assert.DoesNotContain($"mc: {connCount}/{completeCount}/0", ex.Message); - Assert.DoesNotContain("serverEndpoint: " + server.EndPoint.ToString().Replace("Unspecified/", ""), ex.Message); - } - Assert.DoesNotContain("Unspecified/", ex.Message); + Assert.DoesNotContain("inst: 0, qu: 0, qs: 0, aw: False, bw: Inactive, in: 0, in-pipe: 0, out-pipe: 0, last-in: 0, cur-in: 0", ex.Message); + Assert.DoesNotContain($"mc: {connCount}/{completeCount}/0", ex.Message); + Assert.DoesNotContain("serverEndpoint: " + server.EndPoint.ToString()?.Replace("Unspecified/", ""), ex.Message); } + Assert.DoesNotContain("Unspecified/", ex.Message); } - finally - { - ClearAmbientFailures(); - } } + finally + { + ClearAmbientFailures(); + } + } + + [Fact] + public async Task NoConnectionPrimaryOnlyException() + { + await using var conn = await ConnectionMultiplexer.ConnectAsync(TestConfig.Current.ReplicaServerAndPort, Writer); + + var msg = Message.Create(0, CommandFlags.None, RedisCommand.SET, (RedisKey)Me(), (RedisValue)"test"); + Assert.True(msg.IsPrimaryOnly()); + var rawEx = ExceptionFactory.NoConnectionAvailable(conn, msg, null); + var ex = 
Assert.IsType(rawEx); + Log("Exception: " + ex.Message); + + // Ensure a primary-only operation like SET gives the additional context + Assert.StartsWith("No connection (requires writable - not eligible for replica) is active/available to service this operation: SET", ex.Message); + } + + [Theory] + [InlineData(true, ConnectionFailureType.ProtocolFailure, "ProtocolFailure on [0]:GET myKey (StringProcessor), my annotation")] + [InlineData(true, ConnectionFailureType.ConnectionDisposed, "ConnectionDisposed on [0]:GET myKey (StringProcessor), my annotation")] + [InlineData(false, ConnectionFailureType.ProtocolFailure, "ProtocolFailure on [0]:GET (StringProcessor), my annotation")] + [InlineData(false, ConnectionFailureType.ConnectionDisposed, "ConnectionDisposed on [0]:GET (StringProcessor), my annotation")] + public async Task MessageFail(bool includeDetail, ConnectionFailureType failType, string messageStart) + { + await using var conn = Create(shared: false); + + conn.RawConfig.IncludeDetailInExceptions = includeDetail; + + var message = Message.Create(0, CommandFlags.None, RedisCommand.GET, (RedisKey)"myKey"); + var resultBox = SimpleResultBox.Create(); + message.SetSource(ResultProcessor.String, resultBox); + + message.Fail(failType, null, "my annotation", conn.UnderlyingMultiplexer); + + resultBox.GetResult(out var ex); + Assert.NotNull(ex); + + Assert.StartsWith(messageStart, ex.Message); } } diff --git a/tests/StackExchange.Redis.Tests/Execute.cs b/tests/StackExchange.Redis.Tests/Execute.cs deleted file mode 100644 index de58a671e..000000000 --- a/tests/StackExchange.Redis.Tests/Execute.cs +++ /dev/null @@ -1,44 +0,0 @@ -using System.Linq; -using System.Threading.Tasks; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - [Collection(SharedConnectionFixture.Key)] - public class ExecuteTests : TestBase - { - public ExecuteTests(ITestOutputHelper output, SharedConnectionFixture fixture) : base(output, fixture) { } - - [Fact] - 
public async Task DBExecute() - { - using (var conn = Create()) - { - var db = conn.GetDatabase(4); - RedisKey key = Me(); - db.StringSet(key, "some value"); - - var actual = (string)db.Execute("GET", key); - Assert.Equal("some value", actual); - - actual = (string)await db.ExecuteAsync("GET", key).ForAwait(); - Assert.Equal("some value", actual); - } - } - - [Fact] - public async Task ServerExecute() - { - using (var conn = Create()) - { - var server = conn.GetServer(conn.GetEndPoints().First()); - var actual = (string)server.Execute("echo", "some value"); - Assert.Equal("some value", actual); - - actual = (string)await server.ExecuteAsync("echo", "some value").ForAwait(); - Assert.Equal("some value", actual); - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/ExecuteTests.cs b/tests/StackExchange.Redis.Tests/ExecuteTests.cs new file mode 100644 index 000000000..1e1f10bd4 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/ExecuteTests.cs @@ -0,0 +1,37 @@ +using System.Linq; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class ExecuteTests(ITestOutputHelper output, SharedConnectionFixture fixture) : TestBase(output, fixture) +{ + [Fact] + public async Task DBExecute() + { + await using var conn = Create(); + + var db = conn.GetDatabase(4); + RedisKey key = Me(); + db.StringSet(key, "some value"); + + var actual = (string?)db.Execute("GET", key); + Assert.Equal("some value", actual); + + actual = (string?)await db.ExecuteAsync("GET", key).ForAwait(); + Assert.Equal("some value", actual); + } + + [Fact] + public async Task ServerExecute() + { + await using var conn = Create(); + + var server = conn.GetServer(conn.GetEndPoints().First()); + var actual = (string?)server.Execute("echo", "some value"); + Assert.Equal("some value", actual); + + actual = (string?)await server.ExecuteAsync("echo", "some value").ForAwait(); + Assert.Equal("some value", actual); + } +} diff --git 
a/tests/StackExchange.Redis.Tests/Expiry.cs b/tests/StackExchange.Redis.Tests/Expiry.cs deleted file mode 100644 index 29cd199e2..000000000 --- a/tests/StackExchange.Redis.Tests/Expiry.cs +++ /dev/null @@ -1,96 +0,0 @@ -using System; -using System.Threading.Tasks; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - [Collection(SharedConnectionFixture.Key)] - public class Expiry : TestBase - { - public Expiry(ITestOutputHelper output, SharedConnectionFixture fixture) : base (output, fixture) { } - - private static string[] GetMap(bool disablePTimes) => disablePTimes ? (new[] { "pexpire", "pexpireat", "pttl" }) : null; - - [Theory] - [InlineData(true)] - [InlineData(false)] - public async Task TestBasicExpiryTimeSpan(bool disablePTimes) - { - using (var muxer = Create(disabledCommands: GetMap(disablePTimes))) - { - RedisKey key = Me(); - var conn = muxer.GetDatabase(); - conn.KeyDelete(key, CommandFlags.FireAndForget); - - conn.StringSet(key, "new value", flags: CommandFlags.FireAndForget); - var a = conn.KeyTimeToLiveAsync(key); - conn.KeyExpire(key, TimeSpan.FromHours(1), CommandFlags.FireAndForget); - var b = conn.KeyTimeToLiveAsync(key); - conn.KeyExpire(key, (TimeSpan?)null, CommandFlags.FireAndForget); - var c = conn.KeyTimeToLiveAsync(key); - conn.KeyExpire(key, TimeSpan.FromHours(1.5), CommandFlags.FireAndForget); - var d = conn.KeyTimeToLiveAsync(key); - conn.KeyExpire(key, TimeSpan.MaxValue, CommandFlags.FireAndForget); - var e = conn.KeyTimeToLiveAsync(key); - conn.KeyDelete(key, CommandFlags.FireAndForget); - var f = conn.KeyTimeToLiveAsync(key); - - Assert.Null(await a); - var time = await b; - Assert.NotNull(time); - Assert.True(time > TimeSpan.FromMinutes(59.9) && time <= TimeSpan.FromMinutes(60)); - Assert.Null(await c); - time = await d; - Assert.NotNull(time); - Assert.True(time > TimeSpan.FromMinutes(89.9) && time <= TimeSpan.FromMinutes(90)); - Assert.Null(await e); - Assert.Null(await f); - } - } - - [Theory] - 
[InlineData(true, true)] - [InlineData(false, true)] - [InlineData(true, false)] - [InlineData(false, false)] - public async Task TestBasicExpiryDateTime(bool disablePTimes, bool utc) - { - using (var muxer = Create(disabledCommands: GetMap(disablePTimes))) - { - RedisKey key = Me(); - var conn = muxer.GetDatabase(); - conn.KeyDelete(key, CommandFlags.FireAndForget); - - var now = utc ? DateTime.UtcNow : DateTime.Now; - Log("Now: {0}", now); - conn.StringSet(key, "new value", flags: CommandFlags.FireAndForget); - var a = conn.KeyTimeToLiveAsync(key); - conn.KeyExpire(key, now.AddHours(1), CommandFlags.FireAndForget); - var b = conn.KeyTimeToLiveAsync(key); - conn.KeyExpire(key, (DateTime?)null, CommandFlags.FireAndForget); - var c = conn.KeyTimeToLiveAsync(key); - conn.KeyExpire(key, now.AddHours(1.5), CommandFlags.FireAndForget); - var d = conn.KeyTimeToLiveAsync(key); - conn.KeyExpire(key, DateTime.MaxValue, CommandFlags.FireAndForget); - var e = conn.KeyTimeToLiveAsync(key); - conn.KeyDelete(key, CommandFlags.FireAndForget); - var f = conn.KeyTimeToLiveAsync(key); - - Assert.Null(await a); - var time = await b; - Assert.NotNull(time); - Log("Time: {0}, Expected: {1}-{2}", time, TimeSpan.FromMinutes(59), TimeSpan.FromMinutes(60)); - Assert.True(time >= TimeSpan.FromMinutes(59)); - Assert.True(time <= TimeSpan.FromMinutes(60.1)); - Assert.Null(await c); - time = await d; - Assert.NotNull(time); - Assert.True(time >= TimeSpan.FromMinutes(89)); - Assert.True(time <= TimeSpan.FromMinutes(90.1)); - Assert.Null(await e); - Assert.Null(await f); - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/ExpiryTests.cs b/tests/StackExchange.Redis.Tests/ExpiryTests.cs new file mode 100644 index 000000000..fab26586f --- /dev/null +++ b/tests/StackExchange.Redis.Tests/ExpiryTests.cs @@ -0,0 +1,192 @@ +using System; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class ExpiryTests(ITestOutputHelper output, 
SharedConnectionFixture fixture) : TestBase(output, fixture) +{ + private static string[]? GetMap(bool disablePTimes) => disablePTimes ? ["pexpire", "pexpireat", "pttl"] : null; + + [Theory] + [InlineData(true)] + [InlineData(false)] + public async Task TestBasicExpiryTimeSpan(bool disablePTimes) + { + await using var conn = Create(disabledCommands: GetMap(disablePTimes)); + + RedisKey key = Me(); + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + + db.StringSet(key, "new value", flags: CommandFlags.FireAndForget); + var a = db.KeyTimeToLiveAsync(key); + db.KeyExpire(key, TimeSpan.FromHours(1), CommandFlags.FireAndForget); + var b = db.KeyTimeToLiveAsync(key); + db.KeyExpire(key, (TimeSpan?)null, CommandFlags.FireAndForget); + var c = db.KeyTimeToLiveAsync(key); + db.KeyExpire(key, TimeSpan.FromHours(1.5), CommandFlags.FireAndForget); + var d = db.KeyTimeToLiveAsync(key); + db.KeyExpire(key, TimeSpan.MaxValue, CommandFlags.FireAndForget); + var e = db.KeyTimeToLiveAsync(key); + db.KeyDelete(key, CommandFlags.FireAndForget); + var f = db.KeyTimeToLiveAsync(key); + + Assert.Null(await a); + var time = await b; + Assert.NotNull(time); + Assert.True(time > TimeSpan.FromMinutes(59.9) && time <= TimeSpan.FromMinutes(60)); + Assert.Null(await c); + time = await d; + Assert.NotNull(time); + Assert.True(time > TimeSpan.FromMinutes(89.9) && time <= TimeSpan.FromMinutes(90)); + Assert.Null(await e); + Assert.Null(await f); + } + + [Theory] + [InlineData(true)] + [InlineData(false)] + public async Task TestExpiryOptions(bool disablePTimes) + { + await using var conn = Create(disabledCommands: GetMap(disablePTimes), require: RedisFeatures.v7_0_0_rc1); + + var key = Me(); + var db = conn.GetDatabase(); + db.KeyDelete(key); + db.StringSet(key, "value"); + + // The key has no expiry + Assert.False(await db.KeyExpireAsync(key, TimeSpan.FromHours(1), ExpireWhen.HasExpiry)); + Assert.True(await db.KeyExpireAsync(key, TimeSpan.FromHours(1), 
ExpireWhen.HasNoExpiry)); + + // The key has an existing expiry + Assert.True(await db.KeyExpireAsync(key, TimeSpan.FromHours(1), ExpireWhen.HasExpiry)); + Assert.False(await db.KeyExpireAsync(key, TimeSpan.FromHours(1), ExpireWhen.HasNoExpiry)); + + // Set only when the new expiry is greater than current one + Assert.True(await db.KeyExpireAsync(key, TimeSpan.FromHours(1.5), ExpireWhen.GreaterThanCurrentExpiry)); + Assert.False(await db.KeyExpireAsync(key, TimeSpan.FromHours(0.5), ExpireWhen.GreaterThanCurrentExpiry)); + + // Set only when the new expiry is less than current one + Assert.True(await db.KeyExpireAsync(key, TimeSpan.FromHours(0.5), ExpireWhen.LessThanCurrentExpiry)); + Assert.False(await db.KeyExpireAsync(key, TimeSpan.FromHours(1.5), ExpireWhen.LessThanCurrentExpiry)); + } + + [Theory] + [InlineData(true, true)] + [InlineData(false, true)] + [InlineData(true, false)] + [InlineData(false, false)] + public async Task TestBasicExpiryDateTime(bool disablePTimes, bool utc) + { + await using var conn = Create(disabledCommands: GetMap(disablePTimes)); + + RedisKey key = Me(); + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + + var now = utc ? 
DateTime.UtcNow : DateTime.Now; + var serverTime = GetServer(conn).Time(); + Log("Server time: {0}", serverTime); + var offset = DateTime.UtcNow - serverTime; + + Log("Now (local time): {0}", now); + db.StringSet(key, "new value", flags: CommandFlags.FireAndForget); + var a = db.KeyTimeToLiveAsync(key); + db.KeyExpire(key, now.AddHours(1), CommandFlags.FireAndForget); + var b = db.KeyTimeToLiveAsync(key); + db.KeyExpire(key, (DateTime?)null, CommandFlags.FireAndForget); + var c = db.KeyTimeToLiveAsync(key); + db.KeyExpire(key, now.AddHours(1.5), CommandFlags.FireAndForget); + var d = db.KeyTimeToLiveAsync(key); + db.KeyExpire(key, DateTime.MaxValue, CommandFlags.FireAndForget); + var e = db.KeyTimeToLiveAsync(key); + db.KeyDelete(key, CommandFlags.FireAndForget); + var f = db.KeyTimeToLiveAsync(key); + + Assert.Null(await a); + var timeResult = await b; + Assert.NotNull(timeResult); + TimeSpan time = timeResult.Value; + + // Adjust for server time offset, if any when checking expectations + time -= offset; + + Log("Time: {0}, Expected: {1}-{2}", time, TimeSpan.FromMinutes(59), TimeSpan.FromMinutes(60)); + Assert.True(time >= TimeSpan.FromMinutes(59)); + Assert.True(time <= TimeSpan.FromMinutes(60.1)); + Assert.Null(await c); + + timeResult = await d; + Assert.NotNull(timeResult); + time = timeResult.Value; + + Assert.True(time >= TimeSpan.FromMinutes(89)); + Assert.True(time <= TimeSpan.FromMinutes(90.1)); + Assert.Null(await e); + Assert.Null(await f); + } + + [Theory] + [InlineData(true)] + [InlineData(false)] + public async Task KeyExpiryTime(bool disablePTimes) + { + await using var conn = Create(disabledCommands: GetMap(disablePTimes), require: RedisFeatures.v7_0_0_rc1); + + var key = Me(); + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + + var expireTime = DateTime.UtcNow.AddHours(1); + db.StringSet(key, "new value", flags: CommandFlags.FireAndForget); + db.KeyExpire(key, expireTime, CommandFlags.FireAndForget); + + var time = 
db.KeyExpireTime(key); + Assert.NotNull(time); + Assert.Equal(expireTime, time!.Value, TimeSpan.FromSeconds(30)); + + // Without associated expiration time + db.KeyDelete(key, CommandFlags.FireAndForget); + db.StringSet(key, "new value", flags: CommandFlags.FireAndForget); + time = db.KeyExpireTime(key); + Assert.Null(time); + + // Non existing key + db.KeyDelete(key, CommandFlags.FireAndForget); + time = db.KeyExpireTime(key); + Assert.Null(time); + } + + [Theory] + [InlineData(true)] + [InlineData(false)] + public async Task KeyExpiryTimeAsync(bool disablePTimes) + { + await using var conn = Create(disabledCommands: GetMap(disablePTimes), require: RedisFeatures.v7_0_0_rc1); + + var key = Me(); + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + + var expireTime = DateTime.UtcNow.AddHours(1); + db.StringSet(key, "new value", flags: CommandFlags.FireAndForget); + db.KeyExpire(key, expireTime, CommandFlags.FireAndForget); + + var time = await db.KeyExpireTimeAsync(key); + Assert.NotNull(time); + Assert.Equal(expireTime, time.Value, TimeSpan.FromSeconds(30)); + + // Without associated expiration time + db.KeyDelete(key, CommandFlags.FireAndForget); + db.StringSet(key, "new value", flags: CommandFlags.FireAndForget); + time = await db.KeyExpireTimeAsync(key); + Assert.Null(time); + + // Non existing key + db.KeyDelete(key, CommandFlags.FireAndForget); + time = await db.KeyExpireTimeAsync(key); + Assert.Null(time); + } +} diff --git a/tests/StackExchange.Redis.Tests/ExpiryTokenTests.cs b/tests/StackExchange.Redis.Tests/ExpiryTokenTests.cs new file mode 100644 index 000000000..6012422ed --- /dev/null +++ b/tests/StackExchange.Redis.Tests/ExpiryTokenTests.cs @@ -0,0 +1,116 @@ +using System; +using Xunit; +using static StackExchange.Redis.Expiration; +namespace StackExchange.Redis.Tests; + +public class ExpirationTests // pure tests, no DB +{ + [Fact] + public void Persist_Seconds() + { + TimeSpan? 
time = TimeSpan.FromMilliseconds(5000); + var ex = CreateOrPersist(time, false); + Assert.Equal(2, ex.TokenCount); + Assert.Equal("EX 5", ex.ToString()); + } + + [Fact] + public void Persist_Milliseconds() + { + TimeSpan? time = TimeSpan.FromMilliseconds(5001); + var ex = CreateOrPersist(time, false); + Assert.Equal(2, ex.TokenCount); + Assert.Equal("PX 5001", ex.ToString()); + } + + [Fact] + public void Persist_None_False() + { + TimeSpan? time = null; + var ex = CreateOrPersist(time, false); + Assert.Equal(0, ex.TokenCount); + Assert.Equal("", ex.ToString()); + } + + [Fact] + public void Persist_None_True() + { + TimeSpan? time = null; + var ex = CreateOrPersist(time, true); + Assert.Equal(1, ex.TokenCount); + Assert.Equal("PERSIST", ex.ToString()); + } + + [Fact] + public void Persist_Both() + { + TimeSpan? time = TimeSpan.FromMilliseconds(5000); + var ex = Assert.Throws(() => CreateOrPersist(time, true)); + Assert.Equal("persist", ex.ParamName); + Assert.StartsWith("Cannot specify both expiry and persist", ex.Message); + } + + [Fact] + public void KeepTtl_Seconds() + { + TimeSpan? time = TimeSpan.FromMilliseconds(5000); + var ex = CreateOrKeepTtl(time, false); + Assert.Equal(2, ex.TokenCount); + Assert.Equal("EX 5", ex.ToString()); + } + + [Fact] + public void KeepTtl_Milliseconds() + { + TimeSpan? time = TimeSpan.FromMilliseconds(5001); + var ex = CreateOrKeepTtl(time, false); + Assert.Equal(2, ex.TokenCount); + Assert.Equal("PX 5001", ex.ToString()); + } + + [Fact] + public void KeepTtl_None_False() + { + TimeSpan? time = null; + var ex = CreateOrKeepTtl(time, false); + Assert.Equal(0, ex.TokenCount); + Assert.Equal("", ex.ToString()); + } + + [Fact] + public void KeepTtl_None_True() + { + TimeSpan? time = null; + var ex = CreateOrKeepTtl(time, true); + Assert.Equal(1, ex.TokenCount); + Assert.Equal("KEEPTTL", ex.ToString()); + } + + [Fact] + public void KeepTtl_Both() + { + TimeSpan? 
time = TimeSpan.FromMilliseconds(5000); + var ex = Assert.Throws(() => CreateOrKeepTtl(time, true)); + Assert.Equal("keepTtl", ex.ParamName); + Assert.StartsWith("Cannot specify both expiry and keepTtl", ex.Message); + } + + [Fact] + public void DateTime_Seconds() + { + var when = new DateTime(2025, 7, 23, 10, 4, 14, DateTimeKind.Utc); + var ex = new Expiration(when); + Assert.Equal(2, ex.TokenCount); + Assert.Equal("EXAT 1753265054", ex.ToString()); + } + + [Fact] + public void DateTime_Milliseconds() + { + var when = new DateTime(2025, 7, 23, 10, 4, 14, DateTimeKind.Utc); + when = when.AddMilliseconds(14); + var ex = new Expiration(when); + Assert.Equal(2, ex.TokenCount); + Assert.Equal("PXAT 1753265054014", ex.ToString()); + } +} diff --git a/tests/StackExchange.Redis.Tests/FSharpCompat.cs b/tests/StackExchange.Redis.Tests/FSharpCompat.cs deleted file mode 100644 index a9b68b8d5..000000000 --- a/tests/StackExchange.Redis.Tests/FSharpCompat.cs +++ /dev/null @@ -1,26 +0,0 @@ -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - public class FSharpCompat : TestBase - { - public FSharpCompat(ITestOutputHelper output) : base (output) { } - - [Fact] - public void RedisKeyConstructor() - { - Assert.Equal(default, new RedisKey()); - Assert.Equal((RedisKey)"MyKey", new RedisKey("MyKey")); - Assert.Equal((RedisKey)"MyKey2", new RedisKey(null, "MyKey2")); - } - - [Fact] - public void RedisValueConstructor() - { - Assert.Equal(default, new RedisValue()); - Assert.Equal((RedisValue)"MyKey", new RedisValue("MyKey")); - Assert.Equal((RedisValue)"MyKey2", new RedisValue("MyKey2", 0)); - } - } -} diff --git a/tests/StackExchange.Redis.Tests/FSharpCompatTests.cs b/tests/StackExchange.Redis.Tests/FSharpCompatTests.cs new file mode 100644 index 000000000..192234f2d --- /dev/null +++ b/tests/StackExchange.Redis.Tests/FSharpCompatTests.cs @@ -0,0 +1,24 @@ +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class 
FSharpCompatTests(ITestOutputHelper output) : TestBase(output) +{ +#pragma warning disable SA1129 // Do not use default value type constructor + [Fact] + public void RedisKeyConstructor() + { + Assert.Equal(default, new RedisKey()); + Assert.Equal((RedisKey)"MyKey", new RedisKey("MyKey")); + Assert.Equal((RedisKey)"MyKey2", new RedisKey(null, "MyKey2")); + } + + [Fact] + public void RedisValueConstructor() + { + Assert.Equal(default, new RedisValue()); + Assert.Equal((RedisValue)"MyKey", new RedisValue("MyKey")); + Assert.Equal((RedisValue)"MyKey2", new RedisValue("MyKey2", 0)); + } +#pragma warning restore SA1129 // Do not use default value type constructor +} diff --git a/tests/StackExchange.Redis.Tests/Failover.cs b/tests/StackExchange.Redis.Tests/Failover.cs deleted file mode 100644 index 4923c65ef..000000000 --- a/tests/StackExchange.Redis.Tests/Failover.cs +++ /dev/null @@ -1,366 +0,0 @@ -using System; -using System.IO; -using System.Threading; -using System.Threading.Tasks; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - public class Failover : TestBase, IAsyncLifetime - { - protected override string GetConfiguration() => GetMasterReplicaConfig().ToString(); - - public Failover(ITestOutputHelper output) : base(output) - { - } - - public Task DisposeAsync() => Task.CompletedTask; - - public async Task InitializeAsync() - { - using (var mutex = Create()) - { - var shouldBeMaster = mutex.GetServer(TestConfig.Current.FailoverMasterServerAndPort); - if (shouldBeMaster.IsReplica) - { - Log(shouldBeMaster.EndPoint + " should be master, fixing..."); - shouldBeMaster.MakeMaster(ReplicationChangeOptions.SetTiebreaker); - } - - var shouldBeReplica = mutex.GetServer(TestConfig.Current.FailoverReplicaServerAndPort); - if (!shouldBeReplica.IsReplica) - { - Log(shouldBeReplica.EndPoint + " should be a replica, fixing..."); - shouldBeReplica.ReplicaOf(shouldBeMaster.EndPoint); - await Task.Delay(2000).ForAwait(); - } - } - } - - private 
static ConfigurationOptions GetMasterReplicaConfig() - { - return new ConfigurationOptions - { - AllowAdmin = true, - SyncTimeout = 100000, - EndPoints = - { - { TestConfig.Current.FailoverMasterServer, TestConfig.Current.FailoverMasterPort }, - { TestConfig.Current.FailoverReplicaServer, TestConfig.Current.FailoverReplicaPort }, - } - }; - } - - [Fact] - public async Task ConfigureAsync() - { - using (var muxer = Create()) - { - await Task.Delay(1000).ForAwait(); - Log("About to reconfigure....."); - await muxer.ConfigureAsync().ForAwait(); - Log("Reconfigured"); - } - } - - [Fact] - public async Task ConfigureSync() - { - using (var muxer = Create()) - { - await Task.Delay(1000).ForAwait(); - Log("About to reconfigure....."); - muxer.Configure(); - Log("Reconfigured"); - } - } - - [Fact] - public async Task ConfigVerifyReceiveConfigChangeBroadcast() - { - _ = GetConfiguration(); - using (var sender = Create(allowAdmin: true)) - using (var receiver = Create(syncTimeout: 2000)) - { - int total = 0; - receiver.ConfigurationChangedBroadcast += (s, a) => - { - Log("Config changed: " + (a.EndPoint == null ? 
"(none)" : a.EndPoint.ToString())); - Interlocked.Increment(ref total); - }; - // send a reconfigure/reconnect message - long count = sender.PublishReconfigure(); - GetServer(receiver).Ping(); - GetServer(receiver).Ping(); - await Task.Delay(1000).ConfigureAwait(false); - Assert.True(count == -1 || count >= 2, "subscribers"); - Assert.True(Interlocked.CompareExchange(ref total, 0, 0) >= 1, "total (1st)"); - - Interlocked.Exchange(ref total, 0); - - // and send a second time via a re-master operation - var server = GetServer(sender); - if (server.IsReplica) Skip.Inconclusive("didn't expect a replica"); - server.MakeMaster(ReplicationChangeOptions.Broadcast); - await Task.Delay(1000).ConfigureAwait(false); - GetServer(receiver).Ping(); - GetServer(receiver).Ping(); - Assert.True(Interlocked.CompareExchange(ref total, 0, 0) >= 1, "total (2nd)"); - } - } - - - [Fact] - public async Task DereplicateGoesToPrimary() - { - ConfigurationOptions config = GetMasterReplicaConfig(); - config.ConfigCheckSeconds = 5; - using (var conn = ConnectionMultiplexer.Connect(config)) - { - var primary = conn.GetServer(TestConfig.Current.FailoverMasterServerAndPort); - var secondary = conn.GetServer(TestConfig.Current.FailoverReplicaServerAndPort); - - primary.Ping(); - secondary.Ping(); - - primary.MakeMaster(ReplicationChangeOptions.SetTiebreaker); - secondary.MakeMaster(ReplicationChangeOptions.None); - - await Task.Delay(100).ConfigureAwait(false); - - primary.Ping(); - secondary.Ping(); - - using (var writer = new StringWriter()) - { - conn.Configure(writer); - string log = writer.ToString(); - Writer.WriteLine(log); - bool isUnanimous = log.Contains("tie-break is unanimous at " + TestConfig.Current.FailoverMasterServerAndPort); - if (!isUnanimous) Skip.Inconclusive("this is timing sensitive; unable to verify this time"); - } - // k, so we know everyone loves 6379; is that what we get? 
- - var db = conn.GetDatabase(); - RedisKey key = Me(); - - Assert.Equal(primary.EndPoint, db.IdentifyEndpoint(key, CommandFlags.PreferMaster)); - Assert.Equal(primary.EndPoint, db.IdentifyEndpoint(key, CommandFlags.DemandMaster)); - Assert.Equal(primary.EndPoint, db.IdentifyEndpoint(key, CommandFlags.PreferReplica)); - - var ex = Assert.Throws(() => db.IdentifyEndpoint(key, CommandFlags.DemandReplica)); - Assert.StartsWith("No connection is active/available to service this operation: EXISTS " + Me(), ex.Message); - Writer.WriteLine("Invoking MakeMaster()..."); - primary.MakeMaster(ReplicationChangeOptions.Broadcast | ReplicationChangeOptions.ReplicateToOtherEndpoints | ReplicationChangeOptions.SetTiebreaker, Writer); - Writer.WriteLine("Finished MakeMaster() call."); - - await Task.Delay(100).ConfigureAwait(false); - - Writer.WriteLine("Invoking Ping() (post-master)"); - primary.Ping(); - secondary.Ping(); - Writer.WriteLine("Finished Ping() (post-master)"); - - Assert.True(primary.IsConnected, $"{primary.EndPoint} is not connected."); - Assert.True(secondary.IsConnected, $"{secondary.EndPoint} is not connected."); - - Writer.WriteLine($"{primary.EndPoint}: {primary.ServerType}, Mode: {(primary.IsReplica ? "Replica" : "Master")}"); - Writer.WriteLine($"{secondary.EndPoint}: {secondary.ServerType}, Mode: {(secondary.IsReplica ? "Replica" : "Master")}"); - - // Create a separate multiplexer with a valid view of the world to distinguish between failures of - // server topology changes from failures to recognize those changes - Writer.WriteLine("Connecting to secondary validation connection."); - using (var conn2 = ConnectionMultiplexer.Connect(config)) - { - var primary2 = conn2.GetServer(TestConfig.Current.FailoverMasterServerAndPort); - var secondary2 = conn2.GetServer(TestConfig.Current.FailoverReplicaServerAndPort); - - Writer.WriteLine($"Check: {primary2.EndPoint}: {primary2.ServerType}, Mode: {(primary2.IsReplica ? 
"Replica" : "Master")}"); - Writer.WriteLine($"Check: {secondary2.EndPoint}: {secondary2.ServerType}, Mode: {(secondary2.IsReplica ? "Replica" : "Master")}"); - - Assert.False(primary2.IsReplica, $"{primary2.EndPoint} should be a master (verification connection)."); - Assert.True(secondary2.IsReplica, $"{secondary2.EndPoint} should be a replica (verification connection)."); - - var db2 = conn2.GetDatabase(); - - Assert.Equal(primary2.EndPoint, db2.IdentifyEndpoint(key, CommandFlags.PreferMaster)); - Assert.Equal(primary2.EndPoint, db2.IdentifyEndpoint(key, CommandFlags.DemandMaster)); - Assert.Equal(secondary2.EndPoint, db2.IdentifyEndpoint(key, CommandFlags.PreferReplica)); - Assert.Equal(secondary2.EndPoint, db2.IdentifyEndpoint(key, CommandFlags.DemandReplica)); - } - - await UntilCondition(TimeSpan.FromSeconds(20), () => !primary.IsReplica && secondary.IsReplica); - - Assert.False(primary.IsReplica, $"{primary.EndPoint} should be a master."); - Assert.True(secondary.IsReplica, $"{secondary.EndPoint} should be a replica."); - - Assert.Equal(primary.EndPoint, db.IdentifyEndpoint(key, CommandFlags.PreferMaster)); - Assert.Equal(primary.EndPoint, db.IdentifyEndpoint(key, CommandFlags.DemandMaster)); - Assert.Equal(secondary.EndPoint, db.IdentifyEndpoint(key, CommandFlags.PreferReplica)); - Assert.Equal(secondary.EndPoint, db.IdentifyEndpoint(key, CommandFlags.DemandReplica)); - } - } - -#if DEBUG - [Fact] - public async Task SubscriptionsSurviveMasterSwitchAsync() - { - void TopologyFail() => Skip.Inconclusive("Replication tolopogy change failed...and that's both inconsistent and not what we're testing."); - - if (RunningInCI) - { - Skip.Inconclusive("TODO: Fix race in broadcast reconfig a zero latency."); - } - - using (var a = Create(allowAdmin: true, shared: false)) - using (var b = Create(allowAdmin: true, shared: false)) - { - RedisChannel channel = Me(); - Log("Using Channel: " + channel); - var subA = a.GetSubscriber(); - var subB = b.GetSubscriber(); - - 
long masterChanged = 0, aCount = 0, bCount = 0; - a.ConfigurationChangedBroadcast += delegate - { - Log("A noticed config broadcast: " + Interlocked.Increment(ref masterChanged)); - }; - b.ConfigurationChangedBroadcast += delegate - { - Log("B noticed config broadcast: " + Interlocked.Increment(ref masterChanged)); - }; - subA.Subscribe(channel, (_, message) => - { - Log("A got message: " + message); - Interlocked.Increment(ref aCount); - }); - subB.Subscribe(channel, (_, message) => - { - Log("B got message: " + message); - Interlocked.Increment(ref bCount); - }); - - Assert.False(a.GetServer(TestConfig.Current.FailoverMasterServerAndPort).IsReplica, $"A Connection: {TestConfig.Current.FailoverMasterServerAndPort} should be a master"); - if (!a.GetServer(TestConfig.Current.FailoverReplicaServerAndPort).IsReplica) - { - TopologyFail(); - } - Assert.True(a.GetServer(TestConfig.Current.FailoverReplicaServerAndPort).IsReplica, $"A Connection: {TestConfig.Current.FailoverReplicaServerAndPort} should be a replica"); - Assert.False(b.GetServer(TestConfig.Current.FailoverMasterServerAndPort).IsReplica, $"B Connection: {TestConfig.Current.FailoverMasterServerAndPort} should be a master"); - Assert.True(b.GetServer(TestConfig.Current.FailoverReplicaServerAndPort).IsReplica, $"B Connection: {TestConfig.Current.FailoverReplicaServerAndPort} should be a replica"); - - Log("Failover 1 Complete"); - var epA = subA.SubscribedEndpoint(channel); - var epB = subB.SubscribedEndpoint(channel); - Log(" A: " + EndPointCollection.ToString(epA)); - Log(" B: " + EndPointCollection.ToString(epB)); - subA.Publish(channel, "A1"); - subB.Publish(channel, "B1"); - Log(" SubA ping: " + subA.Ping()); - Log(" SubB ping: " + subB.Ping()); - // If redis is under load due to this suite, it may take a moment to send across. 
- await UntilCondition(TimeSpan.FromSeconds(5), () => Interlocked.Read(ref aCount) == 2 && Interlocked.Read(ref bCount) == 2).ForAwait(); - - Assert.Equal(2, Interlocked.Read(ref aCount)); - Assert.Equal(2, Interlocked.Read(ref bCount)); - Assert.Equal(0, Interlocked.Read(ref masterChanged)); - - try - { - Interlocked.Exchange(ref masterChanged, 0); - Interlocked.Exchange(ref aCount, 0); - Interlocked.Exchange(ref bCount, 0); - Log("Changing master..."); - using (var sw = new StringWriter()) - { - a.GetServer(TestConfig.Current.FailoverReplicaServerAndPort).MakeMaster(ReplicationChangeOptions.All, sw); - Log(sw.ToString()); - } - Log("Waiting for connection B to detect..."); - await UntilCondition(TimeSpan.FromSeconds(10), () => b.GetServer(TestConfig.Current.FailoverMasterServerAndPort).IsReplica).ForAwait(); - subA.Ping(); - subB.Ping(); - Log("Falover 2 Attempted. Pausing..."); - Log(" A " + TestConfig.Current.FailoverMasterServerAndPort + " status: " + (a.GetServer(TestConfig.Current.FailoverMasterServerAndPort).IsReplica ? "Replica" : "Master")); - Log(" A " + TestConfig.Current.FailoverReplicaServerAndPort + " status: " + (a.GetServer(TestConfig.Current.FailoverReplicaServerAndPort).IsReplica ? "Replica" : "Master")); - Log(" B " + TestConfig.Current.FailoverMasterServerAndPort + " status: " + (b.GetServer(TestConfig.Current.FailoverMasterServerAndPort).IsReplica ? "Replica" : "Master")); - Log(" B " + TestConfig.Current.FailoverReplicaServerAndPort + " status: " + (b.GetServer(TestConfig.Current.FailoverReplicaServerAndPort).IsReplica ? 
"Replica" : "Master")); - - if (!a.GetServer(TestConfig.Current.FailoverMasterServerAndPort).IsReplica) - { - TopologyFail(); - } - Log("Falover 2 Complete."); - - Assert.True(a.GetServer(TestConfig.Current.FailoverMasterServerAndPort).IsReplica, $"A Connection: {TestConfig.Current.FailoverMasterServerAndPort} should be a replica"); - Assert.False(a.GetServer(TestConfig.Current.FailoverReplicaServerAndPort).IsReplica, $"A Connection: {TestConfig.Current.FailoverReplicaServerAndPort} should be a master"); - await UntilCondition(TimeSpan.FromSeconds(10), () => b.GetServer(TestConfig.Current.FailoverMasterServerAndPort).IsReplica).ForAwait(); - var sanityCheck = b.GetServer(TestConfig.Current.FailoverMasterServerAndPort).IsReplica; - if (!sanityCheck) - { - Log("FAILURE: B has not detected the topology change."); - foreach (var server in b.GetServerSnapshot().ToArray()) - { - Log(" Server" + server.EndPoint); - Log(" State: " + server.ConnectionState); - Log(" IsReplica: " + !server.IsReplica); - Log(" Type: " + server.ServerType); - } - //Skip.Inconclusive("Not enough latency."); - } - Assert.True(sanityCheck, $"B Connection: {TestConfig.Current.FailoverMasterServerAndPort} should be a replica"); - Assert.False(b.GetServer(TestConfig.Current.FailoverReplicaServerAndPort).IsReplica, $"B Connection: {TestConfig.Current.FailoverReplicaServerAndPort} should be a master"); - - Log("Pause complete"); - Log(" A outstanding: " + a.GetCounters().TotalOutstanding); - Log(" B outstanding: " + b.GetCounters().TotalOutstanding); - subA.Ping(); - subB.Ping(); - await Task.Delay(5000).ForAwait(); - epA = subA.SubscribedEndpoint(channel); - epB = subB.SubscribedEndpoint(channel); - Log("Subscription complete"); - Log(" A: " + EndPointCollection.ToString(epA)); - Log(" B: " + EndPointCollection.ToString(epB)); - var aSentTo = subA.Publish(channel, "A2"); - var bSentTo = subB.Publish(channel, "B2"); - Log(" A2 sent to: " + aSentTo); - Log(" B2 sent to: " + bSentTo); - subA.Ping(); - 
subB.Ping(); - Log("Ping Complete. Checking..."); - await UntilCondition(TimeSpan.FromSeconds(10), () => Interlocked.Read(ref aCount) == 2 && Interlocked.Read(ref bCount) == 2).ForAwait(); - - Log("Counts so far:"); - Log(" aCount: " + Interlocked.Read(ref aCount)); - Log(" bCount: " + Interlocked.Read(ref bCount)); - Log(" masterChanged: " + Interlocked.Read(ref masterChanged)); - - Assert.Equal(2, Interlocked.Read(ref aCount)); - Assert.Equal(2, Interlocked.Read(ref bCount)); - // Expect 10, because a sees a, but b sees a and b due to replication - Assert.Equal(10, Interlocked.CompareExchange(ref masterChanged, 0, 0)); - } - catch - { - LogNoTime(""); - Log("ERROR: Something went bad - see above! Roooooolling back. Back it up. Baaaaaack it on up."); - LogNoTime(""); - throw; - } - finally - { - Log("Restoring configuration..."); - try - { - a.GetServer(TestConfig.Current.FailoverMasterServerAndPort).MakeMaster(ReplicationChangeOptions.All); - await Task.Delay(1000).ForAwait(); - } - catch { /* Don't bomb here */ } - } - } - } -#endif - } -} diff --git a/tests/StackExchange.Redis.Tests/FailoverTests.cs b/tests/StackExchange.Redis.Tests/FailoverTests.cs new file mode 100644 index 000000000..9c330a3f3 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/FailoverTests.cs @@ -0,0 +1,443 @@ +#if NET +using System; +using System.IO; +using System.Threading; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +[Collection(NonParallelCollection.Name)] +public class FailoverTests(ITestOutputHelper output) : TestBase(output), IAsyncLifetime +{ + protected override string GetConfiguration() => GetPrimaryReplicaConfig().ToString(); + + public ValueTask DisposeAsync() => ValueTask.CompletedTask; + + public async ValueTask InitializeAsync() + { + await using var conn = Create(); + + var shouldBePrimary = conn.GetServer(TestConfig.Current.FailoverPrimaryServerAndPort); + if (shouldBePrimary.IsReplica) + { + Log(shouldBePrimary.EndPoint + " 
should be primary, fixing..."); + await shouldBePrimary.MakePrimaryAsync(ReplicationChangeOptions.SetTiebreaker); + } + + var shouldBeReplica = conn.GetServer(TestConfig.Current.FailoverReplicaServerAndPort); + if (!shouldBeReplica.IsReplica) + { + Log(shouldBeReplica.EndPoint + " should be a replica, fixing..."); + await shouldBeReplica.ReplicaOfAsync(shouldBePrimary.EndPoint); + await Task.Delay(2000).ForAwait(); + } + } + + private static ConfigurationOptions GetPrimaryReplicaConfig() + { + return new ConfigurationOptions + { + AllowAdmin = true, + SyncTimeout = 100000, + EndPoints = + { + { TestConfig.Current.FailoverPrimaryServer, TestConfig.Current.FailoverPrimaryPort }, + { TestConfig.Current.FailoverReplicaServer, TestConfig.Current.FailoverReplicaPort }, + }, + }; + } + + [Fact] + public async Task ConfigureAsync() + { + await using var conn = Create(); + + await Task.Delay(1000).ForAwait(); + Log("About to reconfigure....."); + await conn.ConfigureAsync().ForAwait(); + Log("Reconfigured"); + } + + [Fact] + public async Task ConfigureSync() + { + await using var conn = Create(); + + await Task.Delay(1000).ForAwait(); + Log("About to reconfigure....."); + conn.Configure(); + Log("Reconfigured"); + } + + [Fact] + public async Task ConfigVerifyReceiveConfigChangeBroadcast() + { + _ = GetConfiguration(); + await using var senderConn = Create(allowAdmin: true); + await using var receiverConn = Create(syncTimeout: 2000); + + int total = 0; + receiverConn.ConfigurationChangedBroadcast += (s, a) => + { + Log("Config changed: " + (a.EndPoint == null ? 
"(none)" : a.EndPoint.ToString())); + Interlocked.Increment(ref total); + }; + // send a reconfigure/reconnect message + long count = senderConn.PublishReconfigure(); + await GetServer(receiverConn).PingAsync(); + await GetServer(receiverConn).PingAsync(); + await Task.Delay(1000).ConfigureAwait(false); + Assert.True(count == -1 || count >= 2, "subscribers"); + Assert.True(Interlocked.CompareExchange(ref total, 0, 0) >= 1, "total (1st)"); + + Interlocked.Exchange(ref total, 0); + + // and send a second time via a re-primary operation + var server = GetServer(senderConn); + if (server.IsReplica) Assert.Skip("didn't expect a replica"); + await server.MakePrimaryAsync(ReplicationChangeOptions.Broadcast); + await Task.Delay(1000).ConfigureAwait(false); + await GetServer(receiverConn).PingAsync(); + await GetServer(receiverConn).PingAsync(); + Assert.True(Interlocked.CompareExchange(ref total, 0, 0) >= 1, "total (2nd)"); + } + + [Fact] + public async Task DereplicateGoesToPrimary() + { + ConfigurationOptions config = GetPrimaryReplicaConfig(); + config.ConfigCheckSeconds = 5; + + await using var conn = await ConnectionMultiplexer.ConnectAsync(config); + + var primary = conn.GetServer(TestConfig.Current.FailoverPrimaryServerAndPort); + var secondary = conn.GetServer(TestConfig.Current.FailoverReplicaServerAndPort); + + await primary.PingAsync(); + await secondary.PingAsync(); + + await primary.MakePrimaryAsync(ReplicationChangeOptions.SetTiebreaker); + await secondary.MakePrimaryAsync(ReplicationChangeOptions.None); + + await Task.Delay(100).ConfigureAwait(false); + + await primary.PingAsync(); + await secondary.PingAsync(); + + using (var writer = new StringWriter()) + { + conn.Configure(writer); + string log = writer.ToString(); + Log(log); + bool isUnanimous = log.Contains("tie-break is unanimous at " + TestConfig.Current.FailoverPrimaryServerAndPort); + if (!isUnanimous) Assert.Skip("this is timing sensitive; unable to verify this time"); + } + + // k, so we know 
everyone loves 6379; is that what we get? + var db = conn.GetDatabase(); + RedisKey key = Me(); + + Assert.Equal(primary.EndPoint, db.IdentifyEndpoint(key, CommandFlags.PreferMaster)); + Assert.Equal(primary.EndPoint, db.IdentifyEndpoint(key, CommandFlags.DemandMaster)); + Assert.Equal(primary.EndPoint, db.IdentifyEndpoint(key, CommandFlags.PreferReplica)); + + var ex = Assert.Throws(() => db.IdentifyEndpoint(key, CommandFlags.DemandReplica)); + Assert.StartsWith("No connection is active/available to service this operation: EXISTS " + Me(), ex.Message); + Log("Invoking MakePrimaryAsync()..."); + await primary.MakePrimaryAsync(ReplicationChangeOptions.Broadcast | ReplicationChangeOptions.ReplicateToOtherEndpoints | ReplicationChangeOptions.SetTiebreaker, Writer); + Log("Finished MakePrimaryAsync() call."); + + await Task.Delay(100).ConfigureAwait(false); + + Log("Invoking Ping() (post-primary)"); + await primary.PingAsync(); + await secondary.PingAsync(); + Log("Finished Ping() (post-primary)"); + + Assert.True(primary.IsConnected, $"{primary.EndPoint} is not connected."); + Assert.True(secondary.IsConnected, $"{secondary.EndPoint} is not connected."); + + Log($"{primary.EndPoint}: {primary.ServerType}, Mode: {(primary.IsReplica ? "Replica" : "Primary")}"); + Log($"{secondary.EndPoint}: {secondary.ServerType}, Mode: {(secondary.IsReplica ? "Replica" : "Primary")}"); + + // Create a separate multiplexer with a valid view of the world to distinguish between failures of + // server topology changes from failures to recognize those changes + Log("Connecting to secondary validation connection."); + using (var conn2 = ConnectionMultiplexer.Connect(config)) + { + var primary2 = conn2.GetServer(TestConfig.Current.FailoverPrimaryServerAndPort); + var secondary2 = conn2.GetServer(TestConfig.Current.FailoverReplicaServerAndPort); + + Log($"Check: {primary2.EndPoint}: {primary2.ServerType}, Mode: {(primary2.IsReplica ? 
"Replica" : "Primary")}"); + Log($"Check: {secondary2.EndPoint}: {secondary2.ServerType}, Mode: {(secondary2.IsReplica ? "Replica" : "Primary")}"); + + Assert.False(primary2.IsReplica, $"{primary2.EndPoint} should be a primary (verification connection)."); + Assert.True(secondary2.IsReplica, $"{secondary2.EndPoint} should be a replica (verification connection)."); + + var db2 = conn2.GetDatabase(); + + Assert.Equal(primary2.EndPoint, db2.IdentifyEndpoint(key, CommandFlags.PreferMaster)); + Assert.Equal(primary2.EndPoint, db2.IdentifyEndpoint(key, CommandFlags.DemandMaster)); + Assert.Equal(secondary2.EndPoint, db2.IdentifyEndpoint(key, CommandFlags.PreferReplica)); + Assert.Equal(secondary2.EndPoint, db2.IdentifyEndpoint(key, CommandFlags.DemandReplica)); + } + + await UntilConditionAsync(TimeSpan.FromSeconds(20), () => !primary.IsReplica && secondary.IsReplica); + + Assert.False(primary.IsReplica, $"{primary.EndPoint} should be a primary."); + Assert.True(secondary.IsReplica, $"{secondary.EndPoint} should be a replica."); + + Assert.Equal(primary.EndPoint, db.IdentifyEndpoint(key, CommandFlags.PreferMaster)); + Assert.Equal(primary.EndPoint, db.IdentifyEndpoint(key, CommandFlags.DemandMaster)); + Assert.Equal(secondary.EndPoint, db.IdentifyEndpoint(key, CommandFlags.PreferReplica)); + Assert.Equal(secondary.EndPoint, db.IdentifyEndpoint(key, CommandFlags.DemandReplica)); + } + +#if DEBUG + [Fact] + public async Task SubscriptionsSurviveConnectionFailureAsync() + { + await using var conn = Create(allowAdmin: true, shared: false, log: Writer, syncTimeout: 1000); + + var profiler = conn.AddProfiler(); + RedisChannel channel = RedisChannel.Literal(Me()); + var sub = conn.GetSubscriber(); + int counter = 0; + Assert.True(sub.IsConnected()); + await sub.SubscribeAsync(channel, (arg1, arg2) => Interlocked.Increment(ref counter)).ConfigureAwait(false); + + var profile1 = Log(profiler); + + Assert.Equal(1, conn.GetSubscriptionsCount()); + + await 
Task.Delay(200).ConfigureAwait(false); + + await sub.PublishAsync(channel, "abc").ConfigureAwait(false); + await sub.PingAsync(); + await Task.Delay(200).ConfigureAwait(false); + + var counter1 = Volatile.Read(ref counter); + Log($"Expecting 1 message, got {counter1}"); + Assert.Equal(1, counter1); + + var server = GetServer(conn); + var socketCount = server.GetCounters().Subscription.SocketCount; + Log($"Expecting 1 socket, got {socketCount}"); + Assert.Equal(1, socketCount); + + // We might fail both connections or just the primary in the time period + SetExpectedAmbientFailureCount(-1); + + // Make sure we fail all the way + conn.AllowConnect = false; + Log("Failing connection"); + // Fail all connections + server.SimulateConnectionFailure(SimulatedFailureType.All); + // Trigger failure (RedisTimeoutException or RedisConnectionException because + // of backlog behavior) + Assert.False(sub.IsConnected(channel)); + + var ex = Assert.ThrowsAny(() => Log($"Ping: {sub.Ping(CommandFlags.DemandMaster)}ms")); + Assert.True(ex is RedisTimeoutException or RedisConnectionException); + Log($"Failed as expected: {ex.Message}"); + + // Now reconnect... 
+ conn.AllowConnect = true; + Log("Waiting on reconnect"); + // Wait until we're reconnected + await UntilConditionAsync(TimeSpan.FromSeconds(10), () => sub.IsConnected(channel)); + Log("Reconnected"); + // Ensure we're reconnected + Assert.True(sub.IsConnected(channel)); + + // Ensure we've sent the subscribe command after reconnecting + var profile2 = Log(profiler); + // Assert.Equal(1, profile2.Count(p => p.Command == nameof(RedisCommand.SUBSCRIBE))); + Log("Issuing ping after reconnected"); + await sub.PingAsync(); + + var muxerSubCount = conn.GetSubscriptionsCount(); + Log($"Muxer thinks we have {muxerSubCount} subscriber(s)."); + Assert.Equal(1, muxerSubCount); + + var muxerSubs = conn.GetSubscriptions(); + foreach (var pair in muxerSubs) + { + var muxerSub = pair.Value; + Log($" Muxer Sub: {pair.Key}: (EndPoint: {muxerSub.GetAnyCurrentServer()}, Connected: {muxerSub.IsConnectedAny()})"); + } + + Log("Publishing"); + var published = await sub.PublishAsync(channel, "abc").ConfigureAwait(false); + + Log($"Published to {published} subscriber(s)."); + Assert.Equal(1, published); + + // Give it a few seconds to get our messages + Log("Waiting for 2 messages"); + await UntilConditionAsync(TimeSpan.FromSeconds(5), () => Volatile.Read(ref counter) == 2); + + var counter2 = Volatile.Read(ref counter); + Log($"Expecting 2 messages, got {counter2}"); + Assert.Equal(2, counter2); + + // Log all commands at the end + Log("All commands since connecting:"); + var profile3 = profiler.FinishProfiling(); + foreach (var command in profile3) + { + Log($"{command.EndPoint}: {command}"); + } + } + + [Fact] + public async Task SubscriptionsSurvivePrimarySwitchAsync() + { + static void TopologyFail() => Assert.Skip("Replication topology change failed...and that's both inconsistent and not what we're testing."); + + await using var aConn = Create(allowAdmin: true, shared: false); + await using var bConn = Create(allowAdmin: true, shared: false); + + RedisChannel channel = 
RedisChannel.Literal(Me()); + Log("Using Channel: " + channel); + var subA = aConn.GetSubscriber(); + var subB = bConn.GetSubscriber(); + + long primaryChanged = 0, aCount = 0, bCount = 0; + aConn.ConfigurationChangedBroadcast += (s, args) => Log("A noticed config broadcast: " + Interlocked.Increment(ref primaryChanged) + " (Endpoint:" + args.EndPoint + ")"); + bConn.ConfigurationChangedBroadcast += (s, args) => Log("B noticed config broadcast: " + Interlocked.Increment(ref primaryChanged) + " (Endpoint:" + args.EndPoint + ")"); + subA.Subscribe(channel, (_, message) => + { + Log("A got message: " + message); + Interlocked.Increment(ref aCount); + }); + subB.Subscribe(channel, (_, message) => + { + Log("B got message: " + message); + Interlocked.Increment(ref bCount); + }); + + Assert.False(aConn.GetServer(TestConfig.Current.FailoverPrimaryServerAndPort).IsReplica, $"A Connection: {TestConfig.Current.FailoverPrimaryServerAndPort} should be a primary"); + if (!aConn.GetServer(TestConfig.Current.FailoverReplicaServerAndPort).IsReplica) + { + TopologyFail(); + } + Assert.True(aConn.GetServer(TestConfig.Current.FailoverReplicaServerAndPort).IsReplica, $"A Connection: {TestConfig.Current.FailoverReplicaServerAndPort} should be a replica"); + Assert.False(bConn.GetServer(TestConfig.Current.FailoverPrimaryServerAndPort).IsReplica, $"B Connection: {TestConfig.Current.FailoverPrimaryServerAndPort} should be a primary"); + Assert.True(bConn.GetServer(TestConfig.Current.FailoverReplicaServerAndPort).IsReplica, $"B Connection: {TestConfig.Current.FailoverReplicaServerAndPort} should be a replica"); + + Log("Failover 1 Complete"); + var epA = subA.SubscribedEndpoint(channel); + var epB = subB.SubscribedEndpoint(channel); + Log(" A: " + EndPointCollection.ToString(epA)); + Log(" B: " + EndPointCollection.ToString(epB)); + subA.Publish(channel, "A1"); + subB.Publish(channel, "B1"); + Log(" SubA ping: " + subA.Ping()); + Log(" SubB ping: " + subB.Ping()); + // If redis is under 
load due to this suite, it may take a moment to send across. + await UntilConditionAsync(TimeSpan.FromSeconds(5), () => Interlocked.Read(ref aCount) == 2 && Interlocked.Read(ref bCount) == 2).ForAwait(); + + Assert.Equal(2, Interlocked.Read(ref aCount)); + Assert.Equal(2, Interlocked.Read(ref bCount)); + Assert.Equal(0, Interlocked.Read(ref primaryChanged)); + + try + { + Interlocked.Exchange(ref primaryChanged, 0); + Interlocked.Exchange(ref aCount, 0); + Interlocked.Exchange(ref bCount, 0); + Log("Changing primary..."); + using (var sw = new StringWriter()) + { + await aConn.GetServer(TestConfig.Current.FailoverReplicaServerAndPort).MakePrimaryAsync(ReplicationChangeOptions.All, sw); + Log(sw.ToString()); + } + Log("Waiting for connection B to detect..."); + await UntilConditionAsync(TimeSpan.FromSeconds(10), () => bConn.GetServer(TestConfig.Current.FailoverPrimaryServerAndPort).IsReplica).ForAwait(); + await subA.PingAsync(); + await subB.PingAsync(); + Log("Failover 2 Attempted. Pausing..."); + Log(" A " + TestConfig.Current.FailoverPrimaryServerAndPort + " status: " + (aConn.GetServer(TestConfig.Current.FailoverPrimaryServerAndPort).IsReplica ? "Replica" : "Primary")); + Log(" A " + TestConfig.Current.FailoverReplicaServerAndPort + " status: " + (aConn.GetServer(TestConfig.Current.FailoverReplicaServerAndPort).IsReplica ? "Replica" : "Primary")); + Log(" B " + TestConfig.Current.FailoverPrimaryServerAndPort + " status: " + (bConn.GetServer(TestConfig.Current.FailoverPrimaryServerAndPort).IsReplica ? "Replica" : "Primary")); + Log(" B " + TestConfig.Current.FailoverReplicaServerAndPort + " status: " + (bConn.GetServer(TestConfig.Current.FailoverReplicaServerAndPort).IsReplica ? 
"Replica" : "Primary")); + + if (!aConn.GetServer(TestConfig.Current.FailoverPrimaryServerAndPort).IsReplica) + { + TopologyFail(); + } + Log("Failover 2 Complete."); + + Assert.True(aConn.GetServer(TestConfig.Current.FailoverPrimaryServerAndPort).IsReplica, $"A Connection: {TestConfig.Current.FailoverPrimaryServerAndPort} should be a replica"); + Assert.False(aConn.GetServer(TestConfig.Current.FailoverReplicaServerAndPort).IsReplica, $"A Connection: {TestConfig.Current.FailoverReplicaServerAndPort} should be a primary"); + await UntilConditionAsync(TimeSpan.FromSeconds(10), () => bConn.GetServer(TestConfig.Current.FailoverPrimaryServerAndPort).IsReplica).ForAwait(); + var sanityCheck = bConn.GetServer(TestConfig.Current.FailoverPrimaryServerAndPort).IsReplica; + if (!sanityCheck) + { + Log("FAILURE: B has not detected the topology change."); + foreach (var server in bConn.GetServerSnapshot().ToArray()) + { + Log(" Server: " + server.EndPoint); + Log(" State (Interactive): " + server.InteractiveConnectionState); + Log(" State (Subscription): " + server.SubscriptionConnectionState); + Log(" IsReplica: " + !server.IsReplica); + Log(" Type: " + server.ServerType); + } + // Assert.Skip("Not enough latency."); + } + Assert.True(sanityCheck, $"B Connection: {TestConfig.Current.FailoverPrimaryServerAndPort} should be a replica"); + Assert.False(bConn.GetServer(TestConfig.Current.FailoverReplicaServerAndPort).IsReplica, $"B Connection: {TestConfig.Current.FailoverReplicaServerAndPort} should be a primary"); + + Log("Pause complete"); + Log(" A outstanding: " + aConn.GetCounters().TotalOutstanding); + Log(" B outstanding: " + bConn.GetCounters().TotalOutstanding); + await subA.PingAsync(); + await subB.PingAsync(); + await Task.Delay(5000).ForAwait(); + epA = subA.SubscribedEndpoint(channel); + epB = subB.SubscribedEndpoint(channel); + Log("Subscription complete"); + Log(" A: " + EndPointCollection.ToString(epA)); + Log(" B: " + EndPointCollection.ToString(epB)); + var 
aSentTo = subA.Publish(channel, "A2"); + var bSentTo = subB.Publish(channel, "B2"); + Log(" A2 sent to: " + aSentTo); + Log(" B2 sent to: " + bSentTo); + await subA.PingAsync(); + await subB.PingAsync(); + Log("Ping Complete. Checking..."); + await UntilConditionAsync(TimeSpan.FromSeconds(10), () => Interlocked.Read(ref aCount) == 2 && Interlocked.Read(ref bCount) == 2).ForAwait(); + + Log("Counts so far:"); + Log(" aCount: " + Interlocked.Read(ref aCount)); + Log(" bCount: " + Interlocked.Read(ref bCount)); + Log(" primaryChanged: " + Interlocked.Read(ref primaryChanged)); + + Assert.Equal(2, Interlocked.Read(ref aCount)); + Assert.Equal(2, Interlocked.Read(ref bCount)); + // Expect 12, because a sees a, but b sees a and b due to replication, but contenders may add their own + Assert.True(Interlocked.CompareExchange(ref primaryChanged, 0, 0) >= 12); + } + catch + { + Log(""); + Log("ERROR: Something went bad - see above! Roooooolling back. Back it up. Baaaaaack it on up."); + Log(""); + throw; + } + finally + { + Log("Restoring configuration..."); + try + { + await aConn.GetServer(TestConfig.Current.FailoverPrimaryServerAndPort).MakePrimaryAsync(ReplicationChangeOptions.All); + await Task.Delay(1000).ForAwait(); + } + catch { /* Don't bomb here */ } + } + } +#endif +} +#endif diff --git a/tests/StackExchange.Redis.Tests/FeatureFlagTests.cs b/tests/StackExchange.Redis.Tests/FeatureFlagTests.cs new file mode 100644 index 000000000..bf5aacc13 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/FeatureFlagTests.cs @@ -0,0 +1,25 @@ +using Xunit; + +namespace StackExchange.Redis.Tests; + +[Collection(NonParallelCollection.Name)] +public class FeatureFlagTests +{ + [Fact] + public void UnknownFlagToggle() + { + Assert.False(ConnectionMultiplexer.GetFeatureFlag("nope")); + ConnectionMultiplexer.SetFeatureFlag("nope", true); + Assert.False(ConnectionMultiplexer.GetFeatureFlag("nope")); + } + + [Fact] + public void KnownFlagToggle() + { + 
Assert.False(ConnectionMultiplexer.GetFeatureFlag("preventthreadtheft")); + ConnectionMultiplexer.SetFeatureFlag("preventthreadtheft", true); + Assert.True(ConnectionMultiplexer.GetFeatureFlag("preventthreadtheft")); + ConnectionMultiplexer.SetFeatureFlag("preventthreadtheft", false); + Assert.False(ConnectionMultiplexer.GetFeatureFlag("preventthreadtheft")); + } +} diff --git a/tests/StackExchange.Redis.Tests/FeatureFlags.cs b/tests/StackExchange.Redis.Tests/FeatureFlags.cs deleted file mode 100644 index 5d0b66d19..000000000 --- a/tests/StackExchange.Redis.Tests/FeatureFlags.cs +++ /dev/null @@ -1,26 +0,0 @@ -using Xunit; - -namespace StackExchange.Redis.Tests -{ - [Collection(NonParallelCollection.Name)] - public class FeatureFlags - { - [Fact] - public void UnknownFlagToggle() - { - Assert.False(ConnectionMultiplexer.GetFeatureFlag("nope")); - ConnectionMultiplexer.SetFeatureFlag("nope", true); - Assert.False(ConnectionMultiplexer.GetFeatureFlag("nope")); - } - - [Fact] - public void KnownFlagToggle() - { - Assert.False(ConnectionMultiplexer.GetFeatureFlag("preventthreadtheft")); - ConnectionMultiplexer.SetFeatureFlag("preventthreadtheft", true); - Assert.True(ConnectionMultiplexer.GetFeatureFlag("preventthreadtheft")); - ConnectionMultiplexer.SetFeatureFlag("preventthreadtheft", false); - Assert.False(ConnectionMultiplexer.GetFeatureFlag("preventthreadtheft")); - } - } -} diff --git a/tests/StackExchange.Redis.Tests/FloatingPoint.cs b/tests/StackExchange.Redis.Tests/FloatingPoint.cs deleted file mode 100644 index 7f5df8ea4..000000000 --- a/tests/StackExchange.Redis.Tests/FloatingPoint.cs +++ /dev/null @@ -1,168 +0,0 @@ -using System; -using System.Threading.Tasks; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - [Collection(SharedConnectionFixture.Key)] - public class FloatingPoint : TestBase - { - public FloatingPoint(ITestOutputHelper output, SharedConnectionFixture fixture) : base (output, fixture) { } - - private static 
bool Within(double x, double y, double delta) - { - return Math.Abs(x - y) <= delta; - } - - [Fact] - public void IncrDecrFloatingPoint() - { - using (var conn = Create()) - { - var db = conn.GetDatabase(); - RedisKey key = Me(); - db.KeyDelete(key, CommandFlags.FireAndForget); - double[] incr = - { - 12.134, - -14561.0000002, - 125.3421, - -2.49892498 - }, decr = - { - 99.312, - 12, - -35 - }; - double sum = 0; - foreach (var value in incr) - { - db.StringIncrement(key, value, CommandFlags.FireAndForget); - sum += value; - } - foreach (var value in decr) - { - db.StringDecrement(key, value, CommandFlags.FireAndForget); - sum -= value; - } - var val = (double)db.StringGet(key); - - Assert.True(Within(sum, val, 0.0001)); - } - } - - [Fact] - public async Task IncrDecrFloatingPointAsync() - { - using (var conn = Create()) - { - var db = conn.GetDatabase(); - RedisKey key = Me(); - db.KeyDelete(key, CommandFlags.FireAndForget); - double[] incr = - { - 12.134, - -14561.0000002, - 125.3421, - -2.49892498 - }, decr = - { - 99.312, - 12, - -35 - }; - double sum = 0; - foreach (var value in incr) - { - await db.StringIncrementAsync(key, value).ForAwait(); - sum += value; - } - foreach (var value in decr) - { - await db.StringDecrementAsync(key, value).ForAwait(); - sum -= value; - } - var val = (double)await db.StringGetAsync(key).ForAwait(); - - Assert.True(Within(sum, val, 0.0001)); - } - } - - [Fact] - public void HashIncrDecrFloatingPoint() - { - using (var conn = Create()) - { - var db = conn.GetDatabase(); - RedisKey key = Me(); - RedisValue field = "foo"; - db.KeyDelete(key, CommandFlags.FireAndForget); - double[] incr = - { - 12.134, - -14561.0000002, - 125.3421, - -2.49892498 - }, decr = - { - 99.312, - 12, - -35 - }; - double sum = 0; - foreach (var value in incr) - { - db.HashIncrement(key, field, value, CommandFlags.FireAndForget); - sum += value; - } - foreach (var value in decr) - { - db.HashDecrement(key, field, value, CommandFlags.FireAndForget); - sum -= 
value; - } - var val = (double)db.HashGet(key, field); - - Assert.True(Within(sum, val, 0.0001), $"{sum} not within 0.0001 of {val}"); - } - } - - [Fact] - public async Task HashIncrDecrFloatingPointAsync() - { - using (var conn = Create()) - { - var db = conn.GetDatabase(); - RedisKey key = Me(); - RedisValue field = "bar"; - db.KeyDelete(key, CommandFlags.FireAndForget); - double[] incr = - { - 12.134, - -14561.0000002, - 125.3421, - -2.49892498 - }, decr = - { - 99.312, - 12, - -35 - }; - double sum = 0; - foreach (var value in incr) - { - _ = db.HashIncrementAsync(key, field, value); - sum += value; - } - foreach (var value in decr) - { - _ = db.HashDecrementAsync(key, field, value); - sum -= value; - } - var val = (double)await db.HashGetAsync(key, field).ForAwait(); - - Assert.True(Within(sum, val, 0.0001)); - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/FloatingPointTests.cs b/tests/StackExchange.Redis.Tests/FloatingPointTests.cs new file mode 100644 index 000000000..9b4b2bd7e --- /dev/null +++ b/tests/StackExchange.Redis.Tests/FloatingPointTests.cs @@ -0,0 +1,160 @@ +using System; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class FloatingPointTests(ITestOutputHelper output, SharedConnectionFixture fixture) : TestBase(output, fixture) +{ + private static bool Within(double x, double y, double delta) => Math.Abs(x - y) <= delta; + + [Fact] + public async Task IncrDecrFloatingPoint() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + RedisKey key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + double[] incr = + [ + 12.134, + -14561.0000002, + 125.3421, + -2.49892498, + ], + decr = + [ + 99.312, + 12, + -35, + ]; + double sum = 0; + foreach (var value in incr) + { + db.StringIncrement(key, value, CommandFlags.FireAndForget); + sum += value; + } + foreach (var value in decr) + { + db.StringDecrement(key, value, CommandFlags.FireAndForget); + sum -= value; + } + var 
val = (double)db.StringGet(key); + + Assert.True(Within(sum, val, 0.0001)); + } + + [Fact] + public async Task IncrDecrFloatingPointAsync() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + RedisKey key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + double[] incr = + [ + 12.134, + -14561.0000002, + 125.3421, + -2.49892498, + ], + decr = + [ + 99.312, + 12, + -35, + ]; + double sum = 0; + foreach (var value in incr) + { + await db.StringIncrementAsync(key, value).ForAwait(); + sum += value; + } + foreach (var value in decr) + { + await db.StringDecrementAsync(key, value).ForAwait(); + sum -= value; + } + var val = (double)await db.StringGetAsync(key).ForAwait(); + + Assert.True(Within(sum, val, 0.0001)); + } + + [Fact] + public async Task HashIncrDecrFloatingPoint() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + RedisKey key = Me(); + RedisValue field = "foo"; + db.KeyDelete(key, CommandFlags.FireAndForget); + double[] incr = + [ + 12.134, + -14561.0000002, + 125.3421, + -2.49892498, + ], + decr = + [ + 99.312, + 12, + -35, + ]; + double sum = 0; + foreach (var value in incr) + { + db.HashIncrement(key, field, value, CommandFlags.FireAndForget); + sum += value; + } + foreach (var value in decr) + { + db.HashDecrement(key, field, value, CommandFlags.FireAndForget); + sum -= value; + } + var val = (double)db.HashGet(key, field); + + Assert.True(Within(sum, val, 0.0001), $"{sum} not within 0.0001 of {val}"); + } + + [Fact] + public async Task HashIncrDecrFloatingPointAsync() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + RedisKey key = Me(); + RedisValue field = "bar"; + db.KeyDelete(key, CommandFlags.FireAndForget); + double[] incr = + [ + 12.134, + -14561.0000002, + 125.3421, + -2.49892498, + ], + decr = + [ + 99.312, + 12, + -35, + ]; + double sum = 0; + foreach (var value in incr) + { + _ = db.HashIncrementAsync(key, field, value); + sum += value; + } + foreach (var value in 
decr) + { + _ = db.HashDecrementAsync(key, field, value); + sum -= value; + } + var val = (double)await db.HashGetAsync(key, field).ForAwait(); + + Assert.True(Within(sum, val, 0.0001)); + } +} diff --git a/tests/StackExchange.Redis.Tests/FormatTests.cs b/tests/StackExchange.Redis.Tests/FormatTests.cs index 02f532698..0054ce11d 100644 --- a/tests/StackExchange.Redis.Tests/FormatTests.cs +++ b/tests/StackExchange.Redis.Tests/FormatTests.cs @@ -1,82 +1,177 @@ -using System.Collections.Generic; +using System; +using System.Collections.Generic; using System.Net; +using System.Text; using Xunit; -using Xunit.Abstractions; -namespace StackExchange.Redis.Tests +namespace StackExchange.Redis.Tests; + +public class FormatTests(ITestOutputHelper output) : TestBase(output) { - public class FormatTests : TestBase + public static IEnumerable EndpointData() + { + // note: the 3rd arg is for formatting; null means "expect the original string" + + // DNS + yield return new object?[] { "localhost", new DnsEndPoint("localhost", 0), null }; + yield return new object?[] { "localhost:6390", new DnsEndPoint("localhost", 6390), null }; + yield return new object?[] { "bob.the.builder.com", new DnsEndPoint("bob.the.builder.com", 0), null }; + yield return new object?[] { "bob.the.builder.com:6390", new DnsEndPoint("bob.the.builder.com", 6390), null }; + // IPv4 + yield return new object?[] { "0.0.0.0", new IPEndPoint(IPAddress.Parse("0.0.0.0"), 0), null }; + yield return new object?[] { "127.0.0.1", new IPEndPoint(IPAddress.Parse("127.0.0.1"), 0), null }; + yield return new object?[] { "127.1", new IPEndPoint(IPAddress.Parse("127.1"), 0), "127.0.0.1" }; + yield return new object?[] { "127.1:6389", new IPEndPoint(IPAddress.Parse("127.1"), 6389), "127.0.0.1:6389" }; + yield return new object?[] { "127.0.0.1:6389", new IPEndPoint(IPAddress.Parse("127.0.0.1"), 6389), null }; + yield return new object?[] { "127.0.0.1:1", new IPEndPoint(IPAddress.Parse("127.0.0.1"), 1), null }; + yield return 
new object?[] { "127.0.0.1:2", new IPEndPoint(IPAddress.Parse("127.0.0.1"), 2), null }; + yield return new object?[] { "10.10.9.18:2", new IPEndPoint(IPAddress.Parse("10.10.9.18"), 2), null }; + // IPv6 + yield return new object?[] { "::1", new IPEndPoint(IPAddress.Parse("::1"), 0), null }; + yield return new object?[] { "::1:6379", new IPEndPoint(IPAddress.Parse("::0.1.99.121"), 0), "::0.1.99.121" }; // remember your brackets! + yield return new object?[] { "[::1]:6379", new IPEndPoint(IPAddress.Parse("::1"), 6379), null }; + yield return new object?[] { "[::1]", new IPEndPoint(IPAddress.Parse("::1"), 0), "::1" }; + yield return new object?[] { "[::1]:1000", new IPEndPoint(IPAddress.Parse("::1"), 1000), null }; + yield return new object?[] { "2001:db7:85a3:8d2:1319:8a2e:370:7348", new IPEndPoint(IPAddress.Parse("2001:db7:85a3:8d2:1319:8a2e:370:7348"), 0), null }; + yield return new object?[] { "[2001:db7:85a3:8d2:1319:8a2e:370:7348]", new IPEndPoint(IPAddress.Parse("2001:db7:85a3:8d2:1319:8a2e:370:7348"), 0), "2001:db7:85a3:8d2:1319:8a2e:370:7348" }; + yield return new object?[] { "[2001:db7:85a3:8d2:1319:8a2e:370:7348]:1000", new IPEndPoint(IPAddress.Parse("2001:db7:85a3:8d2:1319:8a2e:370:7348"), 1000), null }; + } + + [Theory] + [MemberData(nameof(EndpointData))] + public void ParseEndPoint(string data, EndPoint expected, string? 
expectedFormat) + { + Assert.True(Format.TryParseEndPoint(data, out var result)); + Assert.Equal(expected, result); + + // and write again + var s = Format.ToString(result); + expectedFormat ??= data; + Assert.Equal(expectedFormat, s); + } + + [Theory] + [InlineData(CommandFlags.None, "None")] +#if NETFRAMEWORK + [InlineData(CommandFlags.PreferReplica, "PreferMaster, PreferReplica")] // 2-bit flag is hit-and-miss + [InlineData(CommandFlags.DemandReplica, "PreferMaster, DemandReplica")] // 2-bit flag is hit-and-miss +#else + [InlineData(CommandFlags.PreferReplica, "PreferReplica")] // 2-bit flag is hit-and-miss + [InlineData(CommandFlags.DemandReplica, "DemandReplica")] // 2-bit flag is hit-and-miss +#endif + +#if NET8_0_OR_GREATER + [InlineData(CommandFlags.PreferReplica | CommandFlags.FireAndForget, "FireAndForget, PreferReplica")] // 2-bit flag is hit-and-miss + [InlineData(CommandFlags.DemandReplica | CommandFlags.FireAndForget, "FireAndForget, DemandReplica")] // 2-bit flag is hit-and-miss +#else + [InlineData(CommandFlags.PreferReplica | CommandFlags.FireAndForget, "PreferMaster, FireAndForget, PreferReplica")] // 2-bit flag is hit-and-miss + [InlineData(CommandFlags.DemandReplica | CommandFlags.FireAndForget, "PreferMaster, FireAndForget, DemandReplica")] // 2-bit flag is hit-and-miss +#endif + public void CommandFlagsFormatting(CommandFlags value, string expected) { - public FormatTests(ITestOutputHelper output) : base(output) { } + Assert.SkipWhen(Runtime.IsMono, "Mono has different enum flag behavior"); + Assert.Equal(expected, value.ToString()); + } + + [Theory] + [InlineData(ClientType.Normal, "Normal")] + [InlineData(ClientType.Replica, "Replica")] + [InlineData(ClientType.PubSub, "PubSub")] + public void ClientTypeFormatting(ClientType value, string expected) + => Assert.Equal(expected, value.ToString()); - public static IEnumerable EndpointData() + [Theory] + [InlineData(ClientFlags.None, "None")] + [InlineData(ClientFlags.Replica | 
ClientFlags.Transaction, "Replica, Transaction")] + [InlineData(ClientFlags.Transaction | ClientFlags.ReplicaMonitor | ClientFlags.UnixDomainSocket, "ReplicaMonitor, Transaction, UnixDomainSocket")] + public void ClientFlagsFormatting(ClientFlags value, string expected) + => Assert.Equal(expected, value.ToString()); + + [Theory] + [InlineData(ReplicationChangeOptions.None, "None")] + [InlineData(ReplicationChangeOptions.ReplicateToOtherEndpoints, "ReplicateToOtherEndpoints")] + [InlineData(ReplicationChangeOptions.SetTiebreaker | ReplicationChangeOptions.ReplicateToOtherEndpoints, "SetTiebreaker, ReplicateToOtherEndpoints")] + [InlineData(ReplicationChangeOptions.Broadcast | ReplicationChangeOptions.SetTiebreaker | ReplicationChangeOptions.ReplicateToOtherEndpoints, "All")] + public void ReplicationChangeOptionsFormatting(ReplicationChangeOptions value, string expected) + => Assert.Equal(expected, value.ToString()); + + [Theory] + [InlineData(0, "0")] + [InlineData(1, "1")] + [InlineData(-1, "-1")] + [InlineData(100, "100")] + [InlineData(-100, "-100")] + [InlineData(int.MaxValue, "2147483647")] + [InlineData(int.MinValue, "-2147483648")] + public unsafe void FormatInt32(int value, string expectedValue) + { + Span dest = stackalloc byte[expectedValue.Length]; + Assert.Equal(expectedValue.Length, Format.FormatInt32(value, dest)); + fixed (byte* s = dest) { - // DNS - yield return new object[] { "localhost", new DnsEndPoint("localhost", 0) }; - yield return new object[] { "localhost:6390", new DnsEndPoint("localhost", 6390) }; - yield return new object[] { "bob.the.builder.com", new DnsEndPoint("bob.the.builder.com", 0) }; - yield return new object[] { "bob.the.builder.com:6390", new DnsEndPoint("bob.the.builder.com", 6390) }; - // IPv4 - yield return new object[] { "0.0.0.0", new IPEndPoint(IPAddress.Parse("0.0.0.0"), 0) }; - yield return new object[] { "127.0.0.1", new IPEndPoint(IPAddress.Parse("127.0.0.1"), 0) }; - yield return new object[] { "127.1", new 
IPEndPoint(IPAddress.Parse("127.1"), 0) }; - yield return new object[] { "127.1:6389", new IPEndPoint(IPAddress.Parse("127.1"), 6389) }; - yield return new object[] { "127.0.0.1:6389", new IPEndPoint(IPAddress.Parse("127.0.0.1"), 6389) }; - yield return new object[] { "127.0.0.1:1", new IPEndPoint(IPAddress.Parse("127.0.0.1"), 1) }; - yield return new object[] { "127.0.0.1:2", new IPEndPoint(IPAddress.Parse("127.0.0.1"), 2) }; - yield return new object[] { "10.10.9.18:2", new IPEndPoint(IPAddress.Parse("10.10.9.18"), 2) }; - // IPv6 - yield return new object[] { "::1", new IPEndPoint(IPAddress.Parse("::1"), 0) }; - yield return new object[] { "::1:6379", new IPEndPoint(IPAddress.Parse("::0.1.99.121"), 0) }; // remember your brackets! - yield return new object[] { "[::1]:6379", new IPEndPoint(IPAddress.Parse("::1"), 6379) }; - yield return new object[] { "[::1]", new IPEndPoint(IPAddress.Parse("::1"), 0) }; - yield return new object[] { "[::1]:1000", new IPEndPoint(IPAddress.Parse("::1"), 1000) }; - yield return new object[] { "[2001:db7:85a3:8d2:1319:8a2e:370:7348]", new IPEndPoint(IPAddress.Parse("2001:db7:85a3:8d2:1319:8a2e:370:7348"), 0) }; - yield return new object[] { "[2001:db7:85a3:8d2:1319:8a2e:370:7348]:1000", new IPEndPoint(IPAddress.Parse("2001:db7:85a3:8d2:1319:8a2e:370:7348"), 1000) }; + Assert.Equal(expectedValue, Encoding.ASCII.GetString(s, expectedValue.Length)); } + } - [Theory] - [MemberData(nameof(EndpointData))] - public void ParseEndPoint(string data, EndPoint expected) + [Theory] + [InlineData(0, "0")] + [InlineData(1, "1")] + [InlineData(-1, "-1")] + [InlineData(100, "100")] + [InlineData(-100, "-100")] + [InlineData(long.MaxValue, "9223372036854775807")] + [InlineData(long.MinValue, "-9223372036854775808")] + public unsafe void FormatInt64(long value, string expectedValue) + { + Assert.Equal(expectedValue.Length, Format.MeasureInt64(value)); + Span dest = stackalloc byte[expectedValue.Length]; + Assert.Equal(expectedValue.Length, 
Format.FormatInt64(value, dest)); + fixed (byte* s = dest) { - var result = Format.TryParseEndPoint(data); - Assert.Equal(expected, result); + Assert.Equal(expectedValue, Encoding.ASCII.GetString(s, expectedValue.Length)); } + } - [Theory] - [InlineData(CommandFlags.None, "None")] -#if NET472 - [InlineData(CommandFlags.PreferReplica, "PreferMaster, PreferReplica")] // 2-bit flag is hit-and-miss - [InlineData(CommandFlags.DemandReplica, "PreferMaster, DemandReplica")] // 2-bit flag is hit-and-miss -#else - [InlineData(CommandFlags.PreferReplica, "PreferReplica")] // 2-bit flag is hit-and-miss - [InlineData(CommandFlags.DemandReplica, "DemandReplica")] // 2-bit flag is hit-and-miss -#endif - [InlineData(CommandFlags.PreferReplica | CommandFlags.FireAndForget, "PreferMaster, FireAndForget, PreferReplica")] // 2-bit flag is hit-and-miss - [InlineData(CommandFlags.DemandReplica | CommandFlags.FireAndForget, "PreferMaster, FireAndForget, DemandReplica")] // 2-bit flag is hit-and-miss - public void CommandFlagsFormatting(CommandFlags value, string expected) - => Assert.Equal(expected, value.ToString()); - - [Theory] - [InlineData(ClientType.Normal, "Normal")] - [InlineData(ClientType.Replica, "Replica")] - [InlineData(ClientType.PubSub, "PubSub")] - public void ClientTypeFormatting(ClientType value, string expected) - => Assert.Equal(expected, value.ToString()); + [Theory] + [InlineData(0, "0")] + [InlineData(1, "1")] + [InlineData(100, "100")] + [InlineData(ulong.MaxValue, "18446744073709551615")] + public unsafe void FormatUInt64(ulong value, string expectedValue) + { + Assert.Equal(expectedValue.Length, Format.MeasureUInt64(value)); + Span dest = stackalloc byte[expectedValue.Length]; + Assert.Equal(expectedValue.Length, Format.FormatUInt64(value, dest)); + fixed (byte* s = dest) + { + Assert.Equal(expectedValue, Encoding.ASCII.GetString(s, expectedValue.Length)); + } + } - [Theory] - [InlineData(ClientFlags.None, "None")] - [InlineData(ClientFlags.Replica | 
ClientFlags.Transaction, "Replica, Transaction")] - [InlineData(ClientFlags.Transaction | ClientFlags.ReplicaMonitor | ClientFlags.UnixDomainSocket, "ReplicaMonitor, Transaction, UnixDomainSocket")] - public void ClientFlagsFormatting(ClientFlags value, string expected) - => Assert.Equal(expected, value.ToString()); + [Theory] + [InlineData(0, "0")] + [InlineData(1, "1")] + [InlineData(-1, "-1")] + [InlineData(0.5, "0.5")] + [InlineData(0.50001, "0.50000999999999995")] + [InlineData(Math.PI, "3.1415926535897931")] + [InlineData(100, "100")] + [InlineData(-100, "-100")] + [InlineData(double.MaxValue, "1.7976931348623157E+308")] + [InlineData(double.MinValue, "-1.7976931348623157E+308")] + [InlineData(double.Epsilon, "4.9406564584124654E-324")] + [InlineData(double.PositiveInfinity, "+inf")] + [InlineData(double.NegativeInfinity, "-inf")] + [InlineData(double.NaN, "NaN")] // never used in normal code - [Theory] - [InlineData(ReplicationChangeOptions.None, "None")] - [InlineData(ReplicationChangeOptions.ReplicateToOtherEndpoints, "ReplicateToOtherEndpoints")] - [InlineData(ReplicationChangeOptions.SetTiebreaker | ReplicationChangeOptions.ReplicateToOtherEndpoints, "SetTiebreaker, ReplicateToOtherEndpoints")] - [InlineData(ReplicationChangeOptions.Broadcast | ReplicationChangeOptions.SetTiebreaker | ReplicationChangeOptions.ReplicateToOtherEndpoints, "All")] - public void ReplicationChangeOptionsFormatting(ReplicationChangeOptions value, string expected) - => Assert.Equal(expected, value.ToString()); + public unsafe void FormatDouble(double value, string expectedValue) + { + Assert.Equal(expectedValue.Length, Format.MeasureDouble(value)); + Span dest = stackalloc byte[expectedValue.Length]; + Assert.Equal(expectedValue.Length, Format.FormatDouble(value, dest)); + fixed (byte* s = dest) + { + Assert.Equal(expectedValue, Encoding.ASCII.GetString(s, expectedValue.Length)); + } } } diff --git a/tests/StackExchange.Redis.Tests/GarbageCollectionTests.cs 
b/tests/StackExchange.Redis.Tests/GarbageCollectionTests.cs index c9af996d1..ef28ed6e9 100644 --- a/tests/StackExchange.Redis.Tests/GarbageCollectionTests.cs +++ b/tests/StackExchange.Redis.Tests/GarbageCollectionTests.cs @@ -2,56 +2,102 @@ using System.Threading; using System.Threading.Tasks; using Xunit; -using Xunit.Abstractions; -namespace StackExchange.Redis.Tests +namespace StackExchange.Redis.Tests; + +[Collection(NonParallelCollection.Name)] // because I need to measure some things that could get confused +public class GarbageCollectionTests(ITestOutputHelper helper) : TestBase(helper) { - [Collection(NonParallelCollection.Name)] // because I need to measure some things that could get confused - public class GarbageCollectionTests : TestBase + private static void ForceGC() { - public GarbageCollectionTests(ITestOutputHelper helper) : base(helper) { } - - private static void ForceGC() + for (int i = 0; i < 3; i++) { - for (int i = 0; i < 3; i++) - { - GC.Collect(GC.MaxGeneration, GCCollectionMode.Forced); - GC.WaitForPendingFinalizers(); - } + GC.Collect(GC.MaxGeneration, GCCollectionMode.Forced); + GC.WaitForPendingFinalizers(); } + } - [Fact(Skip = "needs investigation on netcoreapp3.1")] - public async Task MuxerIsCollected() - { + [Fact] + public async Task MuxerIsCollected() + { #if DEBUG - Skip.Inconclusive("Only predictable in release builds"); + Assert.Skip("Only predictable in release builds"); #endif - // this is more nuanced than it looks; multiple sockets with - // async callbacks, plus a heartbeat on a timer + // this is more nuanced than it looks; multiple sockets with + // async callbacks, plus a heartbeat on a timer - // deliberately not "using" - we *want* to leak this - var muxer = Create(); - muxer.GetDatabase().Ping(); // smoke-test + // deliberately not "using" - we *want* to leak this + var conn = Create(); + await conn.GetDatabase().PingAsync(); // smoke-test - ForceGC(); + ForceGC(); -//#if DEBUG // this counter only exists in debug 
+// #if DEBUG // this counter only exists in debug // int before = ConnectionMultiplexer.CollectedWithoutDispose; -//#endif - var wr = new WeakReference(muxer); - muxer = null; +// #endif + var wr = new WeakReference(conn); + conn = null; - ForceGC(); - await Task.Delay(2000).ForAwait(); // GC is twitchy - ForceGC(); + ForceGC(); + await Task.Delay(2000).ForAwait(); // GC is twitchy + ForceGC(); - // should be collectable - Assert.Null(wr.Target); + // should be collectable + Assert.Null(wr.Target); -//#if DEBUG // this counter only exists in debug +// #if DEBUG // this counter only exists in debug // int after = ConnectionMultiplexer.CollectedWithoutDispose; // Assert.Equal(before + 1, after); -//#endif +// #endif + } + + [Fact] + public async Task UnrootedBackloggedAsyncTaskIsCompletedOnTimeout() + { + Skip.UnlessLongRunning(); + // Run the test on a separate thread without keeping a reference to the task to ensure + // that there are no references to the variables in test task from the main thread. + // WithTimeout must not be used within Task.Run because timers are rooted and would keep everything alive. + var startGC = new TaskCompletionSource(); + Task? 
completedTestTask = null; + _ = Task.Run(async () => + { + await using var conn = await ConnectionMultiplexer.ConnectAsync( + new ConfigurationOptions() + { + BacklogPolicy = BacklogPolicy.Default, + AbortOnConnectFail = false, + ConnectTimeout = 50, + SyncTimeout = 1000, + AllowAdmin = true, + EndPoints = { GetConfiguration() }, + }, + Writer); + var db = conn.GetDatabase(); + + // Disconnect and don't allow re-connection + conn.AllowConnect = false; + var server = conn.GetServerSnapshot()[0]; + server.SimulateConnectionFailure(SimulatedFailureType.All); + Assert.False(conn.IsConnected); + + var pingTask = Assert.ThrowsAsync(() => db.PingAsync()); + startGC.SetResult(true); + await pingTask; + }).ContinueWith(testTask => Volatile.Write(ref completedTestTask, testTask)); + + // Use sync wait and sleep to ensure a more timely GC. + var timeoutTask = Task.Delay(5000); + Task.WaitAny(startGC.Task, timeoutTask); + while (Volatile.Read(ref completedTestTask) == null && !timeoutTask.IsCompleted) + { + ForceGC(); + Thread.Sleep(200); } + + var testTask = Volatile.Read(ref completedTestTask); + if (testTask == null) Assert.Fail("Timeout."); + + await testTask; } } diff --git a/tests/StackExchange.Redis.Tests/GeoTests.cs b/tests/StackExchange.Redis.Tests/GeoTests.cs index 476604da2..c46f65be7 100644 --- a/tests/StackExchange.Redis.Tests/GeoTests.cs +++ b/tests/StackExchange.Redis.Tests/GeoTests.cs @@ -1,207 +1,624 @@ -using Xunit; -using System; -using Xunit.Abstractions; +using System; using System.Threading.Tasks; +using Xunit; -namespace StackExchange.Redis.Tests +namespace StackExchange.Redis.Tests; + +[RunPerProtocol] +public class GeoTests(ITestOutputHelper output, SharedConnectionFixture fixture) : TestBase(output, fixture) { - [Collection(SharedConnectionFixture.Key)] - public class GeoTests : TestBase - { - public GeoTests(ITestOutputHelper output, SharedConnectionFixture fixture) : base (output, fixture) { } - - public static GeoEntry - palermo = new 
GeoEntry(13.361389, 38.115556, "Palermo"), - catania = new GeoEntry(15.087269, 37.502669, "Catania"), - agrigento = new GeoEntry(13.5765, 37.311, "Agrigento"), - cefalù = new GeoEntry(14.0188, 38.0084, "Cefalù"); - public static GeoEntry[] all = { palermo, catania, agrigento, cefalù }; - - [Fact] - public void GeoAdd() - { - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Geo), r => r.Geo); - var db = conn.GetDatabase(); - RedisKey key = Me(); - db.KeyDelete(key, CommandFlags.FireAndForget); - - // add while not there - Assert.True(db.GeoAdd(key, cefalù.Longitude, cefalù.Latitude, cefalù.Member)); - Assert.Equal(2, db.GeoAdd(key, new [] { palermo, catania })); - Assert.True(db.GeoAdd(key, agrigento)); - - // now add again - Assert.False(db.GeoAdd(key, cefalù.Longitude, cefalù.Latitude, cefalù.Member)); - Assert.Equal(0, db.GeoAdd(key, new [] { palermo, catania })); - Assert.False(db.GeoAdd(key, agrigento)); - - // Validate - var pos = db.GeoPosition(key, palermo.Member); - Assert.NotNull(pos); - Assert.Equal(palermo.Longitude, pos.Value.Longitude, 5); - Assert.Equal(palermo.Latitude, pos.Value.Latitude, 5); - } - } - - [Fact] - public void GetDistance() - { - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Geo), r => r.Geo); - var db = conn.GetDatabase(); - RedisKey key = Me(); - db.KeyDelete(key, CommandFlags.FireAndForget); - db.GeoAdd(key, all, CommandFlags.FireAndForget); - var val = db.GeoDistance(key, "Palermo", "Catania", GeoUnit.Meters); - Assert.True(val.HasValue); - Assert.Equal(166274.1516, val); - - val = db.GeoDistance(key, "Palermo", "Nowhere", GeoUnit.Meters); - Assert.False(val.HasValue); - } - } - - [Fact] - public void GeoHash() - { - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Geo), r => r.Geo); - var db = conn.GetDatabase(); - RedisKey key = Me(); - db.KeyDelete(key, CommandFlags.FireAndForget); - db.GeoAdd(key, all, 
CommandFlags.FireAndForget); - - var hashes = db.GeoHash(key, new RedisValue[] { palermo.Member, "Nowhere", agrigento.Member }); - Assert.Equal(3, hashes.Length); - Assert.Equal("sqc8b49rny0", hashes[0]); - Assert.Null(hashes[1]); - Assert.Equal("sq9skbq0760", hashes[2]); - - var hash = db.GeoHash(key, "Palermo"); - Assert.Equal("sqc8b49rny0", hash); - - hash = db.GeoHash(key, "Nowhere"); - Assert.Null(hash); - } - } - - [Fact] - public void GeoGetPosition() - { - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Geo), r => r.Geo); - var db = conn.GetDatabase(); - RedisKey key = Me(); - db.KeyDelete(key, CommandFlags.FireAndForget); - db.GeoAdd(key, all, CommandFlags.FireAndForget); - - var pos = db.GeoPosition(key, palermo.Member); - Assert.True(pos.HasValue); - Assert.Equal(Math.Round(palermo.Longitude, 6), Math.Round(pos.Value.Longitude, 6)); - Assert.Equal(Math.Round(palermo.Latitude, 6), Math.Round(pos.Value.Latitude, 6)); - - pos = db.GeoPosition(key, "Nowhere"); - Assert.False(pos.HasValue); - } - } - - [Fact] - public void GeoRemove() - { - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Geo), r => r.Geo); - var db = conn.GetDatabase(); - RedisKey key = Me(); - db.KeyDelete(key, CommandFlags.FireAndForget); - db.GeoAdd(key, all, CommandFlags.FireAndForget); - - var pos = db.GeoPosition(key, "Palermo"); - Assert.True(pos.HasValue); - - Assert.False(db.GeoRemove(key, "Nowhere")); - Assert.True(db.GeoRemove(key, "Palermo")); - Assert.False(db.GeoRemove(key, "Palermo")); - - pos = db.GeoPosition(key, "Palermo"); - Assert.False(pos.HasValue); - } - } - - [Fact] - public void GeoRadius() - { - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Geo), r => r.Geo); - var db = conn.GetDatabase(); - RedisKey key = Me(); - db.KeyDelete(key, CommandFlags.FireAndForget); - db.GeoAdd(key, all, CommandFlags.FireAndForget); - - var results = db.GeoRadius(key, cefalù.Member, 
60, GeoUnit.Miles, 2, Order.Ascending); - Assert.Equal(2, results.Length); - - Assert.Equal(results[0].Member, cefalù.Member); - Assert.Equal(0, results[0].Distance.Value); - Assert.Equal(Math.Round(results[0].Position.Value.Longitude, 5), Math.Round(cefalù.Position.Longitude, 5)); - Assert.Equal(Math.Round(results[0].Position.Value.Latitude, 5), Math.Round(cefalù.Position.Latitude, 5)); - Assert.False(results[0].Hash.HasValue); - - Assert.Equal(results[1].Member, palermo.Member); - Assert.Equal(Math.Round(36.5319, 6), Math.Round(results[1].Distance.Value, 6)); - Assert.Equal(Math.Round(results[1].Position.Value.Longitude, 5), Math.Round(palermo.Position.Longitude, 5)); - Assert.Equal(Math.Round(results[1].Position.Value.Latitude, 5), Math.Round(palermo.Position.Latitude, 5)); - Assert.False(results[1].Hash.HasValue); - - results = db.GeoRadius(key, cefalù.Member, 60, GeoUnit.Miles, 2, Order.Ascending, GeoRadiusOptions.None); - Assert.Equal(2, results.Length); - Assert.Equal(results[0].Member, cefalù.Member); - Assert.False(results[0].Position.HasValue); - Assert.False(results[0].Distance.HasValue); - Assert.False(results[0].Hash.HasValue); - - Assert.Equal(results[1].Member, palermo.Member); - Assert.False(results[1].Position.HasValue); - Assert.False(results[1].Distance.HasValue); - Assert.False(results[1].Hash.HasValue); - } - } - - [Fact] - public async Task GeoRadiusOverloads() - { - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Geo), r => r.Geo); - var db = conn.GetDatabase(); - RedisKey key = Me(); - db.KeyDelete(key, CommandFlags.FireAndForget); - - Assert.True(db.GeoAdd(key, -1.759925, 52.19493, "steve")); - Assert.True(db.GeoAdd(key, -3.360655, 54.66395, "dave")); - - // Invalid overload - // Since this would throw ERR could not decode requested zset member, we catch and return something more useful to the user earlier. 
- var ex = Assert.Throws(() => db.GeoRadius(key, -1.759925, 52.19493, GeoUnit.Miles, 500, Order.Ascending, GeoRadiusOptions.WithDistance)); - Assert.StartsWith("Member should not be a double, you likely want the GeoRadius(RedisKey, double, double, ...) overload.", ex.Message); - Assert.Equal("member", ex.ParamName); - ex = await Assert.ThrowsAsync(() => db.GeoRadiusAsync(key, -1.759925, 52.19493, GeoUnit.Miles, 500, Order.Ascending, GeoRadiusOptions.WithDistance)).ForAwait(); - Assert.StartsWith("Member should not be a double, you likely want the GeoRadius(RedisKey, double, double, ...) overload.", ex.Message); - Assert.Equal("member", ex.ParamName); - - // The good stuff - GeoRadiusResult[] result = db.GeoRadius(key, -1.759925, 52.19493, 500, unit: GeoUnit.Miles, order: Order.Ascending, options: GeoRadiusOptions.WithDistance); - Assert.NotNull(result); - - result = await db.GeoRadiusAsync(key, -1.759925, 52.19493, 500, unit: GeoUnit.Miles, order: Order.Ascending, options: GeoRadiusOptions.WithDistance).ForAwait(); - Assert.NotNull(result); - } - } + private static readonly GeoEntry + Palermo = new GeoEntry(13.361389, 38.115556, "Palermo"), + Catania = new GeoEntry(15.087269, 37.502669, "Catania"), + Agrigento = new GeoEntry(13.5765, 37.311, "Agrigento"), + Cefalù = new GeoEntry(14.0188, 38.0084, "Cefalù"); + + private static readonly GeoEntry[] All = [Palermo, Catania, Agrigento, Cefalù]; + + [Fact] + public async Task GeoAdd() + { + await using var conn = Create(require: RedisFeatures.v3_2_0); + + var db = conn.GetDatabase(); + RedisKey key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + + // add while not there + Assert.True(db.GeoAdd(key, Cefalù.Longitude, Cefalù.Latitude, Cefalù.Member)); + Assert.Equal(2, db.GeoAdd(key, [Palermo, Catania])); + Assert.True(db.GeoAdd(key, Agrigento)); + + // now add again + Assert.False(db.GeoAdd(key, Cefalù.Longitude, Cefalù.Latitude, Cefalù.Member)); + Assert.Equal(0, db.GeoAdd(key, [Palermo, Catania])); + 
Assert.False(db.GeoAdd(key, Agrigento)); + + // Validate + var pos = db.GeoPosition(key, Palermo.Member); + Assert.NotNull(pos); + Assert.Equal(Palermo.Longitude, pos!.Value.Longitude, 5); + Assert.Equal(Palermo.Latitude, pos!.Value.Latitude, 5); + } + + [Fact] + public async Task GetDistance() + { + await using var conn = Create(require: RedisFeatures.v3_2_0); + + var db = conn.GetDatabase(); + RedisKey key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + db.GeoAdd(key, All, CommandFlags.FireAndForget); + var val = db.GeoDistance(key, "Palermo", "Catania", GeoUnit.Meters); + Assert.True(val.HasValue); + Assert.Equal(166274.1516, val); + + val = db.GeoDistance(key, "Palermo", "Nowhere", GeoUnit.Meters); + Assert.False(val.HasValue); + } + + [Fact] + public async Task GeoHash() + { + await using var conn = Create(require: RedisFeatures.v3_2_0); + + var db = conn.GetDatabase(); + RedisKey key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + db.GeoAdd(key, All, CommandFlags.FireAndForget); + + var hashes = db.GeoHash(key, [Palermo.Member, "Nowhere", Agrigento.Member]); + Assert.NotNull(hashes); + Assert.Equal(3, hashes.Length); + Assert.Equal("sqc8b49rny0", hashes[0]); + Assert.Null(hashes[1]); + Assert.Equal("sq9skbq0760", hashes[2]); + + var hash = db.GeoHash(key, "Palermo"); + Assert.Equal("sqc8b49rny0", hash); + + hash = db.GeoHash(key, "Nowhere"); + Assert.Null(hash); + } + + [Fact] + public async Task GeoGetPosition() + { + await using var conn = Create(require: RedisFeatures.v3_2_0); + + var db = conn.GetDatabase(); + RedisKey key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + db.GeoAdd(key, All, CommandFlags.FireAndForget); + + var pos = db.GeoPosition(key, Palermo.Member); + Assert.True(pos.HasValue); + Assert.Equal(Math.Round(Palermo.Longitude, 6), Math.Round(pos.Value.Longitude, 6)); + Assert.Equal(Math.Round(Palermo.Latitude, 6), Math.Round(pos.Value.Latitude, 6)); + + pos = db.GeoPosition(key, "Nowhere"); + 
Assert.False(pos.HasValue); + } + + [Fact] + public async Task GeoRemove() + { + await using var conn = Create(require: RedisFeatures.v3_2_0); + + var db = conn.GetDatabase(); + RedisKey key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + db.GeoAdd(key, All, CommandFlags.FireAndForget); + + var pos = db.GeoPosition(key, "Palermo"); + Assert.True(pos.HasValue); + + Assert.False(db.GeoRemove(key, "Nowhere")); + Assert.True(db.GeoRemove(key, "Palermo")); + Assert.False(db.GeoRemove(key, "Palermo")); + + pos = db.GeoPosition(key, "Palermo"); + Assert.False(pos.HasValue); + } + + [Fact] + public async Task GeoRadius() + { + await using var conn = Create(require: RedisFeatures.v3_2_0); + + var db = conn.GetDatabase(); + RedisKey key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + db.GeoAdd(key, All, CommandFlags.FireAndForget); + + var results = db.GeoRadius(key, Cefalù.Member, 60, GeoUnit.Miles, 2, Order.Ascending); + Assert.Equal(2, results.Length); + + Assert.Equal(results[0].Member, Cefalù.Member); + Assert.Equal(0, results[0].Distance); + var position0 = results[0].Position; + Assert.NotNull(position0); + Assert.Equal(Math.Round(position0!.Value.Longitude, 5), Math.Round(Cefalù.Position.Longitude, 5)); + Assert.Equal(Math.Round(position0!.Value.Latitude, 5), Math.Round(Cefalù.Position.Latitude, 5)); + Assert.False(results[0].Hash.HasValue); + + Assert.Equal(results[1].Member, Palermo.Member); + var distance1 = results[1].Distance; + Assert.NotNull(distance1); + Assert.Equal(Math.Round(36.5319, 6), Math.Round(distance1!.Value, 6)); + var position1 = results[1].Position; + Assert.NotNull(position1); + Assert.Equal(Math.Round(position1!.Value.Longitude, 5), Math.Round(Palermo.Position.Longitude, 5)); + Assert.Equal(Math.Round(position1!.Value.Latitude, 5), Math.Round(Palermo.Position.Latitude, 5)); + Assert.False(results[1].Hash.HasValue); + + results = db.GeoRadius(key, Cefalù.Member, 60, GeoUnit.Miles, 2, Order.Ascending, GeoRadiusOptions.None); 
+ Assert.Equal(2, results.Length); + Assert.Equal(results[0].Member, Cefalù.Member); + Assert.False(results[0].Position.HasValue); + Assert.False(results[0].Distance.HasValue); + Assert.False(results[0].Hash.HasValue); + + Assert.Equal(results[1].Member, Palermo.Member); + Assert.False(results[1].Position.HasValue); + Assert.False(results[1].Distance.HasValue); + Assert.False(results[1].Hash.HasValue); + } + + [Fact] + public async Task GeoRadiusOverloads() + { + await using var conn = Create(require: RedisFeatures.v3_2_0); + + var db = conn.GetDatabase(); + RedisKey key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + + Assert.True(db.GeoAdd(key, -1.759925, 52.19493, "steve")); + Assert.True(db.GeoAdd(key, -3.360655, 54.66395, "dave")); + + // Invalid overload + // Since this would throw ERR could not decode requested zset member, we catch and return something more useful to the user earlier. + var ex = Assert.Throws(() => db.GeoRadius(key, -1.759925, 52.19493, GeoUnit.Miles, 500, Order.Ascending, GeoRadiusOptions.WithDistance)); + Assert.StartsWith("Member should not be a double, you likely want the GeoRadius(RedisKey, double, double, ...) overload.", ex.Message); + Assert.Equal("member", ex.ParamName); + ex = await Assert.ThrowsAsync(() => db.GeoRadiusAsync(key, -1.759925, 52.19493, GeoUnit.Miles, 500, Order.Ascending, GeoRadiusOptions.WithDistance)).ForAwait(); + Assert.StartsWith("Member should not be a double, you likely want the GeoRadius(RedisKey, double, double, ...) 
overload.", ex.Message); + Assert.Equal("member", ex.ParamName); + + // The good stuff + GeoRadiusResult[] result = db.GeoRadius(key, -1.759925, 52.19493, 500, unit: GeoUnit.Miles, order: Order.Ascending, options: GeoRadiusOptions.WithDistance); + Assert.NotNull(result); + + result = await db.GeoRadiusAsync(key, -1.759925, 52.19493, 500, unit: GeoUnit.Miles, order: Order.Ascending, options: GeoRadiusOptions.WithDistance).ForAwait(); + Assert.NotNull(result); + } + + private async Task GeoSearchSetupAsync(RedisKey key, IDatabase db) + { + await db.KeyDeleteAsync(key); + await db.GeoAddAsync(key, 82.6534, 27.7682, "rays"); + await db.GeoAddAsync(key, 79.3891, 43.6418, "blue jays"); + await db.GeoAddAsync(key, 76.6217, 39.2838, "orioles"); + await db.GeoAddAsync(key, 71.0927, 42.3467, "red sox"); + await db.GeoAddAsync(key, 73.9262, 40.8296, "yankees"); + } + + private void GeoSearchSetup(RedisKey key, IDatabase db) + { + db.KeyDelete(key); + db.GeoAdd(key, 82.6534, 27.7682, "rays"); + db.GeoAdd(key, 79.3891, 43.6418, "blue jays"); + db.GeoAdd(key, 76.6217, 39.2838, "orioles"); + db.GeoAdd(key, 71.0927, 42.3467, "red sox"); + db.GeoAdd(key, 73.9262, 40.8296, "yankees"); + } + + [Fact] + public async Task GeoSearchCircleMemberAsync() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var key = Me(); + var db = conn.GetDatabase(); + await GeoSearchSetupAsync(key, db); + + var circle = new GeoSearchCircle(500, GeoUnit.Miles); + var res = await db.GeoSearchAsync(key, "yankees", circle); + Assert.Contains(res, x => x.Member == "yankees"); + Assert.Contains(res, x => x.Member == "red sox"); + Assert.Contains(res, x => x.Member == "orioles"); + Assert.Contains(res, x => x.Member == "blue jays"); + Assert.NotNull(res[0].Distance); + Assert.NotNull(res[0].Position); + Assert.Null(res[0].Hash); + Assert.Equal(4, res.Length); + } + + [Fact] + public async Task GeoSearchCircleMemberAsyncOnlyHash() + { + await using var conn = Create(require: 
RedisFeatures.v6_2_0); + + var key = Me(); + var db = conn.GetDatabase(); + await GeoSearchSetupAsync(key, db); + + var circle = new GeoSearchCircle(500, GeoUnit.Miles); + var res = await db.GeoSearchAsync(key, "yankees", circle, options: GeoRadiusOptions.WithGeoHash); + Assert.Contains(res, x => x.Member == "yankees"); + Assert.Contains(res, x => x.Member == "red sox"); + Assert.Contains(res, x => x.Member == "orioles"); + Assert.Contains(res, x => x.Member == "blue jays"); + Assert.Null(res[0].Distance); + Assert.Null(res[0].Position); + Assert.NotNull(res[0].Hash); + Assert.Equal(4, res.Length); + } + + [Fact] + public async Task GeoSearchCircleMemberAsyncHashAndDistance() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var key = Me(); + var db = conn.GetDatabase(); + await GeoSearchSetupAsync(key, db); + + var circle = new GeoSearchCircle(500, GeoUnit.Miles); + var res = await db.GeoSearchAsync(key, "yankees", circle, options: GeoRadiusOptions.WithGeoHash | GeoRadiusOptions.WithDistance); + Assert.Contains(res, x => x.Member == "yankees"); + Assert.Contains(res, x => x.Member == "red sox"); + Assert.Contains(res, x => x.Member == "orioles"); + Assert.Contains(res, x => x.Member == "blue jays"); + Assert.NotNull(res[0].Distance); + Assert.Null(res[0].Position); + Assert.NotNull(res[0].Hash); + Assert.Equal(4, res.Length); + } + + [Fact] + public async Task GeoSearchCircleLonLatAsync() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var key = Me(); + var db = conn.GetDatabase(); + await GeoSearchSetupAsync(key, db); + + var circle = new GeoSearchCircle(500, GeoUnit.Miles); + var res = await db.GeoSearchAsync(key, 73.9262, 40.8296, circle); + Assert.Contains(res, x => x.Member == "yankees"); + Assert.Contains(res, x => x.Member == "red sox"); + Assert.Contains(res, x => x.Member == "orioles"); + Assert.Contains(res, x => x.Member == "blue jays"); + Assert.Equal(4, res.Length); + } + + [Fact] + public async Task 
GeoSearchCircleMember() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var key = Me(); + var db = conn.GetDatabase(); + GeoSearchSetup(key, db); + + var circle = new GeoSearchCircle(500 * 1609); + var res = db.GeoSearch(key, "yankees", circle); + Assert.Contains(res, x => x.Member == "yankees"); + Assert.Contains(res, x => x.Member == "red sox"); + Assert.Contains(res, x => x.Member == "orioles"); + Assert.Contains(res, x => x.Member == "blue jays"); + Assert.Equal(4, res.Length); + } + + [Fact] + public async Task GeoSearchCircleLonLat() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var key = Me(); + var db = conn.GetDatabase(); + GeoSearchSetup(key, db); + + var circle = new GeoSearchCircle(500 * 5280, GeoUnit.Feet); + var res = db.GeoSearch(key, 73.9262, 40.8296, circle); + Assert.Contains(res, x => x.Member == "yankees"); + Assert.Contains(res, x => x.Member == "red sox"); + Assert.Contains(res, x => x.Member == "orioles"); + Assert.Contains(res, x => x.Member == "blue jays"); + Assert.Equal(4, res.Length); + } + + [Fact] + public async Task GeoSearchBoxMemberAsync() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var key = Me(); + var db = conn.GetDatabase(); + await GeoSearchSetupAsync(key, db); + + var box = new GeoSearchBox(500, 500, GeoUnit.Kilometers); + var res = await db.GeoSearchAsync(key, "yankees", box); + Assert.Contains(res, x => x.Member == "yankees"); + Assert.Contains(res, x => x.Member == "red sox"); + Assert.Contains(res, x => x.Member == "orioles"); + Assert.Equal(3, res.Length); + } + + [Fact] + public async Task GeoSearchBoxLonLatAsync() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var key = Me(); + var db = conn.GetDatabase(); + await GeoSearchSetupAsync(key, db); + + var box = new GeoSearchBox(500, 500, GeoUnit.Kilometers); + var res = await db.GeoSearchAsync(key, 73.9262, 40.8296, box); + Assert.Contains(res, x => x.Member == "yankees"); 
+ Assert.Contains(res, x => x.Member == "red sox"); + Assert.Contains(res, x => x.Member == "orioles"); + Assert.Equal(3, res.Length); + } + + [Fact] + public async Task GeoSearchBoxMember() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var key = Me(); + var db = conn.GetDatabase(); + GeoSearchSetup(key, db); + + var box = new GeoSearchBox(500, 500, GeoUnit.Kilometers); + var res = db.GeoSearch(key, "yankees", box); + Assert.Contains(res, x => x.Member == "yankees"); + Assert.Contains(res, x => x.Member == "red sox"); + Assert.Contains(res, x => x.Member == "orioles"); + Assert.Equal(3, res.Length); + } + + [Fact] + public async Task GeoSearchBoxLonLat() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var key = Me(); + var db = conn.GetDatabase(); + GeoSearchSetup(key, db); + + var box = new GeoSearchBox(500, 500, GeoUnit.Kilometers); + var res = db.GeoSearch(key, 73.9262, 40.8296, box); + Assert.Contains(res, x => x.Member == "yankees"); + Assert.Contains(res, x => x.Member == "red sox"); + Assert.Contains(res, x => x.Member == "orioles"); + Assert.Equal(3, res.Length); + } + + [Fact] + public async Task GeoSearchLimitCount() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var key = Me(); + var db = conn.GetDatabase(); + GeoSearchSetup(key, db); + + var box = new GeoSearchBox(500, 500, GeoUnit.Kilometers); + var res = db.GeoSearch(key, 73.9262, 40.8296, box, count: 2); + Assert.Contains(res, x => x.Member == "yankees"); + Assert.Contains(res, x => x.Member == "orioles"); + Assert.Equal(2, res.Length); + } + + [Fact] + public async Task GeoSearchLimitCountMakeNoDemands() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var key = Me(); + var db = conn.GetDatabase(); + GeoSearchSetup(key, db); + + var box = new GeoSearchBox(500, 500, GeoUnit.Kilometers); + var res = db.GeoSearch(key, 73.9262, 40.8296, box, count: 2, demandClosest: false); + Assert.Contains(res, x => 
x.Member == "red sox"); // this order MIGHT not be fully deterministic, seems to work for our purposes. + Assert.Contains(res, x => x.Member == "orioles"); + Assert.Equal(2, res.Length); + } + + [Fact] + public async Task GeoSearchBoxLonLatDescending() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var key = Me(); + var db = conn.GetDatabase(); + await GeoSearchSetupAsync(key, db); + + var box = new GeoSearchBox(500, 500, GeoUnit.Kilometers); + var res = await db.GeoSearchAsync(key, 73.9262, 40.8296, box, order: Order.Descending); + Assert.Contains(res, x => x.Member == "yankees"); + Assert.Contains(res, x => x.Member == "red sox"); + Assert.Contains(res, x => x.Member == "orioles"); + Assert.Equal(3, res.Length); + Assert.Equal("red sox", res[0].Member); + } + + [Fact] + public async Task GeoSearchBoxMemberAndStoreAsync() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var me = Me(); + var db = conn.GetDatabase(); + RedisKey sourceKey = $"{me}:source"; + RedisKey destinationKey = $"{me}:destination"; + await db.KeyDeleteAsync(destinationKey); + await GeoSearchSetupAsync(sourceKey, db); + + var box = new GeoSearchBox(500, 500, GeoUnit.Kilometers); + var res = await db.GeoSearchAndStoreAsync(sourceKey, destinationKey, "yankees", box); + var set = await db.GeoSearchAsync(destinationKey, "yankees", new GeoSearchCircle(10000, GeoUnit.Miles)); + Assert.Contains(set, x => x.Member == "yankees"); + Assert.Contains(set, x => x.Member == "red sox"); + Assert.Contains(set, x => x.Member == "orioles"); + Assert.Equal(3, set.Length); + Assert.Equal(3, res); + } + + [Fact] + public async Task GeoSearchBoxLonLatAndStoreAsync() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var me = Me(); + var db = conn.GetDatabase(); + RedisKey sourceKey = $"{me}:source"; + RedisKey destinationKey = $"{me}:destination"; + await db.KeyDeleteAsync(destinationKey); + await GeoSearchSetupAsync(sourceKey, db); + + var box = 
new GeoSearchBox(500, 500, GeoUnit.Kilometers); + var res = await db.GeoSearchAndStoreAsync(sourceKey, destinationKey, 73.9262, 40.8296, box); + var set = await db.GeoSearchAsync(destinationKey, "yankees", new GeoSearchCircle(10000, GeoUnit.Miles)); + Assert.Contains(set, x => x.Member == "yankees"); + Assert.Contains(set, x => x.Member == "red sox"); + Assert.Contains(set, x => x.Member == "orioles"); + Assert.Equal(3, set.Length); + Assert.Equal(3, res); + } + + [Fact] + public async Task GeoSearchCircleMemberAndStoreAsync() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var me = Me(); + var db = conn.GetDatabase(); + RedisKey sourceKey = $"{me}:source"; + RedisKey destinationKey = $"{me}:destination"; + await db.KeyDeleteAsync(destinationKey); + await GeoSearchSetupAsync(sourceKey, db); + + var circle = new GeoSearchCircle(500, GeoUnit.Kilometers); + var res = await db.GeoSearchAndStoreAsync(sourceKey, destinationKey, "yankees", circle); + var set = await db.GeoSearchAsync(destinationKey, "yankees", new GeoSearchCircle(10000, GeoUnit.Miles)); + Assert.Contains(set, x => x.Member == "yankees"); + Assert.Contains(set, x => x.Member == "red sox"); + Assert.Contains(set, x => x.Member == "orioles"); + Assert.Equal(3, set.Length); + Assert.Equal(3, res); + } + + [Fact] + public async Task GeoSearchCircleLonLatAndStoreAsync() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var me = Me(); + var db = conn.GetDatabase(); + RedisKey sourceKey = $"{me}:source"; + RedisKey destinationKey = $"{me}:destination"; + await db.KeyDeleteAsync(destinationKey); + await GeoSearchSetupAsync(sourceKey, db); + + var circle = new GeoSearchCircle(500, GeoUnit.Kilometers); + var res = await db.GeoSearchAndStoreAsync(sourceKey, destinationKey, 73.9262, 40.8296, circle); + var set = await db.GeoSearchAsync(destinationKey, "yankees", new GeoSearchCircle(10000, GeoUnit.Miles)); + Assert.Contains(set, x => x.Member == "yankees"); + 
Assert.Contains(set, x => x.Member == "red sox"); + Assert.Contains(set, x => x.Member == "orioles"); + Assert.Equal(3, set.Length); + Assert.Equal(3, res); + } + + [Fact] + public async Task GeoSearchCircleMemberAndStore() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var me = Me(); + var db = conn.GetDatabase(); + RedisKey sourceKey = $"{me}:source"; + RedisKey destinationKey = $"{me}:destination"; + db.KeyDelete(destinationKey); + GeoSearchSetup(sourceKey, db); + + var circle = new GeoSearchCircle(500, GeoUnit.Kilometers); + var res = db.GeoSearchAndStore(sourceKey, destinationKey, "yankees", circle); + var set = db.GeoSearch(destinationKey, "yankees", new GeoSearchCircle(10000, GeoUnit.Miles)); + Assert.Contains(set, x => x.Member == "yankees"); + Assert.Contains(set, x => x.Member == "red sox"); + Assert.Contains(set, x => x.Member == "orioles"); + Assert.Equal(3, set.Length); + Assert.Equal(3, res); + } + + [Fact] + public async Task GeoSearchCircleLonLatAndStore() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var me = Me(); + var db = conn.GetDatabase(); + RedisKey sourceKey = $"{me}:source"; + RedisKey destinationKey = $"{me}:destination"; + db.KeyDelete(destinationKey); + GeoSearchSetup(sourceKey, db); + + var circle = new GeoSearchCircle(500, GeoUnit.Kilometers); + var res = db.GeoSearchAndStore(sourceKey, destinationKey, 73.9262, 40.8296, circle); + var set = db.GeoSearch(destinationKey, "yankees", new GeoSearchCircle(10000, GeoUnit.Miles)); + Assert.Contains(set, x => x.Member == "yankees"); + Assert.Contains(set, x => x.Member == "red sox"); + Assert.Contains(set, x => x.Member == "orioles"); + Assert.Equal(3, set.Length); + Assert.Equal(3, res); + } + + [Fact] + public async Task GeoSearchCircleAndStoreDistOnly() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var me = Me(); + var db = conn.GetDatabase(); + RedisKey sourceKey = $"{me}:source"; + RedisKey destinationKey = 
$"{me}:destination"; + db.KeyDelete(destinationKey); + GeoSearchSetup(sourceKey, db); + + var circle = new GeoSearchCircle(500, GeoUnit.Kilometers); + var res = db.GeoSearchAndStore(sourceKey, destinationKey, 73.9262, 40.8296, circle, storeDistances: true); + var set = db.SortedSetRangeByRankWithScores(destinationKey); + Assert.Contains(set, x => x.Element == "yankees"); + Assert.Contains(set, x => x.Element == "red sox"); + Assert.Contains(set, x => x.Element == "orioles"); + Assert.InRange(Array.Find(set, x => x.Element == "yankees").Score, 0, .2); + Assert.InRange(Array.Find(set, x => x.Element == "orioles").Score, 286, 287); + Assert.InRange(Array.Find(set, x => x.Element == "red sox").Score, 289, 290); + Assert.Equal(3, set.Length); + Assert.Equal(3, res); + } + + [Fact] + public async Task GeoSearchBadArgs() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var key = Me(); + var db = conn.GetDatabase(); + db.KeyDelete(key); + var circle = new GeoSearchCircle(500, GeoUnit.Kilometers); + var exception = Assert.Throws(() => + db.GeoSearch(key, "irrelevant", circle, demandClosest: false)); + + Assert.Contains("demandClosest must be true if you are not limiting the count for a GEOSEARCH", exception.Message); } } diff --git a/tests/StackExchange.Redis.Tests/GetServerTests.cs b/tests/StackExchange.Redis.Tests/GetServerTests.cs new file mode 100644 index 000000000..50cb9e7ef --- /dev/null +++ b/tests/StackExchange.Redis.Tests/GetServerTests.cs @@ -0,0 +1,150 @@ +using System.Collections.Generic; +using System.Linq; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public abstract class GetServerTestsBase(ITestOutputHelper output, SharedConnectionFixture fixture) + : TestBase(output, fixture) +{ + protected abstract bool IsCluster { get; } + + [Fact] + public async Task GetServersMemoization() + { + await using var conn = Create(); + + var servers0 = conn.GetServers(); + var servers1 = conn.GetServers(); + + 
// different array, exact same contents + Assert.NotSame(servers0, servers1); + Assert.NotEmpty(servers0); + Assert.NotNull(servers0); + Assert.NotNull(servers1); + Assert.Equal(servers0.Length, servers1.Length); + for (int i = 0; i < servers0.Length; i++) + { + Assert.Same(servers0[i], servers1[i]); + } + } + + [Fact] + public async Task GetServerByEndpointMemoization() + { + await using var conn = Create(); + var ep = conn.GetEndPoints().First(); + + IServer x = conn.GetServer(ep), y = conn.GetServer(ep); + Assert.Same(x, y); + + object asyncState = "whatever"; + x = conn.GetServer(ep, asyncState); + y = conn.GetServer(ep, asyncState); + Assert.NotSame(x, y); + } + + [Fact] + public async Task GetServerByKeyMemoization() + { + await using var conn = Create(); + RedisKey key = Me(); + string value = $"{key}:value"; + await conn.GetDatabase().StringSetAsync(key, value); + + IServer x = conn.GetServer(key), y = conn.GetServer(key); + Assert.False(y.IsReplica, "IsReplica"); + Assert.Same(x, y); + + y = conn.GetServer(key, flags: CommandFlags.DemandMaster); + Assert.Same(x, y); + + // async state demands separate instance + y = conn.GetServer(key, "async state", flags: CommandFlags.DemandMaster); + Assert.NotSame(x, y); + + // primary and replica should be different + y = conn.GetServer(key, flags: CommandFlags.DemandReplica); + Assert.NotSame(x, y); + Assert.True(y.IsReplica, "IsReplica"); + + // replica again: same + var z = conn.GetServer(key, flags: CommandFlags.DemandReplica); + Assert.Same(y, z); + + // check routed correctly + var actual = (string?)await x.ExecuteAsync(null, "get", [key], CommandFlags.NoRedirect); + Assert.Equal(value, actual); // check value against primary + + // for replica, don't check the value, because of replication delay - just: no error + _ = y.ExecuteAsync(null, "get", [key], CommandFlags.NoRedirect); + } + + [Theory] + [InlineData(true)] + [InlineData(false)] + public async Task GetServerWithDefaultKey(bool explicitNull) + { + await 
using var conn = Create(); + bool isCluster = conn.ServerSelectionStrategy.ServerType == ServerType.Cluster; + Assert.Equal(IsCluster, isCluster); // check our assumptions! + + // we expect explicit null and default to act the same, but: check + RedisKey key = explicitNull ? RedisKey.Null : default(RedisKey); + + IServer primary = conn.GetServer(key); + Assert.False(primary.IsReplica); + + IServer replica = conn.GetServer(key, flags: CommandFlags.DemandReplica); + Assert.True(replica.IsReplica); + + // check multiple calls + HashSet uniques = []; + for (int i = 0; i < 100; i++) + { + uniques.Add(conn.GetServer(key)); + } + + if (isCluster) + { + Assert.True(uniques.Count > 1); // should be able to get arbitrary servers + } + else + { + Assert.Single(uniques); + } + + uniques.Clear(); + for (int i = 0; i < 100; i++) + { + uniques.Add(conn.GetServer(key, flags: CommandFlags.DemandReplica)); + } + + if (isCluster) + { + Assert.True(uniques.Count > 1); // should be able to get arbitrary servers + } + else + { + Assert.Single(uniques); + } + } +} + +[RunPerProtocol] +public class GetServerTestsCluster(ITestOutputHelper output, SharedConnectionFixture fixture) : GetServerTestsBase(output, fixture) +{ + protected override string GetConfiguration() => TestConfig.Current.ClusterServersAndPorts; + + protected override bool IsCluster => true; +} + +[RunPerProtocol] +public class GetServerTestsStandalone(ITestOutputHelper output, SharedConnectionFixture fixture) : GetServerTestsBase(output, fixture) +{ + protected override string GetConfiguration() => // we want to test flags usage including replicas + TestConfig.Current.PrimaryServerAndPort + "," + TestConfig.Current.ReplicaServerAndPort; + + protected override bool IsCluster => false; +} diff --git a/tests/StackExchange.Redis.Tests/GlobalSuppressions.cs b/tests/StackExchange.Redis.Tests/GlobalSuppressions.cs index 1862e28f2..05be64b21 100644 --- a/tests/StackExchange.Redis.Tests/GlobalSuppressions.cs +++ 
b/tests/StackExchange.Redis.Tests/GlobalSuppressions.cs @@ -1,16 +1,21 @@ - -// This file is used by Code Analysis to maintain SuppressMessage +// This file is used by Code Analysis to maintain SuppressMessage // attributes that are applied to this project. -// Project-level suppressions either have no target or are given +// Project-level suppressions either have no target or are given // a specific target and scoped to a namespace, type, member, etc. -[assembly: System.Diagnostics.CodeAnalysis.SuppressMessage("Redundancy", "RCS1163:Unused parameter.", Justification = "", Scope = "member", Target = "~M:StackExchange.Redis.Tests.ConnectionFailedErrors.SSLCertificateValidationError(System.Boolean)")] -[assembly: System.Diagnostics.CodeAnalysis.SuppressMessage("Redundancy", "RCS1163:Unused parameter.", Justification = "", Scope = "member", Target = "~M:StackExchange.Redis.Tests.PreserveOrder.Execute(System.Boolean)")] -[assembly: System.Diagnostics.CodeAnalysis.SuppressMessage("Redundancy", "RCS1163:Unused parameter.", Justification = "", Scope = "member", Target = "~M:StackExchange.Redis.Tests.PubSub.ExplicitPublishMode")] -[assembly: System.Diagnostics.CodeAnalysis.SuppressMessage("Redundancy", "RCS1163:Unused parameter.", Justification = "", Scope = "member", Target = "~M:StackExchange.Redis.Tests.PubSub.TestBasicPubSubFireAndForget(System.Boolean)")] -[assembly: System.Diagnostics.CodeAnalysis.SuppressMessage("Redundancy", "RCS1163:Unused parameter.", Justification = "", Scope = "member", Target = "~M:StackExchange.Redis.Tests.PubSub.TestPatternPubSub(System.Boolean)")] -[assembly: System.Diagnostics.CodeAnalysis.SuppressMessage("Redundancy", "RCS1163:Unused parameter.", Justification = "", Scope = "member", Target = "~M:StackExchange.Redis.Tests.PubSub.TestBasicPubSub(System.Boolean,System.String,System.Boolean)")] -[assembly: System.Diagnostics.CodeAnalysis.SuppressMessage("Redundancy", "RCS1163:Unused parameter.", Justification = "", Scope = "member", Target = 
"~M:StackExchange.Redis.Tests.SSL.ConnectToSSLServer(System.Boolean,System.Boolean)")] -[assembly: System.Diagnostics.CodeAnalysis.SuppressMessage("Redundancy", "RCS1163:Unused parameter.", Justification = "", Scope = "member", Target = "~M:StackExchange.Redis.Tests.SSL.ShowCertFailures(StackExchange.Redis.Tests.Helpers.TextWriterOutputHelper)~System.Net.Security.RemoteCertificateValidationCallback")] -[assembly: System.Diagnostics.CodeAnalysis.SuppressMessage("Usage", "xUnit1004:Test methods should not be skipped", Justification = "", Scope = "member", Target = "~M:StackExchange.Redis.Tests.ConnectionShutdown.ShutdownRaisesConnectionFailedAndRestore")] -[assembly: System.Diagnostics.CodeAnalysis.SuppressMessage("Usage", "xUnit1004:Test methods should not be skipped", Justification = "", Scope = "member", Target = "~M:StackExchange.Redis.Tests.Issues.BgSaveResponse.ShouldntThrowException(StackExchange.Redis.SaveType)")] +using System.Diagnostics.CodeAnalysis; + +[assembly: SuppressMessage("Redundancy", "RCS1163:Unused parameter.", Justification = "Pending", Scope = "member", Target = "~M:StackExchange.Redis.Tests.ConnectionFailedErrorsTests.SSLCertificateValidationError(System.Boolean)")] +[assembly: SuppressMessage("Redundancy", "RCS1163:Unused parameter.", Justification = "Pending", Scope = "member", Target = "~M:StackExchange.Redis.Tests.PubSubTests.ExplicitPublishMode")] +[assembly: SuppressMessage("Redundancy", "RCS1163:Unused parameter.", Justification = "Pending", Scope = "member", Target = "~M:StackExchange.Redis.Tests.SSLTests.ConnectToSSLServer(System.Boolean,System.Boolean)")] +[assembly: SuppressMessage("Redundancy", "RCS1163:Unused parameter.", Justification = "Pending", Scope = "member", Target = "~M:StackExchange.Redis.Tests.SSLTests.ShowCertFailures(StackExchange.Redis.Tests.Helpers.TextWriterOutputHelper)~System.Net.Security.RemoteCertificateValidationCallback")] +[assembly: SuppressMessage("Usage", "xUnit1004:Test methods should not be skipped", 
Justification = "Pending", Scope = "member", Target = "~M:StackExchange.Redis.Tests.ConnectionShutdownTests.ShutdownRaisesConnectionFailedAndRestore")] +[assembly: SuppressMessage("Usage", "xUnit1004:Test methods should not be skipped", Justification = "Pending", Scope = "member", Target = "~M:StackExchange.Redis.Tests.Issues.BgSaveResponseTests.ShouldntThrowException(StackExchange.Redis.SaveType)")] +[assembly: SuppressMessage("Roslynator", "RCS1077:Optimize LINQ method call.", Justification = "Pending", Scope = "member", Target = "~M:StackExchange.Redis.Tests.SentinelTests.PrimaryConnectTest~System.Threading.Tasks.Task")] +[assembly: SuppressMessage("Roslynator", "RCS1077:Optimize LINQ method call.", Justification = "Pending", Scope = "member", Target = "~M:StackExchange.Redis.Tests.SentinelTests.PrimaryConnectAsyncTest~System.Threading.Tasks.Task")] +[assembly: SuppressMessage("Roslynator", "RCS1077:Optimize LINQ method call.", Justification = "Pending", Scope = "member", Target = "~M:StackExchange.Redis.Tests.SentinelBase.WaitForReplicationAsync(StackExchange.Redis.IServer,System.Nullable{System.TimeSpan})~System.Threading.Tasks.Task")] +[assembly: SuppressMessage("Roslynator", "RCS1077:Optimize LINQ method call.", Justification = "Pending", Scope = "member", Target = "~M:StackExchange.Redis.Tests.SentinelFailoverTests.ManagedPrimaryConnectionEndToEndWithFailoverTest~System.Threading.Tasks.Task")] +[assembly: SuppressMessage("Performance", "CA1846:Prefer 'AsSpan' over 'Substring'", Justification = "Pending", Scope = "member", Target = "~M:RedisSharp.Redis.ReadData~System.Byte[]")] +[assembly: SuppressMessage("Style", "IDE0066:Convert switch statement to expression", Justification = "Pending", Scope = "member", Target = "~M:StackExchange.Redis.Tests.NamingTests.IgnoreMethodConventions(System.Reflection.MethodInfo)~System.Boolean")] +[assembly: SuppressMessage("Roslynator", "RCS1075:Avoid empty catch clause that catches System.Exception.", Justification = 
"Pending", Scope = "member", Target = "~M:StackExchange.Redis.Tests.SentinelBase.WaitForReadyAsync(System.Net.EndPoint,System.Boolean,System.Nullable{System.TimeSpan})~System.Threading.Tasks.Task")] +[assembly: SuppressMessage("Roslynator", "RCS1075:Avoid empty catch clause that catches System.Exception.", Justification = "Pending", Scope = "member", Target = "~M:StackExchange.Redis.Tests.SentinelBase.WaitForRoleAsync(StackExchange.Redis.IServer,System.String,System.Nullable{System.TimeSpan})~System.Threading.Tasks.Task")] diff --git a/tests/StackExchange.Redis.Tests/GlobalUsings.cs b/tests/StackExchange.Redis.Tests/GlobalUsings.cs new file mode 100644 index 000000000..ca9c34d74 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/GlobalUsings.cs @@ -0,0 +1,3 @@ +extern alias respite; +global using AsciiHash = respite::RESPite.AsciiHash; +global using AsciiHashAttribute = respite::RESPite.AsciiHashAttribute; diff --git a/tests/StackExchange.Redis.Tests/HashFieldTests.cs b/tests/StackExchange.Redis.Tests/HashFieldTests.cs new file mode 100644 index 000000000..2bb98eb85 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/HashFieldTests.cs @@ -0,0 +1,571 @@ +using System; +using System.Linq; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +/// +/// Tests for . 
+/// +[RunPerProtocol] +public class HashFieldTests(ITestOutputHelper output, SharedConnectionFixture fixture) : TestBase(output, fixture) +{ + private readonly DateTime nextCentury = new DateTime(2101, 1, 1, 0, 0, 0, DateTimeKind.Utc); + private readonly TimeSpan oneYearInMs = TimeSpan.FromMilliseconds(31536000000); + + private readonly HashEntry[] entries = [new("f1", 1), new("f2", 2)]; + + private readonly RedisValue[] fields = ["f1", "f2"]; + + private readonly RedisValue[] values = [1, 2]; + + [Fact] + public void HashFieldExpire() + { + var db = Create(require: RedisFeatures.v7_4_0_rc1).GetDatabase(); + var hashKey = Me(); + db.HashSet(hashKey, entries); + + var fieldsResult = db.HashFieldExpire(hashKey, fields, oneYearInMs); + Assert.Equal([ExpireResult.Success, ExpireResult.Success], fieldsResult); + + fieldsResult = db.HashFieldExpire(hashKey, fields, nextCentury); + Assert.Equal([ExpireResult.Success, ExpireResult.Success,], fieldsResult); + } + + [Fact] + public void HashFieldExpireNoKey() + { + var db = Create(require: RedisFeatures.v7_4_0_rc2).GetDatabase(); + var hashKey = Me(); + + var fieldsResult = db.HashFieldExpire(hashKey, fields, oneYearInMs); + Assert.Equal([ExpireResult.NoSuchField, ExpireResult.NoSuchField], fieldsResult); + + fieldsResult = db.HashFieldExpire(hashKey, fields, nextCentury); + Assert.Equal([ExpireResult.NoSuchField, ExpireResult.NoSuchField], fieldsResult); + } + + [Fact] + public async Task HashFieldExpireAsync() + { + var db = Create(require: RedisFeatures.v7_4_0_rc1).GetDatabase(); + var hashKey = Me(); + db.HashSet(hashKey, entries); + + var fieldsResult = await db.HashFieldExpireAsync(hashKey, fields, oneYearInMs); + Assert.Equal([ExpireResult.Success, ExpireResult.Success], fieldsResult); + + fieldsResult = await db.HashFieldExpireAsync(hashKey, fields, nextCentury); + Assert.Equal([ExpireResult.Success, ExpireResult.Success], fieldsResult); + } + + [Fact] + public async Task HashFieldExpireAsyncNoKey() + { + var db = 
Create(require: RedisFeatures.v7_4_0_rc2).GetDatabase(); + var hashKey = Me(); + + var fieldsResult = await db.HashFieldExpireAsync(hashKey, fields, oneYearInMs); + Assert.Equal([ExpireResult.NoSuchField, ExpireResult.NoSuchField], fieldsResult); + + fieldsResult = await db.HashFieldExpireAsync(hashKey, fields, nextCentury); + Assert.Equal([ExpireResult.NoSuchField, ExpireResult.NoSuchField], fieldsResult); + } + + [Fact] + public void HashFieldGetExpireDateTimeIsDue() + { + var db = Create(require: RedisFeatures.v7_4_0_rc1).GetDatabase(); + var hashKey = Me(); + db.HashSet(hashKey, entries); + + var result = db.HashFieldExpire(hashKey, ["f1"], new DateTime(2000, 1, 1, 0, 0, 0, DateTimeKind.Utc)); + Assert.Equal([ExpireResult.Due], result); + } + + [Fact] + public void HashFieldExpireNoField() + { + var db = Create(require: RedisFeatures.v7_4_0_rc1).GetDatabase(); + var hashKey = Me(); + db.HashSet(hashKey, entries); + + var result = db.HashFieldExpire(hashKey, ["nonExistingField"], oneYearInMs); + Assert.Equal([ExpireResult.NoSuchField], result); + } + + [Fact] + public void HashFieldExpireConditionsSatisfied() + { + var db = Create(require: RedisFeatures.v7_4_0_rc1).GetDatabase(); + var hashKey = Me(); + db.KeyDelete(hashKey); + db.HashSet(hashKey, entries); + db.HashSet(hashKey, [new("f3", 3), new("f4", 4)]); + var initialExpire = db.HashFieldExpire(hashKey, ["f2", "f3", "f4"], new DateTime(2050, 1, 1, 0, 0, 0, DateTimeKind.Utc)); + Assert.Equal([ExpireResult.Success, ExpireResult.Success, ExpireResult.Success], initialExpire); + + var result = db.HashFieldExpire(hashKey, ["f1"], oneYearInMs, ExpireWhen.HasNoExpiry); + Assert.Equal([ExpireResult.Success], result); + + result = db.HashFieldExpire(hashKey, ["f2"], oneYearInMs, ExpireWhen.HasExpiry); + Assert.Equal([ExpireResult.Success], result); + + result = db.HashFieldExpire(hashKey, ["f3"], nextCentury, ExpireWhen.GreaterThanCurrentExpiry); + Assert.Equal([ExpireResult.Success], result); + + result = 
db.HashFieldExpire(hashKey, ["f4"], oneYearInMs, ExpireWhen.LessThanCurrentExpiry); + Assert.Equal([ExpireResult.Success], result); + } + + [Fact] + public void HashFieldExpireConditionsNotSatisfied() + { + var db = Create(require: RedisFeatures.v7_4_0_rc1).GetDatabase(); + var hashKey = Me(); + db.KeyDelete(hashKey); + db.HashSet(hashKey, entries); + db.HashSet(hashKey, [new("f3", 3), new("f4", 4)]); + var initialExpire = db.HashFieldExpire(hashKey, ["f2", "f3", "f4"], new DateTime(2050, 1, 1, 0, 0, 0, DateTimeKind.Utc)); + Assert.Equal([ExpireResult.Success, ExpireResult.Success, ExpireResult.Success], initialExpire); + + var result = db.HashFieldExpire(hashKey, ["f1"], oneYearInMs, ExpireWhen.HasExpiry); + Assert.Equal([ExpireResult.ConditionNotMet], result); + + result = db.HashFieldExpire(hashKey, ["f2"], oneYearInMs, ExpireWhen.HasNoExpiry); + Assert.Equal([ExpireResult.ConditionNotMet], result); + + result = db.HashFieldExpire(hashKey, ["f3"], nextCentury, ExpireWhen.LessThanCurrentExpiry); + Assert.Equal([ExpireResult.ConditionNotMet], result); + + result = db.HashFieldExpire(hashKey, ["f4"], oneYearInMs, ExpireWhen.GreaterThanCurrentExpiry); + Assert.Equal([ExpireResult.ConditionNotMet], result); + } + + [Fact] + public void HashFieldGetExpireDateTime() + { + var db = Create(require: RedisFeatures.v7_4_0_rc1).GetDatabase(); + var hashKey = Me(); + db.HashSet(hashKey, entries); + db.HashFieldExpire(hashKey, fields, nextCentury); + long ms = new DateTimeOffset(nextCentury).ToUnixTimeMilliseconds(); + + var result = db.HashFieldGetExpireDateTime(hashKey, ["f1"]); + Assert.Equal([ms], result); + + var fieldsResult = db.HashFieldGetExpireDateTime(hashKey, fields); + Assert.Equal([ms, ms], fieldsResult); + } + + [Fact] + public void HashFieldExpireFieldNoExpireTime() + { + var db = Create(require: RedisFeatures.v7_4_0_rc1).GetDatabase(); + var hashKey = Me(); + db.HashSet(hashKey, entries); + + var result = db.HashFieldGetExpireDateTime(hashKey, ["f1"]); + 
Assert.Equal([-1L], result); + + var fieldsResult = db.HashFieldGetExpireDateTime(hashKey, fields); + Assert.Equal([-1, -1,], fieldsResult); + } + + [Fact] + public void HashFieldGetExpireDateTimeNoKey() + { + var db = Create(require: RedisFeatures.v7_4_0_rc2).GetDatabase(); + var hashKey = Me(); + + var fieldsResult = db.HashFieldGetExpireDateTime(hashKey, fields); + Assert.Equal([-2, -2,], fieldsResult); + } + + [Fact] + public void HashFieldGetExpireDateTimeNoField() + { + var db = Create(require: RedisFeatures.v7_4_0_rc1).GetDatabase(); + var hashKey = Me(); + db.HashSet(hashKey, entries); + db.HashFieldExpire(hashKey, fields, oneYearInMs); + + var fieldsResult = db.HashFieldGetExpireDateTime(hashKey, ["notExistingField1", "notExistingField2"]); + Assert.Equal([-2, -2,], fieldsResult); + } + + [Fact] + public void HashFieldGetTimeToLive() + { + var db = Create(require: RedisFeatures.v7_4_0_rc1).GetDatabase(); + var hashKey = Me(); + db.HashSet(hashKey, entries); + db.HashFieldExpire(hashKey, fields, oneYearInMs); + long ms = new DateTimeOffset(nextCentury).ToUnixTimeMilliseconds(); + + var result = db.HashFieldGetTimeToLive(hashKey, ["f1"]); + Assert.NotNull(result); + Assert.True(result.Length == 1); + Assert.True(result[0] > 0); + + var fieldsResult = db.HashFieldGetTimeToLive(hashKey, fields); + Assert.NotNull(fieldsResult); + Assert.True(fieldsResult.Length > 0); + Assert.True(fieldsResult.All(x => x > 0)); + } + + [Fact] + public void HashFieldGetTimeToLiveNoExpireTime() + { + var db = Create(require: RedisFeatures.v7_4_0_rc1).GetDatabase(); + var hashKey = Me(); + db.HashSet(hashKey, entries); + + var fieldsResult = db.HashFieldGetTimeToLive(hashKey, fields); + Assert.Equal([-1, -1,], fieldsResult); + } + + [Fact] + public void HashFieldGetTimeToLiveNoKey() + { + var db = Create(require: RedisFeatures.v7_4_0_rc2).GetDatabase(); + var hashKey = Me(); + + var fieldsResult = db.HashFieldGetTimeToLive(hashKey, fields); + Assert.Equal([-2, -2,], fieldsResult); 
+ } + + [Fact] + public void HashFieldGetTimeToLiveNoField() + { + var db = Create(require: RedisFeatures.v7_4_0_rc1).GetDatabase(); + var hashKey = Me(); + db.HashSet(hashKey, entries); + db.HashFieldExpire(hashKey, fields, oneYearInMs); + + var fieldsResult = db.HashFieldGetTimeToLive(hashKey, ["notExistingField1", "notExistingField2"]); + Assert.Equal([-2, -2,], fieldsResult); + } + + [Fact] + public void HashFieldPersist() + { + var db = Create(require: RedisFeatures.v7_4_0_rc1).GetDatabase(); + var hashKey = Me(); + db.HashSet(hashKey, entries); + db.HashFieldExpire(hashKey, fields, oneYearInMs); + long ms = new DateTimeOffset(nextCentury).ToUnixTimeMilliseconds(); + + var result = db.HashFieldPersist(hashKey, ["f1"]); + Assert.Equal([PersistResult.Success], result); + + db.HashFieldExpire(hashKey, fields, oneYearInMs); + + var fieldsResult = db.HashFieldPersist(hashKey, fields); + Assert.Equal([PersistResult.Success, PersistResult.Success], fieldsResult); + } + + [Fact] + public void HashFieldPersistNoExpireTime() + { + var db = Create(require: RedisFeatures.v7_4_0_rc1).GetDatabase(); + var hashKey = Me(); + db.HashSet(hashKey, entries); + + var fieldsResult = db.HashFieldPersist(hashKey, fields); + Assert.Equal([PersistResult.ConditionNotMet, PersistResult.ConditionNotMet], fieldsResult); + } + + [Fact] + public void HashFieldPersistNoKey() + { + var db = Create(require: RedisFeatures.v7_4_0_rc2).GetDatabase(); + var hashKey = Me(); + + var fieldsResult = db.HashFieldPersist(hashKey, fields); + Assert.Equal([PersistResult.NoSuchField, PersistResult.NoSuchField], fieldsResult); + } + + [Fact] + public void HashFieldPersistNoField() + { + var db = Create(require: RedisFeatures.v7_4_0_rc1).GetDatabase(); + var hashKey = Me(); + db.HashSet(hashKey, entries); + db.HashFieldExpire(hashKey, fields, oneYearInMs); + + var fieldsResult = db.HashFieldPersist(hashKey, ["notExistingField1", "notExistingField2"]); + Assert.Equal([PersistResult.NoSuchField, 
PersistResult.NoSuchField], fieldsResult); + } + + [Fact] + public void HashFieldGetAndSetExpiry() + { + using var conn = Create(require: RedisFeatures.v8_0_0_M04); + var db = conn.GetDatabase(); + var hashKey = Me(); + + // testing with timespan + db.HashSet(hashKey, entries); + var fieldResult = db.HashFieldGetAndSetExpiry(hashKey, "f1", TimeSpan.FromHours(1)); + Assert.Equal(1, fieldResult); + var fieldTtl = db.HashFieldGetTimeToLive(hashKey, new RedisValue[] { "f1" })[0]; + Assert.InRange(fieldTtl, TimeSpan.FromMinutes(59).TotalMilliseconds, TimeSpan.FromHours(1).TotalMilliseconds); + + // testing with datetime + db.HashSet(hashKey, entries); + fieldResult = db.HashFieldGetAndSetExpiry(hashKey, "f1", DateTime.Now.AddMinutes(120)); + Assert.Equal(1, fieldResult); + fieldTtl = db.HashFieldGetTimeToLive(hashKey, new RedisValue[] { "f1" })[0]; + Assert.InRange(fieldTtl, TimeSpan.FromMinutes(119).TotalMilliseconds, TimeSpan.FromHours(2).TotalMilliseconds); + + // testing persist + fieldResult = db.HashFieldGetAndSetExpiry(hashKey, "f1", persist: true); + Assert.Equal(1, fieldResult); + fieldTtl = db.HashFieldGetTimeToLive(hashKey, new RedisValue[] { "f1" })[0]; + Assert.Equal(-1, fieldTtl); + + // testing multiple fields with timespan + db.HashSet(hashKey, entries); + var fieldResults = db.HashFieldGetAndSetExpiry(hashKey, fields, TimeSpan.FromHours(1)); + Assert.Equal(values, fieldResults); + var fieldTtls = db.HashFieldGetTimeToLive(hashKey, fields); + Assert.InRange(fieldTtls[0], TimeSpan.FromMinutes(59).TotalMilliseconds, TimeSpan.FromHours(1).TotalMilliseconds); + Assert.InRange(fieldTtls[1], TimeSpan.FromMinutes(59).TotalMilliseconds, TimeSpan.FromHours(1).TotalMilliseconds); + + // testing multiple fields with datetime + db.HashSet(hashKey, entries); + fieldResults = db.HashFieldGetAndSetExpiry(hashKey, fields, DateTime.Now.AddMinutes(120)); + Assert.Equal(values, fieldResults); + fieldTtls = db.HashFieldGetTimeToLive(hashKey, fields); + 
Assert.InRange(fieldTtls[0], TimeSpan.FromMinutes(119).TotalMilliseconds, TimeSpan.FromHours(2).TotalMilliseconds); + Assert.InRange(fieldTtls[1], TimeSpan.FromMinutes(119).TotalMilliseconds, TimeSpan.FromHours(2).TotalMilliseconds); + + // testing multiple fields with persist + fieldResults = db.HashFieldGetAndSetExpiry(hashKey, fields, persist: true); + Assert.Equal(values, fieldResults); + fieldTtls = db.HashFieldGetTimeToLive(hashKey, fields); + Assert.Equal(new long[] { -1, -1 }, fieldTtls); + } + + [Fact] + public async Task HashFieldGetAndSetExpiryAsync() + { + await using var conn = Create(require: RedisFeatures.v8_0_0_M04); + var db = conn.GetDatabase(); + var hashKey = Me(); + + // testing with timespan + db.HashSet(hashKey, entries); + var fieldResult = await db.HashFieldGetAndSetExpiryAsync(hashKey, "f1", TimeSpan.FromHours(1)); + Assert.Equal(1, fieldResult); + var fieldTtl = db.HashFieldGetTimeToLive(hashKey, new RedisValue[] { "f1" })[0]; + Assert.InRange(fieldTtl, TimeSpan.FromMinutes(59).TotalMilliseconds, TimeSpan.FromHours(1).TotalMilliseconds); + + // testing with datetime + db.HashSet(hashKey, entries); + fieldResult = await db.HashFieldGetAndSetExpiryAsync(hashKey, "f1", DateTime.Now.AddMinutes(120)); + Assert.Equal(1, fieldResult); + fieldTtl = db.HashFieldGetTimeToLive(hashKey, new RedisValue[] { "f1" })[0]; + Assert.InRange(fieldTtl, TimeSpan.FromMinutes(119).TotalMilliseconds, TimeSpan.FromHours(2).TotalMilliseconds); + + // testing persist + fieldResult = await db.HashFieldGetAndSetExpiryAsync(hashKey, "f1", persist: true); + Assert.Equal(1, fieldResult); + fieldTtl = db.HashFieldGetTimeToLive(hashKey, new RedisValue[] { "f1" })[0]; + Assert.Equal(-1, fieldTtl); + + // testing multiple fields with timespan + db.HashSet(hashKey, entries); + var fieldResults = await db.HashFieldGetAndSetExpiryAsync(hashKey, fields, TimeSpan.FromHours(1)); + Assert.Equal(values, fieldResults); + var fieldTtls = db.HashFieldGetTimeToLive(hashKey, fields); + 
Assert.InRange(fieldTtls[0], TimeSpan.FromMinutes(59).TotalMilliseconds, TimeSpan.FromHours(1).TotalMilliseconds); + Assert.InRange(fieldTtls[1], TimeSpan.FromMinutes(59).TotalMilliseconds, TimeSpan.FromHours(1).TotalMilliseconds); + + // testing multiple fields with datetime + db.HashSet(hashKey, entries); + fieldResults = await db.HashFieldGetAndSetExpiryAsync(hashKey, fields, DateTime.Now.AddMinutes(120)); + Assert.Equal(values, fieldResults); + fieldTtls = db.HashFieldGetTimeToLive(hashKey, fields); + Assert.InRange(fieldTtls[0], TimeSpan.FromMinutes(119).TotalMilliseconds, TimeSpan.FromHours(2).TotalMilliseconds); + Assert.InRange(fieldTtls[1], TimeSpan.FromMinutes(119).TotalMilliseconds, TimeSpan.FromHours(2).TotalMilliseconds); + + // testing multiple fields with persist + fieldResults = await db.HashFieldGetAndSetExpiryAsync(hashKey, fields, persist: true); + Assert.Equal(values, fieldResults); + fieldTtls = db.HashFieldGetTimeToLive(hashKey, fields); + Assert.Equal(new long[] { -1, -1 }, fieldTtls); + } + + [Fact] + public void HashFieldSetAndSetExpiry() + { + using var conn = Create(require: RedisFeatures.v8_0_0_M04); + var db = conn.GetDatabase(); + var hashKey = Me(); + + // testing with timespan + var result = db.HashFieldSetAndSetExpiry(hashKey, "f1", 1, TimeSpan.FromHours(1)); + Assert.Equal(1, result); + var fieldTtl = db.HashFieldGetTimeToLive(hashKey, new RedisValue[] { "f1" })[0]; + Assert.InRange(fieldTtl, TimeSpan.FromMinutes(59).TotalMilliseconds, TimeSpan.FromHours(1).TotalMilliseconds); + + // testing with datetime + result = db.HashFieldSetAndSetExpiry(hashKey, "f1", 1, DateTime.Now.AddMinutes(120)); + Assert.Equal(1, result); + fieldTtl = db.HashFieldGetTimeToLive(hashKey, new RedisValue[] { "f1" })[0]; + Assert.InRange(fieldTtl, TimeSpan.FromMinutes(119).TotalMilliseconds, TimeSpan.FromHours(2).TotalMilliseconds); + + // testing with keepttl + result = db.HashFieldSetAndSetExpiry(hashKey, "f1", 1, keepTtl: true); + Assert.Equal(1, 
result); + fieldTtl = db.HashFieldGetTimeToLive(hashKey, new RedisValue[] { "f1" })[0]; + Assert.InRange(fieldTtl, TimeSpan.FromMinutes(119).TotalMilliseconds, TimeSpan.FromHours(2).TotalMilliseconds); + + // testing multiple fields with timespan + result = db.HashFieldSetAndSetExpiry(hashKey, entries, TimeSpan.FromHours(1)); + Assert.Equal(1, result); + var fieldTtls = db.HashFieldGetTimeToLive(hashKey, fields); + Assert.InRange(fieldTtls[0], TimeSpan.FromMinutes(59).TotalMilliseconds, TimeSpan.FromHours(1).TotalMilliseconds); + Assert.InRange(fieldTtls[1], TimeSpan.FromMinutes(59).TotalMilliseconds, TimeSpan.FromHours(1).TotalMilliseconds); + + // testing multiple fields with datetime + result = db.HashFieldSetAndSetExpiry(hashKey, entries, DateTime.Now.AddMinutes(120)); + Assert.Equal(1, result); + fieldTtls = db.HashFieldGetTimeToLive(hashKey, fields); + Assert.InRange(fieldTtls[0], TimeSpan.FromMinutes(119).TotalMilliseconds, TimeSpan.FromHours(2).TotalMilliseconds); + Assert.InRange(fieldTtls[1], TimeSpan.FromMinutes(119).TotalMilliseconds, TimeSpan.FromHours(2).TotalMilliseconds); + + // testing multiple fields with keepttl + result = db.HashFieldSetAndSetExpiry(hashKey, entries, keepTtl: true); + Assert.Equal(1, result); + fieldTtls = db.HashFieldGetTimeToLive(hashKey, fields); + Assert.InRange(fieldTtls[0], TimeSpan.FromMinutes(119).TotalMilliseconds, TimeSpan.FromHours(2).TotalMilliseconds); + Assert.InRange(fieldTtls[1], TimeSpan.FromMinutes(119).TotalMilliseconds, TimeSpan.FromHours(2).TotalMilliseconds); + + // testing with ExpireWhen.Exists + db.KeyDelete(hashKey); + result = db.HashFieldSetAndSetExpiry(hashKey, "f1", 1, TimeSpan.FromHours(1), when: When.Exists); + Assert.Equal(0, result); // should not set because it doesnt exist + + // testing with ExpireWhen.NotExists + result = db.HashFieldSetAndSetExpiry(hashKey, "f1", 1, TimeSpan.FromHours(1), when: When.NotExists); + Assert.Equal(1, result); // should set because it doesnt exist + fieldTtl = 
db.HashFieldGetTimeToLive(hashKey, new RedisValue[] { "f1" })[0]; + Assert.InRange(fieldTtl, TimeSpan.FromMinutes(59).TotalMilliseconds, TimeSpan.FromHours(1).TotalMilliseconds); + + // testing with ExpireWhen.GreaterThanCurrentExpiry + result = db.HashFieldSetAndSetExpiry(hashKey, "f1", -1, keepTtl: true, when: When.Exists); + Assert.Equal(1, result); // should set because it exists + fieldTtl = db.HashFieldGetTimeToLive(hashKey, new RedisValue[] { "f1" })[0]; + Assert.InRange(fieldTtl, TimeSpan.FromMinutes(59).TotalMilliseconds, TimeSpan.FromHours(1).TotalMilliseconds); + } + + [Fact] + public async Task HashFieldSetAndSetExpiryAsync() + { + await using var conn = Create(require: RedisFeatures.v8_0_0_M04); + var db = conn.GetDatabase(); + var hashKey = Me(); + + // testing with timespan + var result = await db.HashFieldSetAndSetExpiryAsync(hashKey, "f1", 1, TimeSpan.FromHours(1)); + Assert.Equal(1, result); + var fieldTtl = db.HashFieldGetTimeToLive(hashKey, new RedisValue[] { "f1" })[0]; + Assert.InRange(fieldTtl, TimeSpan.FromMinutes(59).TotalMilliseconds, TimeSpan.FromHours(1).TotalMilliseconds); + + // testing with datetime + result = await db.HashFieldSetAndSetExpiryAsync(hashKey, "f1", 1, DateTime.Now.AddMinutes(120)); + Assert.Equal(1, result); + fieldTtl = db.HashFieldGetTimeToLive(hashKey, new RedisValue[] { "f1" })[0]; + Assert.InRange(fieldTtl, TimeSpan.FromMinutes(119).TotalMilliseconds, TimeSpan.FromHours(2).TotalMilliseconds); + + // testing with keepttl + result = await db.HashFieldSetAndSetExpiryAsync(hashKey, "f1", 1, keepTtl: true); + Assert.Equal(1, result); + fieldTtl = db.HashFieldGetTimeToLive(hashKey, new RedisValue[] { "f1" })[0]; + Assert.InRange(fieldTtl, TimeSpan.FromMinutes(119).TotalMilliseconds, TimeSpan.FromHours(2).TotalMilliseconds); + + // testing multiple fields with timespan + result = await db.HashFieldSetAndSetExpiryAsync(hashKey, entries, TimeSpan.FromHours(1)); + Assert.Equal(1, result); + var fieldTtls = 
db.HashFieldGetTimeToLive(hashKey, fields); + Assert.InRange(fieldTtls[0], TimeSpan.FromMinutes(59).TotalMilliseconds, TimeSpan.FromHours(1).TotalMilliseconds); + Assert.InRange(fieldTtls[1], TimeSpan.FromMinutes(59).TotalMilliseconds, TimeSpan.FromHours(1).TotalMilliseconds); + + // testing multiple fields with datetime + result = await db.HashFieldSetAndSetExpiryAsync(hashKey, entries, DateTime.Now.AddMinutes(120)); + Assert.Equal(1, result); + fieldTtls = db.HashFieldGetTimeToLive(hashKey, fields); + Assert.InRange(fieldTtls[0], TimeSpan.FromMinutes(119).TotalMilliseconds, TimeSpan.FromHours(2).TotalMilliseconds); + Assert.InRange(fieldTtls[1], TimeSpan.FromMinutes(119).TotalMilliseconds, TimeSpan.FromHours(2).TotalMilliseconds); + + // testing multiple fields with keepttl + result = await db.HashFieldSetAndSetExpiryAsync(hashKey, entries, keepTtl: true); + Assert.Equal(1, result); + fieldTtls = db.HashFieldGetTimeToLive(hashKey, fields); + Assert.InRange(fieldTtls[0], TimeSpan.FromMinutes(119).TotalMilliseconds, TimeSpan.FromHours(2).TotalMilliseconds); + Assert.InRange(fieldTtls[1], TimeSpan.FromMinutes(119).TotalMilliseconds, TimeSpan.FromHours(2).TotalMilliseconds); + + // testing with ExpireWhen.Exists + db.KeyDelete(hashKey); + result = await db.HashFieldSetAndSetExpiryAsync(hashKey, "f1", 1, TimeSpan.FromHours(1), when: When.Exists); + Assert.Equal(0, result); // should not set because it doesnt exist + + // testing with ExpireWhen.NotExists + result = await db.HashFieldSetAndSetExpiryAsync(hashKey, "f1", 1, TimeSpan.FromHours(1), when: When.NotExists); + Assert.Equal(1, result); // should set because it doesnt exist + fieldTtl = db.HashFieldGetTimeToLive(hashKey, new RedisValue[] { "f1" })[0]; + Assert.InRange(fieldTtl, TimeSpan.FromMinutes(59).TotalMilliseconds, TimeSpan.FromHours(1).TotalMilliseconds); + + // testing with ExpireWhen.GreaterThanCurrentExpiry + result = await db.HashFieldSetAndSetExpiryAsync(hashKey, "f1", -1, keepTtl: true, when: 
When.Exists); + Assert.Equal(1, result); // should set because it exists + fieldTtl = db.HashFieldGetTimeToLive(hashKey, new RedisValue[] { "f1" })[0]; + Assert.InRange(fieldTtl, TimeSpan.FromMinutes(59).TotalMilliseconds, TimeSpan.FromHours(1).TotalMilliseconds); + } + [Fact] + public void HashFieldGetAndDelete() + { + using var conn = Create(require: RedisFeatures.v8_0_0_M04); + var db = conn.GetDatabase(); + var hashKey = Me(); + + // single field + db.HashSet(hashKey, entries); + var fieldResult = db.HashFieldGetAndDelete(hashKey, "f1"); + Assert.Equal(1, fieldResult); + Assert.False(db.HashExists(hashKey, "f1")); + + // multiple fields + db.HashSet(hashKey, entries); + var fieldResults = db.HashFieldGetAndDelete(hashKey, fields); + Assert.Equal(values, fieldResults); + Assert.False(db.HashExists(hashKey, "f1")); + Assert.False(db.HashExists(hashKey, "f2")); + } + + [Fact] + public async Task HashFieldGetAndDeleteAsync() + { + await using var conn = Create(require: RedisFeatures.v8_0_0_M04); + var db = conn.GetDatabase(); + var hashKey = Me(); + + // single field + db.HashSet(hashKey, entries); + var fieldResult = await db.HashFieldGetAndDeleteAsync(hashKey, "f1"); + Assert.Equal(1, fieldResult); + Assert.False(db.HashExists(hashKey, "f1")); + + // multiple fields + db.HashSet(hashKey, entries); + var fieldResults = await db.HashFieldGetAndDeleteAsync(hashKey, fields); + Assert.Equal(values, fieldResults); + Assert.False(db.HashExists(hashKey, "f1")); + Assert.False(db.HashExists(hashKey, "f2")); + } +} diff --git a/tests/StackExchange.Redis.Tests/HashTests.cs b/tests/StackExchange.Redis.Tests/HashTests.cs new file mode 100644 index 000000000..9523ca102 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/HashTests.cs @@ -0,0 +1,769 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +/// +/// Tests for . 
+/// +[RunPerProtocol] +public class HashTests(ITestOutputHelper output, SharedConnectionFixture fixture) : TestBase(output, fixture) +{ + [Fact] + public async Task TestIncrBy() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + var key = Me(); + _ = db.KeyDeleteAsync(key).ForAwait(); + + const int iterations = 100; + var aTasks = new Task[iterations]; + var bTasks = new Task[iterations]; + for (int i = 1; i < iterations + 1; i++) + { + aTasks[i - 1] = db.HashIncrementAsync(key, "a", 1); + bTasks[i - 1] = db.HashIncrementAsync(key, "b", -1); + } + await Task.WhenAll(bTasks).ForAwait(); + for (int i = 1; i < iterations + 1; i++) + { + Assert.Equal(i, aTasks[i - 1].Result); + Assert.Equal(-i, bTasks[i - 1].Result); + } + } + + [Fact] + public async Task ScanAsync() + { + await using var conn = Create(require: RedisFeatures.v2_8_0); + + var db = conn.GetDatabase(); + var key = Me(); + await db.KeyDeleteAsync(key); + for (int i = 0; i < 200; i++) + { + await db.HashSetAsync(key, "key" + i, "value " + i); + } + + int count = 0; + // works for async + await foreach (var _ in db.HashScanAsync(key, pageSize: 20)) + { + count++; + } + Assert.Equal(200, count); + + // and sync=>async (via cast) + count = 0; + await foreach (var _ in (IAsyncEnumerable)db.HashScan(key, pageSize: 20)) + { + count++; + } + Assert.Equal(200, count); + + // and sync (native) + count = 0; + foreach (var _ in db.HashScan(key, pageSize: 20)) + { + count++; + } + Assert.Equal(200, count); + + // and async=>sync (via cast) + count = 0; + foreach (var _ in (IEnumerable)db.HashScanAsync(key, pageSize: 20)) + { + count++; + } + Assert.Equal(200, count); + } + + [Fact] + public async Task Scan() + { + await using var conn = Create(require: RedisFeatures.v2_8_0); + + var db = conn.GetDatabase(); + + var key = Me(); + _ = db.KeyDeleteAsync(key); + _ = db.HashSetAsync(key, "abc", "def"); + _ = db.HashSetAsync(key, "ghi", "jkl"); + _ = db.HashSetAsync(key, "mno", "pqr"); + + var t1 = 
db.HashScan(key); + var t2 = db.HashScan(key, "*h*"); + var t3 = db.HashScan(key); + var t4 = db.HashScan(key, "*h*"); + + var v1 = t1.ToArray(); + var v2 = t2.ToArray(); + var v3 = t3.ToArray(); + var v4 = t4.ToArray(); + + Assert.Equal(3, v1.Length); + Assert.Single(v2); + Assert.Equal(3, v3.Length); + Assert.Single(v4); + Array.Sort(v1, (x, y) => string.Compare(x.Name, y.Name)); + Array.Sort(v2, (x, y) => string.Compare(x.Name, y.Name)); + Array.Sort(v3, (x, y) => string.Compare(x.Name, y.Name)); + Array.Sort(v4, (x, y) => string.Compare(x.Name, y.Name)); + + Assert.Equal("abc=def,ghi=jkl,mno=pqr", string.Join(",", v1.Select(pair => pair.Name + "=" + pair.Value))); + Assert.Equal("ghi=jkl", string.Join(",", v2.Select(pair => pair.Name + "=" + pair.Value))); + Assert.Equal("abc=def,ghi=jkl,mno=pqr", string.Join(",", v3.Select(pair => pair.Name + "=" + pair.Value))); + Assert.Equal("ghi=jkl", string.Join(",", v4.Select(pair => pair.Name + "=" + pair.Value))); + } + + [Fact] + public async Task ScanNoValuesAsync() + { + await using var conn = Create(require: RedisFeatures.v7_4_0_rc1); + + var db = conn.GetDatabase(); + var key = Me(); + await db.KeyDeleteAsync(key); + for (int i = 0; i < 200; i++) + { + await db.HashSetAsync(key, "key" + i, "value " + i); + } + + int count = 0; + // works for async + await foreach (var _ in db.HashScanNoValuesAsync(key, pageSize: 20)) + { + count++; + } + Assert.Equal(200, count); + + // and sync=>async (via cast) + count = 0; + await foreach (var _ in (IAsyncEnumerable)db.HashScanNoValues(key, pageSize: 20)) + { + count++; + } + Assert.Equal(200, count); + + // and sync (native) + count = 0; + foreach (var _ in db.HashScanNoValues(key, pageSize: 20)) + { + count++; + } + Assert.Equal(200, count); + + // and async=>sync (via cast) + count = 0; + foreach (var _ in (IEnumerable)db.HashScanNoValuesAsync(key, pageSize: 20)) + { + count++; + } + Assert.Equal(200, count); + } + + [Fact] + public async Task ScanNoValues() + { + await 
using var conn = Create(require: RedisFeatures.v7_4_0_rc1); + + var db = conn.GetDatabase(); + + var key = Me(); + _ = db.KeyDeleteAsync(key); + _ = db.HashSetAsync(key, "abc", "def"); + _ = db.HashSetAsync(key, "ghi", "jkl"); + _ = db.HashSetAsync(key, "mno", "pqr"); + + var t1 = db.HashScanNoValues(key); + var t2 = db.HashScanNoValues(key, "*h*"); + var t3 = db.HashScanNoValues(key); + var t4 = db.HashScanNoValues(key, "*h*"); + + var v1 = t1.ToArray(); + var v2 = t2.ToArray(); + var v3 = t3.ToArray(); + var v4 = t4.ToArray(); + + Assert.Equal(3, v1.Length); + Assert.Single(v2); + Assert.Equal(3, v3.Length); + Assert.Single(v4); + + Array.Sort(v1); + Array.Sort(v2); + Array.Sort(v3); + Array.Sort(v4); + + Assert.Equal(new RedisValue[] { "abc", "ghi", "mno" }, v1); + Assert.Equal(new RedisValue[] { "ghi" }, v2); + Assert.Equal(new RedisValue[] { "abc", "ghi", "mno" }, v3); + Assert.Equal(new RedisValue[] { "ghi" }, v4); + } + + [Fact] + public async Task TestIncrementOnHashThatDoesntExist() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + _ = db.KeyDeleteAsync("keynotexist"); + var result1 = db.Wait(db.HashIncrementAsync("keynotexist", "fieldnotexist", 1)); + var result2 = db.Wait(db.HashIncrementAsync("keynotexist", "anotherfieldnotexist", 1)); + Assert.Equal(1, result1); + Assert.Equal(1, result2); + } + + [Fact] + public async Task TestIncrByFloat() + { + await using var conn = Create(require: RedisFeatures.v2_6_0); + + var db = conn.GetDatabase(); + var key = Me(); + _ = db.KeyDeleteAsync(key).ForAwait(); + var aTasks = new Task[1000]; + var bTasks = new Task[1000]; + for (int i = 1; i < 1001; i++) + { + aTasks[i - 1] = db.HashIncrementAsync(key, "a", 1.0); + bTasks[i - 1] = db.HashIncrementAsync(key, "b", -1.0); + } + await Task.WhenAll(bTasks).ForAwait(); + for (int i = 1; i < 1001; i++) + { + Assert.Equal(i, aTasks[i - 1].Result); + Assert.Equal(-i, bTasks[i - 1].Result); + } + } + + [Fact] + public async Task TestGetAll() + { + 
await using var conn = Create(); + + var db = conn.GetDatabase(); + var key = Me(); + await db.KeyDeleteAsync(key).ForAwait(); + var shouldMatch = new Dictionary(); + var random = new Random(); + + for (int i = 0; i < 1000; i++) + { + var guid = Guid.NewGuid(); + var value = random.Next(int.MaxValue); + + shouldMatch[guid] = value; + + _ = db.HashIncrementAsync(key, guid.ToString(), value); + } + + var inRedis = (await db.HashGetAllAsync(key).ForAwait()).ToDictionary( + x => Guid.Parse((string)x.Name!), x => int.Parse(x.Value!)); + + Assert.Equal(shouldMatch.Count, inRedis.Count); + + foreach (var k in shouldMatch.Keys) + { + Assert.Equal(shouldMatch[k], inRedis[k]); + } + } + + [Fact] + public async Task TestGet() + { + await using var conn = Create(); + + var key = Me(); + var db = conn.GetDatabase(); + var shouldMatch = new Dictionary(); + var random = new Random(); + + for (int i = 1; i < 1000; i++) + { + var guid = Guid.NewGuid(); + var value = random.Next(int.MaxValue); + + shouldMatch[guid] = value; + + _ = db.HashIncrementAsync(key, guid.ToString(), value); + } + + foreach (var k in shouldMatch.Keys) + { + var inRedis = await db.HashGetAsync(key, k.ToString()).ForAwait(); + var num = int.Parse(inRedis!); + + Assert.Equal(shouldMatch[k], num); + } + } + + /// + /// Tests for . 
+ /// + [Fact] + public async Task TestSet() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + var hashkey = Me(); + var del = db.KeyDeleteAsync(hashkey).ForAwait(); + + var val0 = db.HashGetAsync(hashkey, "field").ForAwait(); + var set0 = db.HashSetAsync(hashkey, "field", "value1").ForAwait(); + var val1 = db.HashGetAsync(hashkey, "field").ForAwait(); + var set1 = db.HashSetAsync(hashkey, "field", "value2").ForAwait(); + var val2 = db.HashGetAsync(hashkey, "field").ForAwait(); + + var set2 = db.HashSetAsync(hashkey, "field-blob", Encoding.UTF8.GetBytes("value3")).ForAwait(); + var val3 = db.HashGetAsync(hashkey, "field-blob").ForAwait(); + + var set3 = db.HashSetAsync(hashkey, "empty_type1", "").ForAwait(); + var val4 = db.HashGetAsync(hashkey, "empty_type1").ForAwait(); + var set4 = db.HashSetAsync(hashkey, "empty_type2", RedisValue.EmptyString).ForAwait(); + var val5 = db.HashGetAsync(hashkey, "empty_type2").ForAwait(); + + await del; + Assert.Null((string?)(await val0)); + Assert.True(await set0); + Assert.Equal("value1", await val1); + Assert.False(await set1); + Assert.Equal("value2", await val2); + + Assert.True(await set2); + Assert.Equal("value3", await val3); + + Assert.True(await set3); + Assert.Equal("", await val4); + Assert.True(await set4); + Assert.Equal("", await val5); + } + + /// + /// Tests for . 
+ /// + [Fact] + public async Task TestSetNotExists() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + var hashkey = Me(); + var del = db.KeyDeleteAsync(hashkey).ForAwait(); + + var val0 = db.HashGetAsync(hashkey, "field").ForAwait(); + var set0 = db.HashSetAsync(hashkey, "field", "value1", When.NotExists).ForAwait(); + var val1 = db.HashGetAsync(hashkey, "field").ForAwait(); + var set1 = db.HashSetAsync(hashkey, "field", "value2", When.NotExists).ForAwait(); + var val2 = db.HashGetAsync(hashkey, "field").ForAwait(); + + var set2 = db.HashSetAsync(hashkey, "field-blob", Encoding.UTF8.GetBytes("value3"), When.NotExists).ForAwait(); + var val3 = db.HashGetAsync(hashkey, "field-blob").ForAwait(); + var set3 = db.HashSetAsync(hashkey, "field-blob", Encoding.UTF8.GetBytes("value3"), When.NotExists).ForAwait(); + + await del; + Assert.Null((string?)(await val0)); + Assert.True(await set0); + Assert.Equal("value1", await val1); + Assert.False(await set1); + Assert.Equal("value1", await val2); + + Assert.True(await set2); + Assert.Equal("value3", await val3); + Assert.False(await set3); + } + + /// + /// Tests for . + /// + [Fact] + public async Task TestDelSingle() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + var hashkey = Me(); + await db.KeyDeleteAsync(hashkey).ForAwait(); + var del0 = db.HashDeleteAsync(hashkey, "field").ForAwait(); + + await db.HashSetAsync(hashkey, "field", "value").ForAwait(); + + var del1 = db.HashDeleteAsync(hashkey, "field").ForAwait(); + var del2 = db.HashDeleteAsync(hashkey, "field").ForAwait(); + + Assert.False(await del0); + Assert.True(await del1); + Assert.False(await del2); + } + + /// + /// Tests for . 
+ /// + [Fact] + public async Task TestDelMulti() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + var hashkey = Me(); + db.HashSet(hashkey, "key1", "val1", flags: CommandFlags.FireAndForget); + db.HashSet(hashkey, "key2", "val2", flags: CommandFlags.FireAndForget); + db.HashSet(hashkey, "key3", "val3", flags: CommandFlags.FireAndForget); + + var s1 = db.HashExistsAsync(hashkey, "key1"); + var s2 = db.HashExistsAsync(hashkey, "key2"); + var s3 = db.HashExistsAsync(hashkey, "key3"); + + var removed = db.HashDeleteAsync(hashkey, ["key1", "key3"]); + + var d1 = db.HashExistsAsync(hashkey, "key1"); + var d2 = db.HashExistsAsync(hashkey, "key2"); + var d3 = db.HashExistsAsync(hashkey, "key3"); + + Assert.True(await s1); + Assert.True(await s2); + Assert.True(await s3); + + Assert.Equal(2, await removed); + + Assert.False(await d1); + Assert.True(await d2); + Assert.False(await d3); + + var removeFinal = db.HashDeleteAsync(hashkey, ["key2"]); + + Assert.Equal(0, await db.HashLengthAsync(hashkey).ForAwait()); + Assert.Equal(1, await removeFinal); + } + + /// + /// Tests for . 
+ /// + [Fact] + public async Task TestDelMultiInsideTransaction() + { + await using var conn = Create(); + + var tran = conn.GetDatabase().CreateTransaction(); + { + var hashkey = Me(); + _ = tran.HashSetAsync(hashkey, "key1", "val1"); + _ = tran.HashSetAsync(hashkey, "key2", "val2"); + _ = tran.HashSetAsync(hashkey, "key3", "val3"); + + var s1 = tran.HashExistsAsync(hashkey, "key1"); + var s2 = tran.HashExistsAsync(hashkey, "key2"); + var s3 = tran.HashExistsAsync(hashkey, "key3"); + + var removed = tran.HashDeleteAsync(hashkey, ["key1", "key3"]); + + var d1 = tran.HashExistsAsync(hashkey, "key1"); + var d2 = tran.HashExistsAsync(hashkey, "key2"); + var d3 = tran.HashExistsAsync(hashkey, "key3"); + + tran.Execute(); + + Assert.True(await s1); + Assert.True(await s2); + Assert.True(await s3); + + Assert.Equal(2, await removed); + + Assert.False(await d1); + Assert.True(await d2); + Assert.False(await d3); + } + } + + /// + /// Tests for . + /// + [Fact] + public async Task TestExists() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + var hashkey = Me(); + _ = db.KeyDeleteAsync(hashkey).ForAwait(); + var ex0 = db.HashExistsAsync(hashkey, "field").ForAwait(); + _ = db.HashSetAsync(hashkey, "field", "value").ForAwait(); + var ex1 = db.HashExistsAsync(hashkey, "field").ForAwait(); + _ = db.HashDeleteAsync(hashkey, "field").ForAwait(); + _ = db.HashExistsAsync(hashkey, "field").ForAwait(); + + Assert.False(await ex0); + Assert.True(await ex1); + Assert.False(await ex0); + } + + /// + /// Tests for . 
+ /// + [Fact] + public async Task TestHashKeys() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + var hashKey = Me(); + await db.KeyDeleteAsync(hashKey).ForAwait(); + + var keys0 = await db.HashKeysAsync(hashKey).ForAwait(); + Assert.Empty(keys0); + + await db.HashSetAsync(hashKey, "foo", "abc").ForAwait(); + await db.HashSetAsync(hashKey, "bar", "def").ForAwait(); + + var keys1 = db.HashKeysAsync(hashKey); + + var arr = await keys1; + Assert.Equal(2, arr.Length); + Assert.Equal("foo", arr[0]); + Assert.Equal("bar", arr[1]); + } + + /// + /// Tests for . + /// + [Fact] + public async Task TestHashValues() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + var hashkey = Me(); + await db.KeyDeleteAsync(hashkey).ForAwait(); + + var keys0 = await db.HashValuesAsync(hashkey).ForAwait(); + + await db.HashSetAsync(hashkey, "foo", "abc").ForAwait(); + await db.HashSetAsync(hashkey, "bar", "def").ForAwait(); + + var keys1 = db.HashValuesAsync(hashkey).ForAwait(); + + Assert.Empty(keys0); + + var arr = await keys1; + Assert.Equal(2, arr.Length); + Assert.Equal("abc", Encoding.UTF8.GetString(arr[0]!)); + Assert.Equal("def", Encoding.UTF8.GetString(arr[1]!)); + } + + /// + /// Tests for . + /// + [Fact] + public async Task TestHashLength() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + var hashkey = Me(); + db.KeyDelete(hashkey, CommandFlags.FireAndForget); + + var len0 = db.HashLengthAsync(hashkey); + + db.HashSet(hashkey, "foo", "abc", flags: CommandFlags.FireAndForget); + db.HashSet(hashkey, "bar", "def", flags: CommandFlags.FireAndForget); + + var len1 = db.HashLengthAsync(hashkey); + + Assert.Equal(0, await len0); + Assert.Equal(2, await len1); + } + + /// + /// Tests for . 
+ /// + [Fact] + public async Task TestGetMulti() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + var hashkey = Me(); + db.KeyDelete(hashkey, CommandFlags.FireAndForget); + + RedisValue[] fields = ["foo", "bar", "blop"]; + var arr0 = await db.HashGetAsync(hashkey, fields).ForAwait(); + + db.HashSet(hashkey, "foo", "abc", flags: CommandFlags.FireAndForget); + db.HashSet(hashkey, "bar", "def", flags: CommandFlags.FireAndForget); + + var arr1 = await db.HashGetAsync(hashkey, fields).ForAwait(); + var arr2 = await db.HashGetAsync(hashkey, fields).ForAwait(); + + Assert.Equal(3, arr0.Length); + Assert.Null((string?)arr0[0]); + Assert.Null((string?)arr0[1]); + Assert.Null((string?)arr0[2]); + + Assert.Equal(3, arr1.Length); + Assert.Equal("abc", arr1[0]); + Assert.Equal("def", arr1[1]); + Assert.Null((string?)arr1[2]); + + Assert.Equal(3, arr2.Length); + Assert.Equal("abc", arr2[0]); + Assert.Equal("def", arr2[1]); + Assert.Null((string?)arr2[2]); + } + + /// + /// Tests for . + /// + [Fact] + public async Task TestGetPairs() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + var hashkey = Me(); + _ = db.KeyDeleteAsync(hashkey); + + var result0 = db.HashGetAllAsync(hashkey); + + _ = db.HashSetAsync(hashkey, "foo", "abc"); + _ = db.HashSetAsync(hashkey, "bar", "def"); + + var result1 = db.HashGetAllAsync(hashkey); + + Assert.Empty(conn.Wait(result0)); + var result = conn.Wait(result1).ToStringDictionary(); + Assert.Equal(2, result.Count); + Assert.Equal("abc", result["foo"]); + Assert.Equal("def", result["bar"]); + } + + /// + /// Tests for . 
+ /// + [Fact] + public async Task TestSetPairs() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + var hashkey = Me(); + _ = db.KeyDeleteAsync(hashkey).ForAwait(); + + var result0 = db.HashGetAllAsync(hashkey); + + var data = new[] + { + new HashEntry("foo", Encoding.UTF8.GetBytes("abc")), + new HashEntry("bar", Encoding.UTF8.GetBytes("def")), + }; + _ = db.HashSetAsync(hashkey, data).ForAwait(); + + var result1 = db.Wait(db.HashGetAllAsync(hashkey)); + + Assert.Empty(result0.Result); + var result = result1.ToStringDictionary(); + Assert.Equal(2, result.Count); + Assert.Equal("abc", result["foo"]); + Assert.Equal("def", result["bar"]); + } + + [Fact] + public async Task TestWhenAlwaysAsync() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + var hashkey = Me(); + db.KeyDelete(hashkey, CommandFlags.FireAndForget); + + var result1 = await db.HashSetAsync(hashkey, "foo", "bar", When.Always, CommandFlags.None); + var result2 = await db.HashSetAsync(hashkey, "foo2", "bar", When.Always, CommandFlags.None); + var result3 = await db.HashSetAsync(hashkey, "foo", "bar", When.Always, CommandFlags.None); + var result4 = await db.HashSetAsync(hashkey, "foo", "bar2", When.Always, CommandFlags.None); + + Assert.True(result1, "Initial set key 1"); + Assert.True(result2, "Initial set key 2"); + // Fields modified *but not added* should be a zero/false. 
That's the behavior of HSET + Assert.False(result3, "Duplicate set key 1"); + Assert.False(result4, "Duplicate se key 1 variant"); + } + + [Fact] + public async Task HashRandomFieldAsync() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var hashKey = Me(); + var items = new HashEntry[] { new("new york", "yankees"), new("baltimore", "orioles"), new("boston", "red sox"), new("Tampa Bay", "rays"), new("Toronto", "blue jays") }; + await db.HashSetAsync(hashKey, items); + + var singleField = await db.HashRandomFieldAsync(hashKey); + var multiFields = await db.HashRandomFieldsAsync(hashKey, 3); + var withValues = await db.HashRandomFieldsWithValuesAsync(hashKey, 3); + Assert.Equal(3, multiFields.Length); + Assert.Equal(3, withValues.Length); + Assert.Contains(items, x => x.Name == singleField); + + foreach (var field in multiFields) + { + Assert.Contains(items, x => x.Name == field); + } + + foreach (var field in withValues) + { + Assert.Contains(items, x => x.Name == field.Name); + } + } + + [Fact] + public async Task HashRandomField() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var hashKey = Me(); + var items = new HashEntry[] { new("new york", "yankees"), new("baltimore", "orioles"), new("boston", "red sox"), new("Tampa Bay", "rays"), new("Toronto", "blue jays") }; + db.HashSet(hashKey, items); + + var singleField = db.HashRandomField(hashKey); + var multiFields = db.HashRandomFields(hashKey, 3); + var withValues = db.HashRandomFieldsWithValues(hashKey, 3); + Assert.Equal(3, multiFields.Length); + Assert.Equal(3, withValues.Length); + Assert.Contains(items, x => x.Name == singleField); + + foreach (var field in multiFields) + { + Assert.Contains(items, x => x.Name == field); + } + + foreach (var field in withValues) + { + Assert.Contains(items, x => x.Name == field.Name); + } + } + + [Fact] + public async Task HashRandomFieldEmptyHash() + { + await using 
var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var hashKey = Me(); + + var singleField = db.HashRandomField(hashKey); + var multiFields = db.HashRandomFields(hashKey, 3); + var withValues = db.HashRandomFieldsWithValues(hashKey, 3); + + Assert.Equal(RedisValue.Null, singleField); + Assert.Empty(multiFields); + Assert.Empty(withValues); + } +} diff --git a/tests/StackExchange.Redis.Tests/Hashes.cs b/tests/StackExchange.Redis.Tests/Hashes.cs deleted file mode 100644 index f1c37741c..000000000 --- a/tests/StackExchange.Redis.Tests/Hashes.cs +++ /dev/null @@ -1,599 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Text; -using System.Linq; -using Xunit; -using Xunit.Abstractions; -using System.Threading.Tasks; - -namespace StackExchange.Redis.Tests -{ - [Collection(SharedConnectionFixture.Key)] - public class Hashes : TestBase // https://redis.io/commands#hash - { - public Hashes(ITestOutputHelper output, SharedConnectionFixture fixture) : base(output, fixture) { } - - [Fact] - public async Task TestIncrBy() - { - using (var muxer = Create()) - { - var conn = muxer.GetDatabase(); - var key = Me(); - _ = conn.KeyDeleteAsync(key).ForAwait(); - - const int iterations = 100; - var aTasks = new Task[iterations]; - var bTasks = new Task[iterations]; - for (int i = 1; i < iterations + 1; i++) - { - aTasks[i - 1] = conn.HashIncrementAsync(key, "a", 1); - bTasks[i - 1] = conn.HashIncrementAsync(key, "b", -1); - } - await Task.WhenAll(bTasks).ForAwait(); - for (int i = 1; i < iterations + 1; i++) - { - Assert.Equal(i, aTasks[i - 1].Result); - Assert.Equal(-i, bTasks[i - 1].Result); - } - } - } - - [Fact] - public async Task ScanAsync() - { - using (var muxer = Create()) - { - Skip.IfMissingFeature(muxer, nameof(RedisFeatures.Scan), r => r.Scan); - var conn = muxer.GetDatabase(); - var key = Me(); - await conn.KeyDeleteAsync(key); - for(int i = 0; i < 200; i++) - { - await conn.HashSetAsync(key, "key" + i, "value " + i); 
- } - - int count = 0; - // works for async - await foreach(var _ in conn.HashScanAsync(key, pageSize: 20)) - { - count++; - } - Assert.Equal(200, count); - - // and sync=>async (via cast) - count = 0; - await foreach (var _ in (IAsyncEnumerable)conn.HashScan(key, pageSize: 20)) - { - count++; - } - Assert.Equal(200, count); - - // and sync (native) - count = 0; - foreach (var _ in conn.HashScan(key, pageSize: 20)) - { - count++; - } - Assert.Equal(200, count); - - // and async=>sync (via cast) - count = 0; - foreach (var _ in (IEnumerable)conn.HashScanAsync(key, pageSize: 20)) - { - count++; - } - Assert.Equal(200, count); - - } - } - - [Fact] - public void Scan() - { - using (var muxer = Create()) - { - Skip.IfMissingFeature(muxer, nameof(RedisFeatures.Scan), r => r.Scan); - var conn = muxer.GetDatabase(); - - var key = Me(); - conn.KeyDeleteAsync(key); - conn.HashSetAsync(key, "abc", "def"); - conn.HashSetAsync(key, "ghi", "jkl"); - conn.HashSetAsync(key, "mno", "pqr"); - - var t1 = conn.HashScan(key); - var t2 = conn.HashScan(key, "*h*"); - var t3 = conn.HashScan(key); - var t4 = conn.HashScan(key, "*h*"); - - var v1 = t1.ToArray(); - var v2 = t2.ToArray(); - var v3 = t3.ToArray(); - var v4 = t4.ToArray(); - - Assert.Equal(3, v1.Length); - Assert.Single(v2); - Assert.Equal(3, v3.Length); - Assert.Single(v4); - Array.Sort(v1, (x, y) => string.Compare(x.Name, y.Name)); - Array.Sort(v2, (x, y) => string.Compare(x.Name, y.Name)); - Array.Sort(v3, (x, y) => string.Compare(x.Name, y.Name)); - Array.Sort(v4, (x, y) => string.Compare(x.Name, y.Name)); - - Assert.Equal("abc=def,ghi=jkl,mno=pqr", string.Join(",", v1.Select(pair => pair.Name + "=" + pair.Value))); - Assert.Equal("ghi=jkl", string.Join(",", v2.Select(pair => pair.Name + "=" + pair.Value))); - Assert.Equal("abc=def,ghi=jkl,mno=pqr", string.Join(",", v3.Select(pair => pair.Name + "=" + pair.Value))); - Assert.Equal("ghi=jkl", string.Join(",", v4.Select(pair => pair.Name + "=" + pair.Value))); - } - } - - 
[Fact] - public void TestIncrementOnHashThatDoesntExist() - { - using (var muxer = Create()) - { - var conn = muxer.GetDatabase(); - conn.KeyDeleteAsync("keynotexist"); - var result1 = conn.Wait(conn.HashIncrementAsync("keynotexist", "fieldnotexist", 1)); - var result2 = conn.Wait(conn.HashIncrementAsync("keynotexist", "anotherfieldnotexist", 1)); - Assert.Equal(1, result1); - Assert.Equal(1, result2); - } - } - - [Fact] - public async Task TestIncrByFloat() - { - using (var muxer = Create()) - { - Skip.IfMissingFeature(muxer, nameof(RedisFeatures.IncrementFloat), r => r.IncrementFloat); - var conn = muxer.GetDatabase(); - var key = Me(); - _ = conn.KeyDeleteAsync(key).ForAwait(); - var aTasks = new Task[1000]; - var bTasks = new Task[1000]; - for (int i = 1; i < 1001; i++) - { - aTasks[i-1] = conn.HashIncrementAsync(key, "a", 1.0); - bTasks[i-1] = conn.HashIncrementAsync(key, "b", -1.0); - } - await Task.WhenAll(bTasks).ForAwait(); - for (int i = 1; i < 1001; i++) - { - Assert.Equal(i, aTasks[i-1].Result); - Assert.Equal(-i, bTasks[i-1].Result); - } - } - } - - [Fact] - public async Task TestGetAll() - { - using (var muxer = Create()) - { - var conn = muxer.GetDatabase(); - var key = Me(); - await conn.KeyDeleteAsync(key).ForAwait(); - var shouldMatch = new Dictionary(); - var random = new Random(); - - for (int i = 0; i < 1000; i++) - { - var guid = Guid.NewGuid(); - var value = random.Next(int.MaxValue); - - shouldMatch[guid] = value; - - _ = conn.HashIncrementAsync(key, guid.ToString(), value); - } - - var inRedis = (await conn.HashGetAllAsync(key).ForAwait()).ToDictionary( - x => Guid.Parse(x.Name), x => int.Parse(x.Value)); - - Assert.Equal(shouldMatch.Count, inRedis.Count); - - foreach (var k in shouldMatch.Keys) - { - Assert.Equal(shouldMatch[k], inRedis[k]); - } - } - } - - [Fact] - public async Task TestGet() - { - using (var muxer = Create()) - { - var key = Me(); - var conn = muxer.GetDatabase(); - var shouldMatch = new Dictionary(); - var random = new 
Random(); - - for (int i = 1; i < 1000; i++) - { - var guid = Guid.NewGuid(); - var value = random.Next(int.MaxValue); - - shouldMatch[guid] = value; - - _ = conn.HashIncrementAsync(key, guid.ToString(), value); - } - - foreach (var k in shouldMatch.Keys) - { - var inRedis = await conn.HashGetAsync(key, k.ToString()).ForAwait(); - var num = int.Parse(inRedis); - - Assert.Equal(shouldMatch[k], num); - } - } - } - - [Fact] - public async Task TestSet() // https://redis.io/commands/hset - { - using (var muxer = Create()) - { - var conn = muxer.GetDatabase(); - var hashkey = Me(); - var del = conn.KeyDeleteAsync(hashkey).ForAwait(); - - var val0 = conn.HashGetAsync(hashkey, "field").ForAwait(); - var set0 = conn.HashSetAsync(hashkey, "field", "value1").ForAwait(); - var val1 = conn.HashGetAsync(hashkey, "field").ForAwait(); - var set1 = conn.HashSetAsync(hashkey, "field", "value2").ForAwait(); - var val2 = conn.HashGetAsync(hashkey, "field").ForAwait(); - - var set2 = conn.HashSetAsync(hashkey, "field-blob", Encoding.UTF8.GetBytes("value3")).ForAwait(); - var val3 = conn.HashGetAsync(hashkey, "field-blob").ForAwait(); - - var set3 = conn.HashSetAsync(hashkey, "empty_type1", "").ForAwait(); - var val4 = conn.HashGetAsync(hashkey, "empty_type1").ForAwait(); - var set4 = conn.HashSetAsync(hashkey, "empty_type2", RedisValue.EmptyString).ForAwait(); - var val5 = conn.HashGetAsync(hashkey, "empty_type2").ForAwait(); - - await del; - Assert.Null((string)(await val0)); - Assert.True(await set0); - Assert.Equal("value1", await val1); - Assert.False(await set1); - Assert.Equal("value2", await val2); - - Assert.True(await set2); - Assert.Equal("value3", await val3); - - Assert.True(await set3); - Assert.Equal("", await val4); - Assert.True(await set4); - Assert.Equal("", await val5); - } - } - - [Fact] - public async Task TestSetNotExists() // https://redis.io/commands/hsetnx - { - using (var muxer = Create()) - { - var conn = muxer.GetDatabase(); - var hashkey = Me(); - var del 
= conn.KeyDeleteAsync(hashkey).ForAwait(); - - var val0 = conn.HashGetAsync(hashkey, "field").ForAwait(); - var set0 = conn.HashSetAsync(hashkey, "field", "value1", When.NotExists).ForAwait(); - var val1 = conn.HashGetAsync(hashkey, "field").ForAwait(); - var set1 = conn.HashSetAsync(hashkey, "field", "value2", When.NotExists).ForAwait(); - var val2 = conn.HashGetAsync(hashkey, "field").ForAwait(); - - var set2 = conn.HashSetAsync(hashkey, "field-blob", Encoding.UTF8.GetBytes("value3"), When.NotExists).ForAwait(); - var val3 = conn.HashGetAsync(hashkey, "field-blob").ForAwait(); - var set3 = conn.HashSetAsync(hashkey, "field-blob", Encoding.UTF8.GetBytes("value3"), When.NotExists).ForAwait(); - - await del; - Assert.Null((string)(await val0)); - Assert.True(await set0); - Assert.Equal("value1", await val1); - Assert.False(await set1); - Assert.Equal("value1", await val2); - - Assert.True(await set2); - Assert.Equal("value3", await val3); - Assert.False(await set3); - } - } - - [Fact] - public async Task TestDelSingle() // https://redis.io/commands/hdel - { - using (var muxer = Create()) - { - var conn = muxer.GetDatabase(); - var hashkey = Me(); - await conn.KeyDeleteAsync(hashkey).ForAwait(); - var del0 = conn.HashDeleteAsync(hashkey, "field").ForAwait(); - - await conn.HashSetAsync(hashkey, "field", "value").ForAwait(); - - var del1 = conn.HashDeleteAsync(hashkey, "field").ForAwait(); - var del2 = conn.HashDeleteAsync(hashkey, "field").ForAwait(); - - Assert.False(await del0); - Assert.True(await del1); - Assert.False(await del2); - } - } - - [Fact] - public async Task TestDelMulti() // https://redis.io/commands/hdel - { - using (var muxer = Create()) - { - var conn = muxer.GetDatabase(); - var hashkey = Me(); - conn.HashSet(hashkey, "key1", "val1", flags: CommandFlags.FireAndForget); - conn.HashSet(hashkey, "key2", "val2", flags: CommandFlags.FireAndForget); - conn.HashSet(hashkey, "key3", "val3", flags: CommandFlags.FireAndForget); - - var s1 = 
conn.HashExistsAsync(hashkey, "key1"); - var s2 = conn.HashExistsAsync(hashkey, "key2"); - var s3 = conn.HashExistsAsync(hashkey, "key3"); - - var removed = conn.HashDeleteAsync(hashkey, new RedisValue[] { "key1", "key3" }); - - var d1 = conn.HashExistsAsync(hashkey, "key1"); - var d2 = conn.HashExistsAsync(hashkey, "key2"); - var d3 = conn.HashExistsAsync(hashkey, "key3"); - - Assert.True(await s1); - Assert.True(await s2); - Assert.True(await s3); - - Assert.Equal(2, await removed); - - Assert.False(await d1); - Assert.True(await d2); - Assert.False(await d3); - - var removeFinal = conn.HashDeleteAsync(hashkey, new RedisValue[] { "key2" }); - - Assert.Equal(0, await conn.HashLengthAsync(hashkey).ForAwait()); - Assert.Equal(1, await removeFinal); - } - } - - [Fact] - public async Task TestDelMultiInsideTransaction() // https://redis.io/commands/hdel - { - using (var outer = Create()) - { - var conn = outer.GetDatabase().CreateTransaction(); - { - var hashkey = Me(); - _ = conn.HashSetAsync(hashkey, "key1", "val1"); - _ = conn.HashSetAsync(hashkey, "key2", "val2"); - _ = conn.HashSetAsync(hashkey, "key3", "val3"); - - var s1 = conn.HashExistsAsync(hashkey, "key1"); - var s2 = conn.HashExistsAsync(hashkey, "key2"); - var s3 = conn.HashExistsAsync(hashkey, "key3"); - - var removed = conn.HashDeleteAsync(hashkey, new RedisValue[] { "key1", "key3" }); - - var d1 = conn.HashExistsAsync(hashkey, "key1"); - var d2 = conn.HashExistsAsync(hashkey, "key2"); - var d3 = conn.HashExistsAsync(hashkey, "key3"); - - conn.Execute(); - - Assert.True(await s1); - Assert.True(await s2); - Assert.True(await s3); - - Assert.Equal(2, await removed); - - Assert.False(await d1); - Assert.True(await d2); - Assert.False(await d3); - } - } - } - - [Fact] - public async Task TestExists() // https://redis.io/commands/hexists - { - using (var muxer = Create()) - { - var conn = muxer.GetDatabase(); - var hashkey = Me(); - _ = conn.KeyDeleteAsync(hashkey).ForAwait(); - var ex0 = 
conn.HashExistsAsync(hashkey, "field").ForAwait(); - _ = conn.HashSetAsync(hashkey, "field", "value").ForAwait(); - var ex1 = conn.HashExistsAsync(hashkey, "field").ForAwait(); - _ = conn.HashDeleteAsync(hashkey, "field").ForAwait(); - _ = conn.HashExistsAsync(hashkey, "field").ForAwait(); - - Assert.False(await ex0); - Assert.True(await ex1); - Assert.False(await ex0); - } - } - - [Fact] - public async Task TestHashKeys() // https://redis.io/commands/hkeys - { - using (var muxer = Create()) - { - var conn = muxer.GetDatabase(); - var hashKey = Me(); - await conn.KeyDeleteAsync(hashKey).ForAwait(); - - var keys0 = await conn.HashKeysAsync(hashKey).ForAwait(); - Assert.Empty(keys0); - - await conn.HashSetAsync(hashKey, "foo", "abc").ForAwait(); - await conn.HashSetAsync(hashKey, "bar", "def").ForAwait(); - - var keys1 = conn.HashKeysAsync(hashKey); - - var arr = await keys1; - Assert.Equal(2, arr.Length); - Assert.Equal("foo", arr[0]); - Assert.Equal("bar", arr[1]); - } - } - - [Fact] - public async Task TestHashValues() // https://redis.io/commands/hvals - { - using (var muxer = Create()) - { - var conn = muxer.GetDatabase(); - var hashkey = Me(); - await conn.KeyDeleteAsync(hashkey).ForAwait(); - - var keys0 = await conn.HashValuesAsync(hashkey).ForAwait(); - - await conn.HashSetAsync(hashkey, "foo", "abc").ForAwait(); - await conn.HashSetAsync(hashkey, "bar", "def").ForAwait(); - - var keys1 = conn.HashValuesAsync(hashkey).ForAwait(); - - Assert.Empty(keys0); - - var arr = await keys1; - Assert.Equal(2, arr.Length); - Assert.Equal("abc", Encoding.UTF8.GetString(arr[0])); - Assert.Equal("def", Encoding.UTF8.GetString(arr[1])); - } - } - - [Fact] - public async Task TestHashLength() // https://redis.io/commands/hlen - { - using (var muxer = Create()) - { - var conn = muxer.GetDatabase(); - var hashkey = Me(); - conn.KeyDelete(hashkey, CommandFlags.FireAndForget); - - var len0 = conn.HashLengthAsync(hashkey); - - conn.HashSet(hashkey, "foo", "abc", flags: 
CommandFlags.FireAndForget); - conn.HashSet(hashkey, "bar", "def", flags: CommandFlags.FireAndForget); - - var len1 = conn.HashLengthAsync(hashkey); - - Assert.Equal(0, await len0); - Assert.Equal(2, await len1); - } - } - - [Fact] - public async Task TestGetMulti() // https://redis.io/commands/hmget - { - using (var muxer = Create()) - { - var conn = muxer.GetDatabase(); - var hashkey = Me(); - conn.KeyDelete(hashkey, CommandFlags.FireAndForget); - - RedisValue[] fields = { "foo", "bar", "blop" }; - var arr0 = await conn.HashGetAsync(hashkey, fields).ForAwait(); - - conn.HashSet(hashkey, "foo", "abc", flags: CommandFlags.FireAndForget); - conn.HashSet(hashkey, "bar", "def", flags: CommandFlags.FireAndForget); - - var arr1 = await conn.HashGetAsync(hashkey, fields).ForAwait(); - var arr2 = await conn.HashGetAsync(hashkey, fields).ForAwait(); - - Assert.Equal(3, arr0.Length); - Assert.Null((string)arr0[0]); - Assert.Null((string)arr0[1]); - Assert.Null((string)arr0[2]); - - Assert.Equal(3, arr1.Length); - Assert.Equal("abc", arr1[0]); - Assert.Equal("def", arr1[1]); - Assert.Null((string)arr1[2]); - - Assert.Equal(3, arr2.Length); - Assert.Equal("abc", arr2[0]); - Assert.Equal("def", arr2[1]); - Assert.Null((string)arr2[2]); - } - } - - [Fact] - public void TestGetPairs() // https://redis.io/commands/hgetall - { - using (var muxer = Create()) - { - var conn = muxer.GetDatabase(); - var hashkey = Me(); - conn.KeyDeleteAsync(hashkey); - - var result0 = conn.HashGetAllAsync(hashkey); - - conn.HashSetAsync(hashkey, "foo", "abc"); - conn.HashSetAsync(hashkey, "bar", "def"); - - var result1 = conn.HashGetAllAsync(hashkey); - - Assert.Empty(muxer.Wait(result0)); - var result = muxer.Wait(result1).ToStringDictionary(); - Assert.Equal(2, result.Count); - Assert.Equal("abc", result["foo"]); - Assert.Equal("def", result["bar"]); - } - } - - [Fact] - public void TestSetPairs() // https://redis.io/commands/hmset - { - using (var muxer = Create()) - { - var conn = 
muxer.GetDatabase(); - var hashkey = Me(); - conn.KeyDeleteAsync(hashkey).ForAwait(); - - var result0 = conn.HashGetAllAsync(hashkey); - - var data = new [] { - new HashEntry("foo", Encoding.UTF8.GetBytes("abc")), - new HashEntry("bar", Encoding.UTF8.GetBytes("def")) - }; - conn.HashSetAsync(hashkey, data).ForAwait(); - - var result1 = conn.Wait(conn.HashGetAllAsync(hashkey)); - - Assert.Empty(result0.Result); - var result = result1.ToStringDictionary(); - Assert.Equal(2, result.Count); - Assert.Equal("abc", result["foo"]); - Assert.Equal("def", result["bar"]); - } - } - - [Fact] - public async Task TestWhenAlwaysAsync() - { - using (var muxer = Create()) - { - var conn = muxer.GetDatabase(); - var hashkey = Me(); - conn.KeyDelete(hashkey, CommandFlags.FireAndForget); - - var result1 = await conn.HashSetAsync(hashkey, "foo", "bar", When.Always, CommandFlags.None); - var result2 = await conn.HashSetAsync(hashkey, "foo2", "bar", When.Always, CommandFlags.None); - var result3 = await conn.HashSetAsync(hashkey, "foo", "bar", When.Always, CommandFlags.None); - var result4 = await conn.HashSetAsync(hashkey, "foo", "bar2", When.Always, CommandFlags.None); - - Assert.True(result1, "Initial set key 1"); - Assert.True(result2, "Initial set key 2"); - // Fields modified *but not added* should be a zero/false. 
That's the behavior of HSET - Assert.False(result3, "Duplicate set key 1"); - Assert.False(result4, "Duplicate se key 1 variant"); - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/HeartbeatTests.cs b/tests/StackExchange.Redis.Tests/HeartbeatTests.cs new file mode 100644 index 000000000..4de271f9a --- /dev/null +++ b/tests/StackExchange.Redis.Tests/HeartbeatTests.cs @@ -0,0 +1,46 @@ +using System; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +[RunPerProtocol] +public class HeartbeatTests(ITestOutputHelper output, SharedConnectionFixture fixture) : TestBase(output, fixture) +{ + [Fact] + public async Task TestAutomaticHeartbeat() + { + RedisValue oldTimeout = RedisValue.Null; + await using var configConn = Create(allowAdmin: true); + + try + { + configConn.GetDatabase(); + var srv = GetAnyPrimary(configConn); + oldTimeout = srv.ConfigGet("timeout")[0].Value; + Log("Old Timeout: " + oldTimeout); + srv.ConfigSet("timeout", 3); + + await using var innerConn = Create(); + var innerDb = innerConn.GetDatabase(); + await innerDb.PingAsync(); // need to wait to pick up configuration etc + + var before = innerConn.OperationCount; + + Log("sleeping to test heartbeat..."); + await Task.Delay(TimeSpan.FromSeconds(5)).ForAwait(); + + var after = innerConn.OperationCount; + Assert.True(after >= before + 1, $"after: {after}, before: {before}"); + } + finally + { + if (!oldTimeout.IsNull) + { + Log("Resetting old timeout: " + oldTimeout); + var srv = GetAnyPrimary(configConn); + srv.ConfigSet("timeout", oldTimeout); + } + } + } +} diff --git a/tests/StackExchange.Redis.Tests/Helpers/Attributes.cs b/tests/StackExchange.Redis.Tests/Helpers/Attributes.cs index 33d8d3631..a3386e80c 100644 --- a/tests/StackExchange.Redis.Tests/Helpers/Attributes.cs +++ b/tests/StackExchange.Redis.Tests/Helpers/Attributes.cs @@ -1,185 +1,215 @@ using System; using System.Collections.Generic; +using System.Globalization; using System.Linq; +using 
System.Reflection; +using System.Runtime.CompilerServices; using System.Threading; using System.Threading.Tasks; -using Xunit.Abstractions; +using Xunit; +using Xunit.Internal; using Xunit.Sdk; - -namespace StackExchange.Redis.Tests +using Xunit.v3; + +#pragma warning disable SA1402 // File may only contain a single type +#pragma warning disable SA1502 // Element should not be on a single line +#pragma warning disable SA1649 // File name should match first type name +#pragma warning disable IDE0130 // Namespace does not match folder structure +namespace StackExchange.Redis.Tests; + +/// +/// Override for that truncates our DisplayName down. +/// +/// Attribute that is applied to a method to indicate that it is a fact that should +/// be run by the test runner. It can also be extended to support a customized definition +/// of a test method. +/// +/// +[AttributeUsage(AttributeTargets.Method, AllowMultiple = false)] +[XunitTestCaseDiscoverer(typeof(FactDiscoverer))] +public class FactAttribute([CallerFilePath] string? sourceFilePath = null, [CallerLineNumber] int sourceLineNumber = -1) : Xunit.FactAttribute(sourceFilePath, sourceLineNumber) { } + +/// +/// Override for that truncates our DisplayName down. +/// +/// Marks a test method as being a data theory. Data theories are tests which are +/// fed various bits of data from a data source, mapping to parameters on the test +/// method. If the data source contains multiple rows, then the test method is executed +/// multiple times (once with each data row). Data is provided by attributes which +/// derive from Xunit.Sdk.DataAttribute (notably, Xunit.InlineDataAttribute and Xunit.MemberDataAttribute). +/// +/// +[AttributeUsage(AttributeTargets.Method, AllowMultiple = false)] +[XunitTestCaseDiscoverer(typeof(TheoryDiscoverer))] +public class TheoryAttribute([CallerFilePath] string? 
sourceFilePath = null, [CallerLineNumber] int sourceLineNumber = -1) : Xunit.TheoryAttribute(sourceFilePath, sourceLineNumber) { } + +public class FactDiscoverer : Xunit.v3.FactDiscoverer { - /// - /// Override for that truncates our DisplayName down. - /// - /// Attribute that is applied to a method to indicate that it is a fact that should - /// be run by the test runner. It can also be extended to support a customized definition - /// of a test method. - /// - /// - [AttributeUsage(AttributeTargets.Method, AllowMultiple = false)] - [XunitTestCaseDiscoverer("StackExchange.Redis.Tests.FactDiscoverer", "StackExchange.Redis.Tests")] - public class FactAttribute : Xunit.FactAttribute - { - } - - [AttributeUsage(AttributeTargets.Method, AllowMultiple = false)] - public class FactLongRunningAttribute : FactAttribute - { - public override string Skip - { - get => TestConfig.Current.RunLongRunning ? base.Skip : "Config.RunLongRunning is false - skipping long test."; - set => base.Skip = value; - } - } + public override ValueTask> Discover(ITestFrameworkDiscoveryOptions discoveryOptions, IXunitTestMethod testMethod, IFactAttribute factAttribute) + => base.Discover(discoveryOptions, testMethod, factAttribute).ExpandAsync(); +} - /// - /// Override for that truncates our DisplayName down. - /// - /// Marks a test method as being a data theory. Data theories are tests which are - /// fed various bits of data from a data source, mapping to parameters on the test - /// method. If the data source contains multiple rows, then the test method is executed - /// multiple times (once with each data row). Data is provided by attributes which - /// derive from Xunit.Sdk.DataAttribute (notably, Xunit.InlineDataAttribute and Xunit.MemberDataAttribute). 
- /// - /// - [AttributeUsage(AttributeTargets.Method, AllowMultiple = false)] - [XunitTestCaseDiscoverer("StackExchange.Redis.Tests.TheoryDiscoverer", "StackExchange.Redis.Tests")] - public class TheoryAttribute : Xunit.TheoryAttribute { } +public class TheoryDiscoverer : Xunit.v3.TheoryDiscoverer +{ + protected override ValueTask> CreateTestCasesForDataRow(ITestFrameworkDiscoveryOptions discoveryOptions, IXunitTestMethod testMethod, ITheoryAttribute theoryAttribute, ITheoryDataRow dataRow, object?[] testMethodArguments) + => base.CreateTestCasesForDataRow(discoveryOptions, testMethod, theoryAttribute, dataRow, testMethodArguments).ExpandAsync(); - [AttributeUsage(AttributeTargets.Method, AllowMultiple = false)] - public class TheoryLongRunningAttribute : Xunit.TheoryAttribute - { - public override string Skip - { - get => TestConfig.Current.RunLongRunning ? base.Skip : "Config.RunLongRunning is false - skipping long test."; - set => base.Skip = value; - } - } + protected override ValueTask> CreateTestCasesForTheory(ITestFrameworkDiscoveryOptions discoveryOptions, IXunitTestMethod testMethod, ITheoryAttribute theoryAttribute) + => base.CreateTestCasesForTheory(discoveryOptions, testMethod, theoryAttribute).ExpandAsync(); +} - public class FactDiscoverer : Xunit.Sdk.FactDiscoverer - { - public FactDiscoverer(IMessageSink diagnosticMessageSink) : base(diagnosticMessageSink) { } +[AttributeUsage(AttributeTargets.Class | AttributeTargets.Method, AllowMultiple = false)] +public class RunPerProtocol() : Attribute { } - protected override IXunitTestCase CreateTestCase(ITestFrameworkDiscoveryOptions discoveryOptions, ITestMethod testMethod, IAttributeInfo factAttribute) - => new SkippableTestCase(DiagnosticMessageSink, discoveryOptions.MethodDisplayOrDefault(), discoveryOptions.MethodDisplayOptionsOrDefault(), testMethod); - } +public interface IProtocolTestCase +{ + RedisProtocol Protocol { get; } +} - public class TheoryDiscoverer : Xunit.Sdk.TheoryDiscoverer +public 
class ProtocolTestCase : XunitTestCase, IProtocolTestCase +{ + public RedisProtocol Protocol { get; private set; } + + [Obsolete("Called by the de-serializer; should only be called by deriving classes for de-serialization purposes")] + public ProtocolTestCase() { } + + public ProtocolTestCase(XunitTestCase testCase, RedisProtocol protocol) : base( + testMethod: testCase.TestMethod, + testCaseDisplayName: $"{testCase.TestCaseDisplayName.Replace("StackExchange.Redis.Tests.", "")} ({protocol.GetString()})", + uniqueID: testCase.UniqueID + protocol.GetString(), + @explicit: testCase.Explicit, + skipExceptions: testCase.SkipExceptions, + skipReason: testCase.SkipReason, + skipType: testCase.SkipType, + skipUnless: testCase.SkipUnless, + skipWhen: testCase.SkipWhen, + traits: testCase.TestMethod.Traits.ToReadWrite(StringComparer.OrdinalIgnoreCase), + testMethodArguments: testCase.TestMethodArguments, + sourceFilePath: testCase.SourceFilePath, + sourceLineNumber: testCase.SourceLineNumber, + timeout: testCase.Timeout) + => Protocol = protocol; + + protected override void Serialize(IXunitSerializationInfo data) { - public TheoryDiscoverer(IMessageSink diagnosticMessageSink) : base(diagnosticMessageSink) { } - - protected override IEnumerable CreateTestCasesForDataRow(ITestFrameworkDiscoveryOptions discoveryOptions, ITestMethod testMethod, IAttributeInfo theoryAttribute, object[] dataRow) - => new[] { new SkippableTestCase(DiagnosticMessageSink, discoveryOptions.MethodDisplayOrDefault(), discoveryOptions.MethodDisplayOptionsOrDefault(), testMethod, dataRow) }; - - protected override IEnumerable CreateTestCasesForSkip(ITestFrameworkDiscoveryOptions discoveryOptions, ITestMethod testMethod, IAttributeInfo theoryAttribute, string skipReason) - => new[] { new SkippableTestCase(DiagnosticMessageSink, discoveryOptions.MethodDisplayOrDefault(), discoveryOptions.MethodDisplayOptionsOrDefault(), testMethod) }; - - protected override IEnumerable 
CreateTestCasesForTheory(ITestFrameworkDiscoveryOptions discoveryOptions, ITestMethod testMethod, IAttributeInfo theoryAttribute) - => new[] { new SkippableTheoryTestCase(DiagnosticMessageSink, discoveryOptions.MethodDisplayOrDefault(), discoveryOptions.MethodDisplayOptionsOrDefault(), testMethod) }; - - protected override IEnumerable CreateTestCasesForSkippedDataRow(ITestFrameworkDiscoveryOptions discoveryOptions, ITestMethod testMethod, IAttributeInfo theoryAttribute, object[] dataRow, string skipReason) - => new[] { new NamedSkippedDataRowTestCase(DiagnosticMessageSink, discoveryOptions.MethodDisplayOrDefault(), discoveryOptions.MethodDisplayOptionsOrDefault(), testMethod, skipReason, dataRow) }; + base.Serialize(data); + data.AddValue("resp", (int)Protocol); } - public class SkippableTestCase : XunitTestCase + protected override void Deserialize(IXunitSerializationInfo data) { - protected override string GetDisplayName(IAttributeInfo factAttribute, string displayName) => - base.GetDisplayName(factAttribute, displayName).StripName(); - - [Obsolete("Called by the de-serializer; should only be called by deriving classes for de-serialization purposes")] - public SkippableTestCase() { } - - public SkippableTestCase(IMessageSink diagnosticMessageSink, TestMethodDisplay defaultMethodDisplay, TestMethodDisplayOptions defaultMethodDisplayOptions, ITestMethod testMethod, object[] testMethodArguments = null) - : base(diagnosticMessageSink, defaultMethodDisplay, defaultMethodDisplayOptions, testMethod, testMethodArguments) - { - } - - public override async Task RunAsync( - IMessageSink diagnosticMessageSink, - IMessageBus messageBus, - object[] constructorArguments, - ExceptionAggregator aggregator, - CancellationTokenSource cancellationTokenSource) - { - var skipMessageBus = new SkippableMessageBus(messageBus); - var result = await base.RunAsync(diagnosticMessageSink, skipMessageBus, constructorArguments, aggregator, cancellationTokenSource).ForAwait(); - return 
result.Update(skipMessageBus); - } + base.Deserialize(data); + Protocol = (RedisProtocol)data.GetValue("resp"); } +} - public class SkippableTheoryTestCase : XunitTheoryTestCase +public class ProtocolDelayEnumeratedTestCase : XunitDelayEnumeratedTheoryTestCase, IProtocolTestCase +{ + public RedisProtocol Protocol { get; private set; } + + [Obsolete("Called by the de-serializer; should only be called by deriving classes for de-serialization purposes")] + public ProtocolDelayEnumeratedTestCase() { } + + public ProtocolDelayEnumeratedTestCase(XunitDelayEnumeratedTheoryTestCase testCase, RedisProtocol protocol) : base( + testMethod: testCase.TestMethod, + testCaseDisplayName: $"{testCase.TestCaseDisplayName.Replace("StackExchange.Redis.Tests.", "")} ({protocol.GetString()})", + uniqueID: testCase.UniqueID + protocol.GetString(), + @explicit: testCase.Explicit, + skipTestWithoutData: testCase.SkipTestWithoutData, + skipExceptions: testCase.SkipExceptions, + skipReason: testCase.SkipReason, + skipType: testCase.SkipType, + skipUnless: testCase.SkipUnless, + skipWhen: testCase.SkipWhen, + traits: testCase.TestMethod.Traits.ToReadWrite(StringComparer.OrdinalIgnoreCase), + sourceFilePath: testCase.SourceFilePath, + sourceLineNumber: testCase.SourceLineNumber, + timeout: testCase.Timeout) + => Protocol = protocol; + + protected override void Serialize(IXunitSerializationInfo data) { - protected override string GetDisplayName(IAttributeInfo factAttribute, string displayName) => - base.GetDisplayName(factAttribute, displayName).StripName(); - - [Obsolete("Called by the de-serializer; should only be called by deriving classes for de-serialization purposes")] - public SkippableTheoryTestCase() { } - - public SkippableTheoryTestCase(IMessageSink diagnosticMessageSink, TestMethodDisplay defaultMethodDisplay, TestMethodDisplayOptions defaultMethodDisplayOptions, ITestMethod testMethod) - : base(diagnosticMessageSink, defaultMethodDisplay, defaultMethodDisplayOptions, testMethod) { 
} - - public override async Task RunAsync( - IMessageSink diagnosticMessageSink, - IMessageBus messageBus, - object[] constructorArguments, - ExceptionAggregator aggregator, - CancellationTokenSource cancellationTokenSource) - { - var skipMessageBus = new SkippableMessageBus(messageBus); - var result = await base.RunAsync(diagnosticMessageSink, skipMessageBus, constructorArguments, aggregator, cancellationTokenSource).ForAwait(); - return result.Update(skipMessageBus); - } + base.Serialize(data); + data.AddValue("resp", (int)Protocol); } - public class NamedSkippedDataRowTestCase : XunitSkippedDataRowTestCase + protected override void Deserialize(IXunitSerializationInfo data) { - protected override string GetDisplayName(IAttributeInfo factAttribute, string displayName) => - base.GetDisplayName(factAttribute, displayName).StripName(); - - [Obsolete("Called by the de-serializer; should only be called by deriving classes for de-serialization purposes")] - public NamedSkippedDataRowTestCase() { } - - public NamedSkippedDataRowTestCase(IMessageSink diagnosticMessageSink, TestMethodDisplay defaultMethodDisplay, TestMethodDisplayOptions defaultMethodDisplayOptions, ITestMethod testMethod, string skipReason, object[] testMethodArguments = null) - : base(diagnosticMessageSink, defaultMethodDisplay, defaultMethodDisplayOptions, testMethod, skipReason, testMethodArguments) { } + base.Deserialize(data); + Protocol = (RedisProtocol)data.GetValue("resp"); } +} - public class SkippableMessageBus : IMessageBus +internal static class XUnitExtensions +{ + public static async ValueTask> ExpandAsync(this ValueTask> discovery) { - private readonly IMessageBus InnerBus; - public SkippableMessageBus(IMessageBus innerBus) => InnerBus = innerBus; - - public int DynamicallySkippedTestCount { get; private set; } - - public void Dispose() { } - - public bool QueueMessage(IMessageSinkMessage message) + static IXunitTestCase CreateTestCase(XunitTestCase tc, RedisProtocol protocol) => tc switch 
+ { + XunitDelayEnumeratedTheoryTestCase delayed => new ProtocolDelayEnumeratedTestCase(delayed, protocol), + _ => new ProtocolTestCase(tc, protocol), + }; + var testCases = await discovery; + List result = []; + foreach (var testCase in testCases.OfType()) { - if (message is ITestFailed testFailed) + var testMethod = testCase.TestMethod; + + if ((testMethod.Method.GetCustomAttributes(typeof(RunPerProtocol)).FirstOrDefault() + ?? testMethod.TestClass.Class.GetCustomAttributes(typeof(RunPerProtocol)).FirstOrDefault()) is RunPerProtocol) + { + result.Add(CreateTestCase(testCase, RedisProtocol.Resp2)); + result.Add(CreateTestCase(testCase, RedisProtocol.Resp3)); + } + else { - var exceptionType = testFailed.ExceptionTypes.FirstOrDefault(); - if (exceptionType == typeof(SkipTestException).FullName) - { - DynamicallySkippedTestCount++; - return InnerBus.QueueMessage(new TestSkipped(testFailed.Test, testFailed.Messages.FirstOrDefault())); - } + // Default to RESP2 everywhere else + result.Add(CreateTestCase(testCase, RedisProtocol.Resp2)); } - return InnerBus.QueueMessage(message); } + return result; } +} + +/// +/// Supports changing culture for the duration of a single test. +/// and with another culture. +/// +/// +/// Based on: https://bartwullems.blogspot.com/2022/03/xunit-change-culture-during-your-test.html. +/// Replaces the culture and UI culture of the current thread with . +/// +/// The name of the culture. +[AttributeUsage(AttributeTargets.Class | AttributeTargets.Method, AllowMultiple = false, Inherited = true)] +public class TestCultureAttribute(string culture) : BeforeAfterTestAttribute +{ + private readonly CultureInfo culture = new CultureInfo(culture, false); + private CultureInfo? originalCulture; - internal static class XUnitExtensions + /// + /// Stores the current and + /// and replaces them with the new cultures defined in the constructor. + /// + /// The method under test. + /// The current . 
+ public override void Before(MethodInfo methodUnderTest, IXunitTest test) { - internal static string StripName(this string name) => - name.Replace("StackExchange.Redis.Tests.", ""); + originalCulture = Thread.CurrentThread.CurrentCulture; + Thread.CurrentThread.CurrentCulture = culture; + CultureInfo.CurrentCulture.ClearCachedData(); + } - public static RunSummary Update(this RunSummary summary, SkippableMessageBus bus) + /// + /// Restores the original to . + /// + /// The method under test. + /// The current . + public override void After(MethodInfo methodUnderTest, IXunitTest test) + { + if (originalCulture is not null) { - if (bus.DynamicallySkippedTestCount > 0) - { - summary.Failed -= bus.DynamicallySkippedTestCount; - summary.Skipped += bus.DynamicallySkippedTestCount; - } - return summary; + Thread.CurrentThread.CurrentCulture = originalCulture; + CultureInfo.CurrentCulture.ClearCachedData(); } } } diff --git a/tests/StackExchange.Redis.Tests/Helpers/Extensions.cs b/tests/StackExchange.Redis.Tests/Helpers/Extensions.cs index ffde26249..6f776d268 100644 --- a/tests/StackExchange.Redis.Tests/Helpers/Extensions.cs +++ b/tests/StackExchange.Redis.Tests/Helpers/Extensions.cs @@ -1,30 +1,24 @@ using System; +using System.Collections.Generic; using System.Runtime.InteropServices; -using Xunit.Abstractions; +using Xunit; -namespace StackExchange.Redis.Tests.Helpers +namespace StackExchange.Redis.Tests.Helpers; + +public static class Extensions { - public static class Extensions + private static string VersionInfo { get; } + + static Extensions() { - private static string VersionInfo { get; } + VersionInfo = $"Running under {RuntimeInformation.FrameworkDescription} ({Environment.Version})"; + } - static Extensions() - { -#if NET462 - VersionInfo = "Compiled under .NET 4.6.2"; -#else - VersionInfo = $"Running under {RuntimeInformation.FrameworkDescription} ({Environment.Version})"; -#endif - try - { - VersionInfo += "\n Running on: " + 
RuntimeInformation.OSDescription; - } - catch (Exception) - { - VersionInfo += "\n Failed to get OS version"; - } - } + public static void WriteFrameworkVersion(this ITestOutputHelper output) => output.WriteLine(VersionInfo); - public static void WriteFrameworkVersion(this ITestOutputHelper output) => output.WriteLine(VersionInfo); + public static ConfigurationOptions WithoutSubscriptions(this ConfigurationOptions options) + { + options.CommandMap = CommandMap.Create(new HashSet() { nameof(RedisCommand.SUBSCRIBE) }, available: false); + return options; } } diff --git a/tests/StackExchange.Redis.Tests/Helpers/InProcServerFixture.cs b/tests/StackExchange.Redis.Tests/Helpers/InProcServerFixture.cs new file mode 100644 index 000000000..9f5a5a59b --- /dev/null +++ b/tests/StackExchange.Redis.Tests/Helpers/InProcServerFixture.cs @@ -0,0 +1,30 @@ +using System; +using StackExchange.Redis.Configuration; +using Xunit; + +[assembly: AssemblyFixture(typeof(StackExchange.Redis.Tests.InProcServerFixture))] + +// ReSharper disable once CheckNamespace +namespace StackExchange.Redis.Tests; + +public class InProcServerFixture : IDisposable +{ + private readonly InProcessTestServer _server = new(); + private readonly ConfigurationOptions _config; + public InProcServerFixture() + { + _config = _server.GetClientConfig(); + Configuration = _config.ToString(); + } + + public ConfigurationOptions Config => _config; + + public string Configuration { get; } + + public Tunnel? 
Tunnel => _server.Tunnel; + + public void Dispose() + { + try { _server.Dispose(); } catch { } + } +} diff --git a/tests/StackExchange.Redis.Tests/Helpers/NonParallelCollection.cs b/tests/StackExchange.Redis.Tests/Helpers/NonParallelCollection.cs index 3932a34e1..ef623c337 100644 --- a/tests/StackExchange.Redis.Tests/Helpers/NonParallelCollection.cs +++ b/tests/StackExchange.Redis.Tests/Helpers/NonParallelCollection.cs @@ -1,10 +1,9 @@ using Xunit; -namespace StackExchange.Redis.Tests +namespace StackExchange.Redis.Tests; + +[CollectionDefinition(Name, DisableParallelization = true)] +public static class NonParallelCollection { - [CollectionDefinition(Name, DisableParallelization = true)] - public static class NonParallelCollection - { - public const string Name = "NonParallel"; - } + public const string Name = "NonParallel"; } diff --git a/tests/StackExchange.Redis.Tests/Helpers/SharedConnectionFixture.cs b/tests/StackExchange.Redis.Tests/Helpers/SharedConnectionFixture.cs new file mode 100644 index 000000000..9656ee45b --- /dev/null +++ b/tests/StackExchange.Redis.Tests/Helpers/SharedConnectionFixture.cs @@ -0,0 +1,290 @@ +using System; +using System.Collections.Concurrent; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Net; +using System.Runtime.CompilerServices; +using System.Threading; +using System.Threading.Tasks; +using StackExchange.Redis.Maintenance; +using StackExchange.Redis.Profiling; +using Xunit; + +[assembly: AssemblyFixture(typeof(StackExchange.Redis.Tests.SharedConnectionFixture))] + +namespace StackExchange.Redis.Tests; + +public class SharedConnectionFixture : IDisposable +{ + public bool IsEnabled { get; } + + private readonly ConnectionMultiplexer _actualConnection; + public string Configuration { get; } + + public SharedConnectionFixture() + { + IsEnabled = TestConfig.Current.UseSharedConnection; + Configuration = TestBase.GetDefaultConfiguration(); + _actualConnection = TestBase.CreateDefault( + 
output: null, + clientName: nameof(SharedConnectionFixture), + configuration: Configuration, + allowAdmin: true); + _actualConnection.InternalError += OnInternalError; + _actualConnection.ConnectionFailed += OnConnectionFailed; + } + + private NonDisposingConnection? resp2, resp3; + internal IInternalConnectionMultiplexer GetConnection(TestBase obj, RedisProtocol protocol, [CallerMemberName] string caller = "") + { + Version? require = protocol == RedisProtocol.Resp3 ? RedisFeatures.v6_0_0 : null; + lock (this) + { + ref NonDisposingConnection? field = ref protocol == RedisProtocol.Resp3 ? ref resp3 : ref resp2; + if (field is { IsConnected: false }) + { + // abandon memoized connection if disconnected + var muxer = field.UnderlyingMultiplexer; + field = null; + muxer.Dispose(); + } + return field ??= VerifyAndWrap(obj.Create(protocol: protocol, require: require, caller: caller, shared: false, allowAdmin: true), protocol); + } + + static NonDisposingConnection VerifyAndWrap(IInternalConnectionMultiplexer muxer, RedisProtocol protocol) + { + var ep = muxer.GetEndPoints().FirstOrDefault(); + Assert.NotNull(ep); + var server = muxer.GetServer(ep); + server.Ping(); + var sep = muxer.GetServerEndPoint(ep); + if (sep.Protocol is null) + { + throw new InvalidOperationException("No RESP protocol; this means no connection?"); + } + Assert.Equal(protocol, sep.Protocol); + Assert.Equal(protocol, server.Protocol); + return new NonDisposingConnection(muxer); + } + } + + internal sealed class NonDisposingConnection(IInternalConnectionMultiplexer inner) : IInternalConnectionMultiplexer + { + public IInternalConnectionMultiplexer UnderlyingConnection => _inner; + + public bool AllowConnect + { + get => _inner.AllowConnect; + set => _inner.AllowConnect = value; + } + + public bool IgnoreConnect + { + get => _inner.IgnoreConnect; + set => _inner.IgnoreConnect = value; + } + + public ServerSelectionStrategy ServerSelectionStrategy => _inner.ServerSelectionStrategy; + + public 
ServerEndPoint GetServerEndPoint(EndPoint endpoint) => _inner.GetServerEndPoint(endpoint); + + public ReadOnlySpan GetServerSnapshot() => _inner.GetServerSnapshot(); + + public ConnectionMultiplexer UnderlyingMultiplexer => _inner.UnderlyingMultiplexer; + + private readonly IInternalConnectionMultiplexer _inner = inner; + + public int GetSubscriptionsCount() => _inner.GetSubscriptionsCount(); + public ConcurrentDictionary GetSubscriptions() => _inner.GetSubscriptions(); + + public void AddLibraryNameSuffix(string suffix) => _inner.AddLibraryNameSuffix(suffix); + + public string ClientName => _inner.ClientName; + + public string Configuration => _inner.Configuration; + + public int TimeoutMilliseconds => _inner.TimeoutMilliseconds; + + public long OperationCount => _inner.OperationCount; + +#pragma warning disable CS0618 // Type or member is obsolete + public bool PreserveAsyncOrder { get => false; set { } } +#pragma warning restore CS0618 + + public bool IsConnected => _inner.IsConnected; + + public bool IsConnecting => _inner.IsConnecting; + + public ConfigurationOptions RawConfig => _inner.RawConfig; + + public bool IncludeDetailInExceptions { get => _inner.RawConfig.IncludeDetailInExceptions; set => _inner.RawConfig.IncludeDetailInExceptions = value; } + + public int StormLogThreshold { get => _inner.StormLogThreshold; set => _inner.StormLogThreshold = value; } + + public event EventHandler ErrorMessage + { + add => _inner.ErrorMessage += value; + remove => _inner.ErrorMessage -= value; + } + + public event EventHandler ConnectionFailed + { + add => _inner.ConnectionFailed += value; + remove => _inner.ConnectionFailed -= value; + } + + public event EventHandler InternalError + { + add => _inner.InternalError += value; + remove => _inner.InternalError -= value; + } + + public event EventHandler ConnectionRestored + { + add => _inner.ConnectionRestored += value; + remove => _inner.ConnectionRestored -= value; + } + + public event EventHandler ConfigurationChanged 
+ { + add => _inner.ConfigurationChanged += value; + remove => _inner.ConfigurationChanged -= value; + } + + public event EventHandler ConfigurationChangedBroadcast + { + add => _inner.ConfigurationChangedBroadcast += value; + remove => _inner.ConfigurationChangedBroadcast -= value; + } + + public event EventHandler HashSlotMoved + { + add => _inner.HashSlotMoved += value; + remove => _inner.HashSlotMoved -= value; + } + + public event EventHandler ServerMaintenanceEvent + { + add => _inner.ServerMaintenanceEvent += value; + remove => _inner.ServerMaintenanceEvent -= value; + } + + public void Close(bool allowCommandsToComplete = true) => _inner.Close(allowCommandsToComplete); + + public Task CloseAsync(bool allowCommandsToComplete = true) => _inner.CloseAsync(allowCommandsToComplete); + + public bool Configure(TextWriter? log = null) => _inner.Configure(log); + + public Task ConfigureAsync(TextWriter? log = null) => _inner.ConfigureAsync(log); + + public void Dispose() { } // DO NOT call _inner.Dispose(); + + public ValueTask DisposeAsync() => default; // DO NOT call _inner.DisposeAsync(); + + public ServerCounters GetCounters() => _inner.GetCounters(); + + public IDatabase GetDatabase(int db = -1, object? asyncState = null) => _inner.GetDatabase(db, asyncState); + + public EndPoint[] GetEndPoints(bool configuredOnly = false) => _inner.GetEndPoints(configuredOnly); + + public int GetHashSlot(RedisKey key) => _inner.GetHashSlot(key); + + public IServer GetServer(string host, int port, object? asyncState = null) => _inner.GetServer(host, port, asyncState); + + public IServer GetServer(string hostAndPort, object? asyncState = null) => _inner.GetServer(hostAndPort, asyncState); + + public IServer GetServer(IPAddress host, int port) => _inner.GetServer(host, port); + + public IServer GetServer(EndPoint endpoint, object? asyncState = null) => _inner.GetServer(endpoint, asyncState); + public IServer GetServer(RedisKey key, object? 
asyncState = null, CommandFlags flags = CommandFlags.None) => _inner.GetServer(key, asyncState, flags); + public IServer[] GetServers() => _inner.GetServers(); + + public string GetStatus() => _inner.GetStatus(); + + public void GetStatus(TextWriter log) => _inner.GetStatus(log); + + public string? GetStormLog() => _inner.GetStormLog(); + + public ISubscriber GetSubscriber(object? asyncState = null) => _inner.GetSubscriber(asyncState); + + public int HashSlot(RedisKey key) => _inner.HashSlot(key); + + public long PublishReconfigure(CommandFlags flags = CommandFlags.None) => _inner.PublishReconfigure(flags); + + public Task PublishReconfigureAsync(CommandFlags flags = CommandFlags.None) => _inner.PublishReconfigureAsync(flags); + + public void RegisterProfiler(Func profilingSessionProvider) => _inner.RegisterProfiler(profilingSessionProvider); + + public void ResetStormLog() => _inner.ResetStormLog(); + + public void Wait(Task task) => _inner.Wait(task); + + public T Wait(Task task) => _inner.Wait(task); + + public void WaitAll(params Task[] tasks) => _inner.WaitAll(tasks); + + public void ExportConfiguration(Stream destination, ExportOptions options = ExportOptions.All) + => _inner.ExportConfiguration(destination, options); + + public override string ToString() => _inner.ToString(); + long? IInternalConnectionMultiplexer.GetConnectionId(EndPoint endPoint, ConnectionType type) + => _inner.GetConnectionId(endPoint, type); + } + + public void Dispose() + { + resp2?.UnderlyingConnection?.Dispose(); + resp3?.UnderlyingConnection?.Dispose(); + GC.SuppressFinalize(this); + } + + protected void OnInternalError(object? sender, InternalErrorEventArgs e) + { + Interlocked.Increment(ref privateFailCount); + lock (privateExceptions) + { + privateExceptions.Add(TestBase.Time() + ": Internal error: " + e.Origin + ", " + EndPointCollection.ToString(e.EndPoint) + "/" + e.ConnectionType); + } + } + protected void OnConnectionFailed(object? 
sender, ConnectionFailedEventArgs e) + { + Interlocked.Increment(ref privateFailCount); + lock (privateExceptions) + { + privateExceptions.Add($"{TestBase.Time()}: Connection failed ({e.FailureType}): {EndPointCollection.ToString(e.EndPoint)}/{e.ConnectionType}: {e.Exception}"); + } + } + private readonly List privateExceptions = []; + private int privateFailCount; + + public void Teardown(TextWriter output) + { + var innerPrivateFailCount = Interlocked.Exchange(ref privateFailCount, 0); + if (innerPrivateFailCount != 0) + { + lock (privateExceptions) + { + foreach (var item in privateExceptions.Take(5)) + { + TestBase.Log(output, item); + } + privateExceptions.Clear(); + } + // Assert.True(false, $"There were {privateFailCount} private ambient exceptions."); + } + + if (_actualConnection != null) + { + TestBase.Log(output, "Connection Counts: " + _actualConnection.GetCounters().ToString()); + foreach (var ep in _actualConnection.GetServerSnapshot()) + { + var interactive = ep.GetBridge(ConnectionType.Interactive); + TestBase.Log(output, $" {Format.ToString(interactive)}: {interactive?.GetStatus()}"); + + var subscription = ep.GetBridge(ConnectionType.Subscription); + TestBase.Log(output, $" {Format.ToString(subscription)}: {subscription?.GetStatus()}"); + } + } + } +} diff --git a/tests/StackExchange.Redis.Tests/Helpers/Skip.cs b/tests/StackExchange.Redis.Tests/Helpers/Skip.cs index d09e5b6d7..72d62a3dc 100644 --- a/tests/StackExchange.Redis.Tests/Helpers/Skip.cs +++ b/tests/StackExchange.Redis.Tests/Helpers/Skip.cs @@ -1,53 +1,29 @@ using System; -using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; +using Xunit; -namespace StackExchange.Redis.Tests +namespace StackExchange.Redis.Tests; + +public static class Skip { - public static class Skip + public static void UnlessLongRunning() { - public static void Inconclusive(string message) => throw new SkipTestException(message); - - public static void IfNoConfig(string prop, string value) - { - if 
(string.IsNullOrEmpty(value)) - { - throw new SkipTestException($"Config.{prop} is not set, skipping test."); - } - } - - public static void IfNoConfig(string prop, List values) - { - if (values == null || values.Count == 0) - { - throw new SkipTestException($"Config.{prop} is not set, skipping test."); - } - } - - public static void IfMissingFeature(IConnectionMultiplexer conn, string feature, Func check) - { - var features = conn.GetServer(conn.GetEndPoints()[0]).Features; - if (!check(features)) - { - throw new SkipTestException($"'{feature}' is not supported on this server.") - { - MissingFeatures = feature - }; - } - } - - internal static void IfMissingDatabase(IConnectionMultiplexer conn, int dbId) - { - var dbCount = conn.GetServer(conn.GetEndPoints()[0]).DatabaseCount; - if (dbId >= dbCount) throw new SkipTestException($"Database '{dbId}' is not supported on this server."); - } + Assert.SkipUnless(TestConfig.Current.RunLongRunning, "Skipping long-running test"); } -#pragma warning disable RCS1194 // Implement exception constructors. - public class SkipTestException : Exception + public static void IfNoConfig(string prop, [NotNull] string? value) { - public string MissingFeatures { get; set; } + Assert.SkipWhen(value.IsNullOrEmpty(), $"Config.{prop} is not set, skipping test."); + } - public SkipTestException(string reason) : base(reason) { } + internal static void IfMissingDatabase(IConnectionMultiplexer conn, int dbId) + { + var dbCount = conn.GetServer(conn.GetEndPoints()[0]).DatabaseCount; + Assert.SkipWhen(dbId >= dbCount, $"Database '{dbId}' is not supported on this server."); } -#pragma warning restore RCS1194 // Implement exception constructors. +} + +public class SkipTestException(string reason) : Exception(reason) +{ + public string? 
MissingFeatures { get; set; } } diff --git a/tests/StackExchange.Redis.Tests/Helpers/TestConfig.cs b/tests/StackExchange.Redis.Tests/Helpers/TestConfig.cs index ec6c21ee4..c0194d5a6 100644 --- a/tests/StackExchange.Redis.Tests/Helpers/TestConfig.cs +++ b/tests/StackExchange.Redis.Tests/Helpers/TestConfig.cs @@ -1,109 +1,134 @@ -using System.IO; -using System; -using Newtonsoft.Json; +using System; +using System.IO; +using System.Linq; +using System.Net.Sockets; using System.Threading; +using Newtonsoft.Json; + +namespace StackExchange.Redis.Tests; -namespace StackExchange.Redis.Tests +public static class TestConfig { - public static class TestConfig - { - private const string FileName = "TestConfig.json"; + private const string FileName = "RedisTestConfig.json"; - public static Config Current { get; } + public static Config Current { get; } - private static int _db = 17; - public static int GetDedicatedDB(IConnectionMultiplexer conn = null) - { - int db = Interlocked.Increment(ref _db); - if (conn != null) Skip.IfMissingDatabase(conn, db); - return db; - } +#if NET + private static int _db = 17; +#else + private static int _db = 77; +#endif + public static int GetDedicatedDB(IConnectionMultiplexer? conn = null) + { + int db = Interlocked.Increment(ref _db); + if (conn != null) Skip.IfMissingDatabase(conn, db); + return db; + } - static TestConfig() + static TestConfig() + { + Current = new Config(); + try { - Current = new Config(); - try + using (var stream = typeof(TestConfig).Assembly.GetManifestResourceStream("StackExchange.Redis.Tests." + FileName)) { - using (var stream = typeof(TestConfig).Assembly.GetManifestResourceStream("StackExchange.Redis.Tests." + FileName)) + if (stream != null) { - if (stream != null) + using (var reader = new StreamReader(stream)) { - using (var reader = new StreamReader(stream)) - { - Current = JsonConvert.DeserializeObject(reader.ReadToEnd()); - } + Current = JsonConvert.DeserializeObject(reader.ReadToEnd()) ?? 
new Config(); } } } - catch (Exception ex) - { - Console.WriteLine("Error Deserializing TestConfig.json: " + ex); - } } + catch (Exception ex) + { + Console.WriteLine("Error Deserializing TestConfig.json: " + ex); + } + } - public class Config + public static bool IsServerRunning(string? host, int port) + { + if (host.IsNullOrEmpty()) + { + return false; + } + + try + { + using var client = new TcpClient(host, port); + return true; + } + catch (SocketException) { - public bool UseSharedConnection { get; set; } = true; - public bool RunLongRunning { get; set; } - public bool LogToConsole { get; set; } - - public string MasterServer { get; set; } = "127.0.0.1"; - public int MasterPort { get; set; } = 6379; - public string MasterServerAndPort => MasterServer + ":" + MasterPort.ToString(); - - public string ReplicaServer { get; set; } = "127.0.0.1"; - public int ReplicaPort { get; set; } = 6380; - public string ReplicaServerAndPort => ReplicaServer + ":" + ReplicaPort.ToString(); - - public string SecureServer { get; set; } = "127.0.0.1"; - public int SecurePort { get; set; } = 6381; - public string SecurePassword { get; set; } = "changeme"; - public string SecureServerAndPort => SecureServer + ":" + SecurePort.ToString(); - - // Separate servers for failover tests, so they don't wreak havoc on all others - public string FailoverMasterServer { get; set; } = "127.0.0.1"; - public int FailoverMasterPort { get; set; } = 6382; - public string FailoverMasterServerAndPort => FailoverMasterServer + ":" + FailoverMasterPort.ToString(); - - public string FailoverReplicaServer { get; set; } = "127.0.0.1"; - public int FailoverReplicaPort { get; set; } = 6383; - public string FailoverReplicaServerAndPort => FailoverReplicaServer + ":" + FailoverReplicaPort.ToString(); - - public string RediSearchServer { get; set; } = "127.0.0.1"; - public int RediSearchPort { get; set; } = 6385; - public string RediSearchServerAndPort => RediSearchServer + ":" + RediSearchPort.ToString(); - - 
public string IPv4Server { get; set; } = "127.0.0.1"; - public int IPv4Port { get; set; } = 6379; - public string IPv6Server { get; set; } = "::1"; - public int IPv6Port { get; set; } = 6379; - - public string RemoteServer { get; set; } = "127.0.0.1"; - public int RemotePort { get; set; } = 6379; - public string RemoteServerAndPort => RemoteServer + ":" + RemotePort.ToString(); - - public string SentinelServer { get; set; } = "127.0.0.1"; - public int SentinelPortA { get; set; } = 26379; - public int SentinelPortB { get; set; } = 26380; - public int SentinelPortC { get; set; } = 26381; - public string SentinelSeviceName { get; set; } = "mymaster"; - - public string ClusterServer { get; set; } = "127.0.0.1"; - public int ClusterStartPort { get; set; } = 7000; - public int ClusterServerCount { get; set; } = 6; - - public string SslServer { get; set; } - public int SslPort { get; set; } - - public string RedisLabsSslServer { get; set; } - public int RedisLabsSslPort { get; set; } = 6379; - public string RedisLabsPfxPath { get; set; } - - public string AzureCacheServer { get; set; } - public string AzureCachePassword { get; set; } - - public string SSDBServer { get; set; } - public int SSDBPort { get; set; } = 8888; + return false; } } + + public class Config + { + public bool UseSharedConnection { get; set; } = true; + public bool RunLongRunning { get; set; } + + public string PrimaryServer { get; set; } = "127.0.0.1"; + public int PrimaryPort { get; set; } = 6379; + public string PrimaryServerAndPort => PrimaryServer + ":" + PrimaryPort.ToString(); + + public string ReplicaServer { get; set; } = "127.0.0.1"; + public int ReplicaPort { get; set; } = 6380; + public string ReplicaServerAndPort => ReplicaServer + ":" + ReplicaPort.ToString(); + + public string SecureServer { get; set; } = "127.0.0.1"; + public int SecurePort { get; set; } = 6381; + public string SecurePassword { get; set; } = "changeme"; + public string SecureServerAndPort => SecureServer + ":" + 
SecurePort.ToString(); + + // Separate servers for failover tests, so they don't wreak havoc on all others + public string FailoverPrimaryServer { get; set; } = "127.0.0.1"; + public int FailoverPrimaryPort { get; set; } = 6382; + public string FailoverPrimaryServerAndPort => FailoverPrimaryServer + ":" + FailoverPrimaryPort.ToString(); + + public string FailoverReplicaServer { get; set; } = "127.0.0.1"; + public int FailoverReplicaPort { get; set; } = 6383; + public string FailoverReplicaServerAndPort => FailoverReplicaServer + ":" + FailoverReplicaPort.ToString(); + + public string IPv4Server { get; set; } = "127.0.0.1"; + public int IPv4Port { get; set; } = 6379; + public string IPv6Server { get; set; } = "::1"; + public int IPv6Port { get; set; } = 6379; + + public string RemoteServer { get; set; } = "127.0.0.1"; + public int RemotePort { get; set; } = 6379; + public string RemoteServerAndPort => RemoteServer + ":" + RemotePort.ToString(); + + public string SentinelServer { get; set; } = "127.0.0.1"; + public int SentinelPortA { get; set; } = 26379; + public int SentinelPortB { get; set; } = 26380; + public int SentinelPortC { get; set; } = 26381; + public string SentinelSeviceName { get; set; } = "myprimary"; + + public string ClusterServer { get; set; } = "127.0.0.1"; + public int ClusterStartPort { get; set; } = 7000; + public int ClusterServerCount { get; set; } = 6; + public string ClusterServersAndPorts => string.Join(",", Enumerable.Range(ClusterStartPort, ClusterServerCount).Select(port => ClusterServer + ":" + port)); + + public string? SslServer { get; set; } = "127.0.0.1"; + public int SslPort { get; set; } = 6384; + public string SslServerAndPort => SslServer + ":" + SslPort.ToString(); + + public string? RedisLabsSslServer { get; set; } + public int RedisLabsSslPort { get; set; } = 6379; + public string? RedisLabsPfxPath { get; set; } + + public string? AzureCacheServer { get; set; } + public string? 
AzureCachePassword { get; set; } + + public string? SSDBServer { get; set; } + public int SSDBPort { get; set; } = 8888; + + public string ProxyServer { get; set; } = "127.0.0.1"; + public int ProxyPort { get; set; } = 7015; + + public string ProxyServerAndPort => ProxyServer + ":" + ProxyPort.ToString(); + } } diff --git a/tests/StackExchange.Redis.Tests/Helpers/TestExtensions.cs b/tests/StackExchange.Redis.Tests/Helpers/TestExtensions.cs new file mode 100644 index 000000000..aab965f98 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/Helpers/TestExtensions.cs @@ -0,0 +1,35 @@ +using StackExchange.Redis.Profiling; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public static class TestExtensions +{ + public static ProfilingSession AddProfiler(this IConnectionMultiplexer mutex) + { + var session = new ProfilingSession(); + mutex.RegisterProfiler(() => session); + return session; + } + + public static RedisProtocol GetProtocol(this ITestContext context) => + context.Test?.TestCase is IProtocolTestCase protocolTestCase + ? 
protocolTestCase.Protocol : RedisProtocol.Resp2; + + public static bool IsResp2(this ITestContext context) => GetProtocol(context) == RedisProtocol.Resp2; + public static bool IsResp3(this ITestContext context) => GetProtocol(context) == RedisProtocol.Resp3; + + public static string KeySuffix(this ITestContext context) => GetProtocol(context) switch + { + RedisProtocol.Resp2 => "R2", + RedisProtocol.Resp3 => "R3", + _ => "", + }; + + public static string GetString(this RedisProtocol protocol) => protocol switch + { + RedisProtocol.Resp2 => "RESP2", + RedisProtocol.Resp3 => "RESP3", + _ => "UnknownProtocolFixMeeeeee", + }; +} diff --git a/tests/StackExchange.Redis.Tests/Helpers/TextWriterOutputHelper.cs b/tests/StackExchange.Redis.Tests/Helpers/TextWriterOutputHelper.cs index 8067147f5..2a23f3246 100644 --- a/tests/StackExchange.Redis.Tests/Helpers/TextWriterOutputHelper.cs +++ b/tests/StackExchange.Redis.Tests/Helpers/TextWriterOutputHelper.cs @@ -1,76 +1,103 @@ using System; using System.IO; using System.Text; -using Xunit.Abstractions; +using Xunit; -namespace StackExchange.Redis.Tests.Helpers +namespace StackExchange.Redis.Tests.Helpers; + +public class TextWriterOutputHelper(ITestOutputHelper outputHelper) : TextWriter { - public class TextWriterOutputHelper : TextWriter + private readonly StringBuilder _buffer = new(2048); + private StringBuilder? Echo { get; set; } + public override Encoding Encoding => Encoding.UTF8; + private readonly ITestOutputHelper Output = outputHelper; + + public void EchoTo(StringBuilder sb) => Echo = sb; + + public void WriteLineNoTime(string? 
value) { - private StringBuilder Buffer { get; } = new StringBuilder(2048); - private StringBuilder Echo { get; set; } - public override Encoding Encoding => Encoding.UTF8; - private readonly ITestOutputHelper Output; - private readonly bool ToConsole; - public TextWriterOutputHelper(ITestOutputHelper outputHelper, bool echoToConsole) + try { - Output = outputHelper; - ToConsole = echoToConsole; + base.WriteLine(value); } + catch (Exception ex) + { + Console.Write("Attempted to write: "); + Console.WriteLine(value); + Console.WriteLine(ex); + } + } - public void EchoTo(StringBuilder sb) => Echo = sb; + public override void WriteLine(string? value) + { + if (value is null) + { + return; + } - public override void WriteLine(string value) + try { - try + lock (_buffer) // keep everything together { - base.Write(TestBase.Time()); - base.Write(": "); base.WriteLine(value); } - catch (Exception ex) - { - Console.Write("Attempted to write: "); - Console.WriteLine(value); - Console.WriteLine(ex); - } } + catch (Exception ex) + { + Console.Write("Attempted to write: "); + Console.WriteLine(value); + Console.WriteLine(ex); + } + } - public override void Write(char value) + public override void Write(char value) + { + lock (_buffer) { if (value == '\n' || value == '\r') { // Ignore empty lines - if (Buffer.Length > 0) + if (_buffer.Length > 0) { FlushBuffer(); } } else { - Buffer.Append(value); + _buffer.Append(value); } } + } - protected override void Dispose(bool disposing) + protected override void Dispose(bool disposing) + { + lock (_buffer) { - if (Buffer.Length > 0) + if (_buffer.Length > 0) { FlushBuffer(); } - base.Dispose(disposing); } - private void FlushBuffer() + base.Dispose(disposing); + } + + private void FlushBuffer() + { + string text; + lock (_buffer) + { + text = _buffer.ToString(); + _buffer.Clear(); + } + try { - var text = Buffer.ToString(); Output.WriteLine(text); - Echo?.AppendLine(text); - if (ToConsole) - { - Console.WriteLine(text); - } - 
Buffer.Clear(); } + catch (InvalidOperationException) + { + // Thrown when writing from a handler after a test has ended - just bail in this case + } + Echo?.AppendLine(text); } } diff --git a/tests/StackExchange.Redis.Tests/Helpers/redis-sharp.cs b/tests/StackExchange.Redis.Tests/Helpers/redis-sharp.cs index c17adf77e..557562b71 100644 --- a/tests/StackExchange.Redis.Tests/Helpers/redis-sharp.cs +++ b/tests/StackExchange.Redis.Tests/Helpers/redis-sharp.cs @@ -9,8 +9,9 @@ // // Copyright 2010 Novell, Inc. // -// Licensed under the same terms of reddis: new BSD license. +// Licensed under the same terms of Redis: new BSD license. // +#nullable disable using System; using System.Collections.Generic; @@ -22,39 +23,32 @@ namespace RedisSharp { - public class Redis : IDisposable + public class Redis(string host, int port) : IDisposable { private Socket socket; private BufferedStream bstream; public enum KeyType { - None, String, List, Set + None, + String, + List, + Set, } -#pragma warning disable RCS1194 // Implement exception constructors. - public class ResponseException : Exception + public class ResponseException(string code) : Exception("Response error") { - public string Code { get; } - public ResponseException(string code) : base("Response error") => Code = code; - } -#pragma warning restore RCS1194 // Implement exception constructors. - - public Redis(string host, int port) - { - Host = host ?? throw new ArgumentNullException(nameof(host)); - Port = port; - SendTimeout = -1; + public string Code { get; } = code; } public Redis(string host) : this(host, 6379) { } public Redis() : this("localhost", 6379) { } - public string Host { get; } - public int Port { get; } + public string Host { get; } = host ?? 
throw new ArgumentNullException(nameof(host)); + public int Port { get; } = port; public int RetryTimeout { get; set; } public int RetryCount { get; set; } - public int SendTimeout { get; set; } + public int SendTimeout { get; set; } = -1; public string Password { get; set; } private int db; @@ -218,7 +212,7 @@ private void Connect() socket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp) { NoDelay = true, - SendTimeout = SendTimeout + SendTimeout = SendTimeout, }; socket.Connect(Host, Port); if (!socket.Connected) @@ -233,7 +227,7 @@ private void Connect() SendExpectSuccess("AUTH {0}\r\n", Password); } - private readonly byte[] end_data = new byte[] { (byte)'\r', (byte)'\n' }; + private readonly byte[] endData = [(byte)'\r', (byte)'\n']; private bool SendDataCommand(byte[] data, string cmd, params object[] args) { @@ -251,7 +245,7 @@ private bool SendDataCommand(byte[] data, string cmd, params object[] args) if (data != null) { socket.Send(data); - socket.Send(end_data); + socket.Send(endData); } } catch (SocketException) @@ -291,7 +285,7 @@ private bool SendCommand(string cmd, params object[] args) } [Conditional("DEBUG")] - private void Log(string fmt, params object[] args) + private static void Log(string fmt, params object[] args) { Console.WriteLine("{0}", string.Format(fmt, args).Trim()); } @@ -381,9 +375,7 @@ private string SendExpectString(string cmd, params object[] args) throw new ResponseException("Unknown reply on integer request: " + c + s); } - // // This one does not throw errors - // private string SendGetString(string cmd, params object[] args) { if (!SendCommand(cmd, args)) @@ -405,7 +397,7 @@ private byte[] ReadData() string r = ReadLine(); Log("R: {0}", r); if (r.Length == 0) - throw new ResponseException("Zero length respose"); + throw new ResponseException("Zero length response"); char c = r[0]; if (c == '-') @@ -436,11 +428,11 @@ private byte[] ReadData() throw new ResponseException("Invalid length"); } - 
//returns the number of matches + // returns the number of matches if (c == '*') { if (int.TryParse(r.Substring(1), out int n)) - return n <= 0 ? new byte[0] : ReadData(); + return n <= 0 ? Array.Empty() : ReadData(); throw new ResponseException("Unexpected length parameter" + r); } @@ -501,18 +493,14 @@ public KeyType TypeOf(string key) { if (key == null) throw new ArgumentNullException(nameof(key)); - switch (SendExpectString("TYPE {0}\r\n", key)) + return SendExpectString("TYPE {0}\r\n", key) switch { - case "none": - return KeyType.None; - case "string": - return KeyType.String; - case "set": - return KeyType.Set; - case "list": - return KeyType.List; - } - throw new ResponseException("Invalid value"); + "none" => KeyType.None, + "string" => KeyType.String, + "set" => KeyType.Set, + "list" => KeyType.List, + _ => throw new ResponseException("Invalid value"), + }; } public string RandomKey() @@ -550,13 +538,7 @@ public int TimeToLive(string key) return SendExpectInt("TTL {0}\r\n", key); } - public int DbSize - { - get - { - return SendExpectInt("DBSIZE\r\n"); - } - } + public int DbSize => SendExpectInt("DBSIZE\r\n"); public string Save() { @@ -616,7 +598,7 @@ public string[] Keys { string commandResponse = Encoding.UTF8.GetString(SendExpectData(null, "KEYS *\r\n")); if (commandResponse.Length < 1) - return new string[0]; + return Array.Empty(); else return commandResponse.Split(' '); } @@ -628,7 +610,7 @@ public string[] GetKeys(string pattern) throw new ArgumentNullException(nameof(pattern)); var keys = SendExpectData(null, "KEYS {0}\r\n", pattern); if (keys.Length == 0) - return new string[0]; + return Array.Empty(); return Encoding.UTF8.GetString(keys).Split(' '); } @@ -637,7 +619,7 @@ public byte[][] GetKeys(params string[] keys) if (keys == null) throw new ArgumentNullException(nameof(keys)); if (keys.Length == 0) - throw new ArgumentException("keys"); + throw new ArgumentOutOfRangeException(nameof(keys)); return SendDataCommandExpectMultiBulkReply(null, 
"MGET {0}\r\n", string.Join(" ", keys)); } @@ -669,7 +651,6 @@ public byte[][] SendDataCommandExpectMultiBulkReply(byte[] data, string command, throw new ResponseException("Unknown reply on multi-request: " + c + s); } - #region List commands public byte[][] ListRange(string key, int start, int end) { return SendDataCommandExpectMultiBulkReply(null, "LRANGE {0} {1} {2}\r\n", key, start, end); @@ -696,9 +677,7 @@ public byte[] LeftPop(string key) SendCommand("LPOP {0}\r\n", key); return ReadData(); } - #endregion - #region Set commands public bool AddToSet(string key, byte[] member) { return SendDataExpectInt(member, "SADD {0} {1}\r\n", key, member.Length) > 0; @@ -752,7 +731,7 @@ public bool RemoveFromSet(string key, string member) public byte[][] GetUnionOfSets(params string[] keys) { if (keys == null) - throw new ArgumentNullException(); + throw new ArgumentNullException(nameof(keys)); return SendDataCommandExpectMultiBulkReply(null, "SUNION " + string.Join(" ", keys) + "\r\n"); } @@ -779,7 +758,7 @@ public void StoreUnionOfSets(string destKey, params string[] keys) public byte[][] GetIntersectionOfSets(params string[] keys) { if (keys == null) - throw new ArgumentNullException(); + throw new ArgumentNullException(nameof(keys)); return SendDataCommandExpectMultiBulkReply(null, "SINTER " + string.Join(" ", keys) + "\r\n"); } @@ -792,7 +771,7 @@ public void StoreIntersectionOfSets(string destKey, params string[] keys) public byte[][] GetDifferenceOfSets(params string[] keys) { if (keys == null) - throw new ArgumentNullException(); + throw new ArgumentNullException(nameof(keys)); return SendDataCommandExpectMultiBulkReply(null, "SDIFF " + string.Join(" ", keys) + "\r\n"); } @@ -806,7 +785,6 @@ public bool MoveMemberToSet(string srcKey, string destKey, byte[] member) { return SendDataExpectInt(member, "SMOVE {0} {1} {2}\r\n", srcKey, destKey, member.Length) > 0; } - #endregion public void Dispose() { diff --git 
a/tests/StackExchange.Redis.Tests/HighIntegrityBasicOpsTests.cs b/tests/StackExchange.Redis.Tests/HighIntegrityBasicOpsTests.cs new file mode 100644 index 000000000..b03f7332c --- /dev/null +++ b/tests/StackExchange.Redis.Tests/HighIntegrityBasicOpsTests.cs @@ -0,0 +1,15 @@ +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class HighIntegrityBasicOpsTests(ITestOutputHelper output, SharedConnectionFixture fixture) : BasicOpsTests(output, fixture) +{ + internal override bool HighIntegrity => true; +} + +/* +public class InProcHighIntegrityBasicOpsTests(ITestOutputHelper output, InProcServerFixture fixture) : InProcBasicOpsTests(output, fixture) +{ + internal override bool HighIntegrity => true; +} +*/ diff --git a/tests/StackExchange.Redis.Tests/HotKeysTests.cs b/tests/StackExchange.Redis.Tests/HotKeysTests.cs new file mode 100644 index 000000000..b8c8c4847 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/HotKeysTests.cs @@ -0,0 +1,365 @@ +using System; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +[RunPerProtocol] +[Collection(NonParallelCollection.Name)] +public class HotKeysClusterTests(ITestOutputHelper output, SharedConnectionFixture fixture) : HotKeysTests(output, fixture) +{ + protected override string GetConfiguration() => TestConfig.Current.ClusterServersAndPorts + ",connectTimeout=10000"; + + [Theory] + [InlineData(true)] + [InlineData(false)] + public void CanUseClusterFilter(bool sample) + { + var key = Me(); + using var muxer = GetServer(key, out var server); + Log($"server: {Format.ToString(server.EndPoint)}, key: '{key}'"); + + var slot = muxer.HashSlot(key); + server.HotKeysStart(slots: [(short)slot], sampleRatio: sample ? 
3 : 1, duration: Duration); + + var db = muxer.GetDatabase(); + db.KeyDelete(key, flags: CommandFlags.FireAndForget); + for (int i = 0; i < 20; i++) + { + db.StringIncrement(key, flags: CommandFlags.FireAndForget); + } + + server.HotKeysStop(); + var result = server.HotKeysGet(); + Assert.NotNull(result); + Assert.True(result.IsSlotFiltered, nameof(result.IsSlotFiltered)); + var slots = result.SelectedSlots; + Assert.Equal(1, slots.Length); + Assert.Equal(slot, slots[0].From); + Assert.Equal(slot, slots[0].To); + + Assert.False(result.CpuByKey.IsEmpty, "Expected at least one CPU result"); + bool found = false; + foreach (var cpu in result.CpuByKey) + { + if (cpu.Key == key) found = true; + } + Assert.True(found, "key not found in CPU results"); + + Assert.False(result.NetworkBytesByKey.IsEmpty, "Expected at least one network result"); + found = false; + foreach (var net in result.NetworkBytesByKey) + { + if (net.Key == key) found = true; + } + Assert.True(found, "key not found in network results"); + + Assert.True(result.AllCommandSelectedSlotsMicroseconds >= 0, nameof(result.AllCommandSelectedSlotsMicroseconds)); + Assert.True(result.TotalCpuTimeUserMicroseconds >= 0, nameof(result.TotalCpuTimeUserMicroseconds)); + + Assert.Equal(sample, result.IsSampled); + if (sample) + { + Assert.Equal(3, result.SampleRatio); + Assert.True(result.SampledCommandsSelectedSlotsMicroseconds >= 0, nameof(result.SampledCommandsSelectedSlotsMicroseconds)); + Assert.True(result.NetworkBytesSampledCommandsSelectedSlotsRaw >= 0, nameof(result.NetworkBytesSampledCommandsSelectedSlotsRaw)); + Assert.True(result.SampledCommandsSelectedSlotsTime.HasValue); + Assert.True(result.SampledCommandsSelectedSlotsNetworkBytes.HasValue); + } + else + { + Assert.Equal(1, result.SampleRatio); + Assert.Equal(-1, result.SampledCommandsSelectedSlotsMicroseconds); + Assert.Equal(-1, result.NetworkBytesSampledCommandsSelectedSlotsRaw); + Assert.False(result.SampledCommandsSelectedSlotsTime.HasValue); + 
Assert.False(result.SampledCommandsSelectedSlotsNetworkBytes.HasValue); + } + + Assert.True(result.AllCommandsSelectedSlotsTime.HasValue); + Assert.True(result.AllCommandsSelectedSlotsNetworkBytes.HasValue); + } +} + +[RunPerProtocol] +[Collection(NonParallelCollection.Name)] +public class HotKeysTests(ITestOutputHelper output, SharedConnectionFixture fixture) : TestBase(output, fixture) +{ + protected TimeSpan Duration => TimeSpan.FromMinutes(1); // ensure we don't leave profiling running + + private protected IConnectionMultiplexer GetServer(out IServer server) + => GetServer(RedisKey.Null, out server); + + private protected IConnectionMultiplexer GetServer(in RedisKey key, out IServer server) + { + var muxer = Create(require: RedisFeatures.v8_6_0, allowAdmin: true); + server = key.IsNull ? muxer.GetServer(muxer.GetEndPoints()[0]) : muxer.GetServer(key); + server.HotKeysStop(CommandFlags.FireAndForget); + server.HotKeysReset(CommandFlags.FireAndForget); + return muxer; + } + + [Fact] + public void GetWhenEmptyIsNull() + { + using var muxer = GetServer(out var server); + Assert.Null(server.HotKeysGet()); + } + + [Fact] + public async Task GetWhenEmptyIsNullAsync() + { + await using var muxer = GetServer(out var server); + Assert.Null(await server.HotKeysGetAsync()); + } + + [Fact] + public void StopWhenNotRunningIsFalse() + { + using var muxer = GetServer(out var server); + Assert.False(server.HotKeysStop()); + } + + [Fact] + public async Task StopWhenNotRunningIsFalseAsync() + { + await using var muxer = GetServer(out var server); + Assert.False(await server.HotKeysStopAsync()); + } + + [Fact] + public void CanStartStopReset() + { + RedisKey key = Me(); + using var muxer = GetServer(key, out var server); + server.HotKeysStart(duration: Duration); + var db = muxer.GetDatabase(); + db.KeyDelete(key, flags: CommandFlags.FireAndForget); + for (int i = 0; i < 20; i++) + { + db.StringIncrement(key, flags: CommandFlags.FireAndForget); + } + + var result = 
server.HotKeysGet(); + Assert.NotNull(result); + Assert.True(result.TrackingActive); + CheckSimpleWithKey(key, result, server); + + Assert.True(server.HotKeysStop()); + result = server.HotKeysGet(); + Assert.NotNull(result); + Assert.False(result.TrackingActive); + CheckSimpleWithKey(key, result, server); + + server.HotKeysReset(); + result = server.HotKeysGet(); + Assert.Null(result); + } + + private void CheckSimpleWithKey(RedisKey key, HotKeysResult hotKeys, IServer server) + { + Assert.Equal(HotKeysMetrics.Cpu | HotKeysMetrics.Network, hotKeys.Metrics); + Assert.True(hotKeys.CollectionDurationMicroseconds >= 0, nameof(hotKeys.CollectionDurationMicroseconds)); + Assert.True(hotKeys.CollectionStartTimeUnixMilliseconds >= 0, nameof(hotKeys.CollectionStartTimeUnixMilliseconds)); + + Assert.False(hotKeys.CpuByKey.IsEmpty, "Expected at least one CPU result"); + bool found = false; + foreach (var cpu in hotKeys.CpuByKey) + { + Assert.True(cpu.DurationMicroseconds >= 0, nameof(cpu.DurationMicroseconds)); + if (cpu.Key == key) found = true; + } + Assert.True(found, "key not found in CPU results"); + + Assert.False(hotKeys.NetworkBytesByKey.IsEmpty, "Expected at least one network result"); + found = false; + foreach (var net in hotKeys.NetworkBytesByKey) + { + Assert.True(net.Bytes > 0, nameof(net.Bytes)); + if (net.Key == key) found = true; + } + Assert.True(found, "key not found in network results"); + + Assert.Equal(1, hotKeys.SampleRatio); + Assert.False(hotKeys.IsSampled, nameof(hotKeys.IsSampled)); + Assert.False(hotKeys.IsSlotFiltered, nameof(hotKeys.IsSlotFiltered)); + + if (server.ServerType is ServerType.Cluster) + { + Assert.NotEqual(0, hotKeys.SelectedSlots.Length); + Log("Cluster mode detected; not enforcing slots, but:"); + foreach (var slot in hotKeys.SelectedSlots) + { + Log($" {slot}"); + } + } + else + { + Assert.Equal(1, hotKeys.SelectedSlots.Length); + var slots = hotKeys.SelectedSlots[0]; + Assert.Equal(SlotRange.MinSlot, slots.From); + 
Assert.Equal(SlotRange.MaxSlot, slots.To); + } + + Assert.True(hotKeys.AllCommandsAllSlotsMicroseconds >= 0, nameof(hotKeys.AllCommandsAllSlotsMicroseconds)); + Assert.True(hotKeys.TotalCpuTimeSystemMicroseconds >= 0, nameof(hotKeys.TotalCpuTimeSystemMicroseconds)); + Assert.True(hotKeys.TotalCpuTimeUserMicroseconds >= 0, nameof(hotKeys.TotalCpuTimeUserMicroseconds)); + Assert.True(hotKeys.AllCommandsAllSlotsNetworkBytes > 0, nameof(hotKeys.AllCommandsAllSlotsNetworkBytes)); + Assert.True(hotKeys.TotalNetworkBytes > 0, nameof(hotKeys.TotalNetworkBytes)); + + Assert.False(hotKeys.AllCommandsSelectedSlotsTime.HasValue); + Assert.False(hotKeys.AllCommandsSelectedSlotsNetworkBytes.HasValue); + Assert.False(hotKeys.SampledCommandsSelectedSlotsTime.HasValue); + Assert.False(hotKeys.SampledCommandsSelectedSlotsNetworkBytes.HasValue); + } + + [Fact] + public async Task CanStartStopResetAsync() + { + RedisKey key = Me(); + await using var muxer = GetServer(key, out var server); + await server.HotKeysStartAsync(duration: Duration); + var db = muxer.GetDatabase(); + await db.KeyDeleteAsync(key, flags: CommandFlags.FireAndForget); + for (int i = 0; i < 20; i++) + { + await db.StringIncrementAsync(key, flags: CommandFlags.FireAndForget); + } + + var result = await server.HotKeysGetAsync(); + Assert.NotNull(result); + Assert.True(result.TrackingActive); + CheckSimpleWithKey(key, result, server); + + Assert.True(await server.HotKeysStopAsync()); + result = await server.HotKeysGetAsync(); + Assert.NotNull(result); + Assert.False(result.TrackingActive); + CheckSimpleWithKey(key, result, server); + + await server.HotKeysResetAsync(); + result = await server.HotKeysGetAsync(); + Assert.Null(result); + } + + [Fact] + public async Task DurationFilterAsync() + { + Skip.UnlessLongRunning(); // time-based tests are horrible + + RedisKey key = Me(); + await using var muxer = GetServer(key, out var server); + await server.HotKeysStartAsync(duration: TimeSpan.FromSeconds(1)); + var db = 
muxer.GetDatabase(); + await db.KeyDeleteAsync(key, flags: CommandFlags.FireAndForget); + for (int i = 0; i < 20; i++) + { + await db.StringIncrementAsync(key, flags: CommandFlags.FireAndForget); + } + var before = await server.HotKeysGetAsync(); + await Task.Delay(TimeSpan.FromSeconds(2)); + var after = await server.HotKeysGetAsync(); + + Assert.NotNull(before); + Assert.True(before.TrackingActive); + + Assert.NotNull(after); + Assert.False(after.TrackingActive); + + var millis = after.CollectionDuration.TotalMilliseconds; + Log($"Duration: {millis}ms"); + Assert.True(millis > 900 && millis < 1100); + } + + [Theory] + [InlineData(HotKeysMetrics.Cpu)] + [InlineData(HotKeysMetrics.Network)] + [InlineData(HotKeysMetrics.Network | HotKeysMetrics.Cpu)] + public async Task MetricsChoiceAsync(HotKeysMetrics metrics) + { + RedisKey key = Me(); + await using var muxer = GetServer(key, out var server); + await server.HotKeysStartAsync(metrics, duration: Duration); + var db = muxer.GetDatabase(); + await db.KeyDeleteAsync(key, flags: CommandFlags.FireAndForget); + for (int i = 0; i < 20; i++) + { + await db.StringIncrementAsync(key, flags: CommandFlags.FireAndForget); + } + await server.HotKeysStopAsync(flags: CommandFlags.FireAndForget); + var result = await server.HotKeysGetAsync(); + Assert.NotNull(result); + Assert.Equal(metrics, result.Metrics); + + bool cpu = (metrics & HotKeysMetrics.Cpu) != 0; + bool net = (metrics & HotKeysMetrics.Network) != 0; + + Assert.NotEqual(cpu, result.CpuByKey.IsEmpty); + Assert.Equal(cpu, result.TotalCpuTimeSystem.HasValue); + Assert.Equal(cpu, result.TotalCpuTimeUser.HasValue); + Assert.Equal(cpu, result.TotalCpuTime.HasValue); + + Assert.NotEqual(net, result.NetworkBytesByKey.IsEmpty); + Assert.Equal(net, result.TotalNetworkBytes.HasValue); + } + + [Fact] + public async Task SampleRatioUsageAsync() + { + RedisKey key = Me(); + await using var muxer = GetServer(key, out var server); + await server.HotKeysStartAsync(sampleRatio: 3, 
duration: Duration); + var db = muxer.GetDatabase(); + await db.KeyDeleteAsync(key, flags: CommandFlags.FireAndForget); + for (int i = 0; i < 20; i++) + { + await db.StringIncrementAsync(key, flags: CommandFlags.FireAndForget); + } + + await server.HotKeysStopAsync(flags: CommandFlags.FireAndForget); + var result = await server.HotKeysGetAsync(); + Assert.NotNull(result); + Assert.True(result.IsSampled, nameof(result.IsSampled)); + Assert.Equal(3, result.SampleRatio); + Assert.True(result.TotalNetworkBytes.HasValue); + Assert.True(result.TotalCpuTime.HasValue); + } + + [Fact] + public void NonNegativeMicroseconds_ConvertsCorrectly() + { + // Test case: 103 microseconds should convert to 103 microseconds in TimeSpan + // 103 microseconds = 103 * 10 ticks = 1030 ticks = 0.103 milliseconds + long inputMicroseconds = 103; + TimeSpan result = HotKeysResult.NonNegativeMicroseconds(inputMicroseconds); + + // Expected: 1030 ticks (103 microseconds = 0.103 milliseconds) + Assert.Equal(1030, result.Ticks); + Assert.Equal(0.103, result.TotalMilliseconds, precision: 10); + } + + [Fact] + public void NonNegativeMicroseconds_HandlesZero() + { + TimeSpan result = HotKeysResult.NonNegativeMicroseconds(0); + Assert.Equal(TimeSpan.Zero, result); + } + + [Fact] + public void NonNegativeMicroseconds_HandlesNegativeAsZero() + { + TimeSpan result = HotKeysResult.NonNegativeMicroseconds(-100); + Assert.Equal(TimeSpan.Zero, result); + } + + [Fact] + public void NonNegativeMicroseconds_HandlesLargeValues() + { + // 1 second = 1,000,000 microseconds = 10,000,000 ticks = 1000 milliseconds + long inputMicroseconds = 1_000_000; + TimeSpan result = HotKeysResult.NonNegativeMicroseconds(inputMicroseconds); + + Assert.Equal(10_000_000, result.Ticks); + Assert.Equal(1000.0, result.TotalMilliseconds, precision: 10); + Assert.Equal(1.0, result.TotalSeconds, precision: 10); + } +} diff --git a/tests/StackExchange.Redis.Tests/HttpTunnelConnectTests.cs 
b/tests/StackExchange.Redis.Tests/HttpTunnelConnectTests.cs new file mode 100644 index 000000000..4099c7b94 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/HttpTunnelConnectTests.cs @@ -0,0 +1,61 @@ +using System; +using System.Diagnostics; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests +{ + public class HttpTunnelConnectTests(ITestOutputHelper log) + { + private ITestOutputHelper Log { get; } = log; + + [Theory] + [InlineData("")] + [InlineData(",tunnel=http:127.0.0.1:8080")] + public async Task Connect(string suffix) + { + var cs = Environment.GetEnvironmentVariable("HACK_TUNNEL_ENDPOINT"); + if (string.IsNullOrWhiteSpace(cs)) + { + Assert.Skip("Need HACK_TUNNEL_ENDPOINT environment variable"); + } + var config = ConfigurationOptions.Parse(cs + suffix); + if (!string.IsNullOrWhiteSpace(suffix)) + { + Assert.NotNull(config.Tunnel); + } + await using var conn = await ConnectionMultiplexer.ConnectAsync(config); + var db = conn.GetDatabase(); + await db.PingAsync(); + RedisKey key = "HttpTunnel"; + await db.KeyDeleteAsync(key); + + // latency test + var watch = Stopwatch.StartNew(); + const int LATENCY_LOOP = 25, BANDWIDTH_LOOP = 10; + for (int i = 0; i < LATENCY_LOOP; i++) + { + await db.StringIncrementAsync(key); + } + watch.Stop(); + int count = (int)await db.StringGetAsync(key); + Log.WriteLine($"{LATENCY_LOOP}xINCR: {watch.ElapsedMilliseconds}ms"); + Assert.Equal(LATENCY_LOOP, count); + + // bandwidth test + var chunk = new byte[4096]; + var rand = new Random(1234); + for (int i = 0; i < BANDWIDTH_LOOP; i++) + { + rand.NextBytes(chunk); + watch = Stopwatch.StartNew(); + await db.StringSetAsync(key, chunk); + using var fetch = await db.StringGetLeaseAsync(key); + watch.Stop(); + Assert.NotNull(fetch); + Log.WriteLine($"SET+GET {chunk.Length} bytes: {watch.ElapsedMilliseconds}ms"); + Assert.True(fetch.Span.SequenceEqual(chunk)); + } + } + } +} diff --git a/tests/StackExchange.Redis.Tests/HyperLogLog.cs 
b/tests/StackExchange.Redis.Tests/HyperLogLog.cs deleted file mode 100644 index ed883bf2d..000000000 --- a/tests/StackExchange.Redis.Tests/HyperLogLog.cs +++ /dev/null @@ -1,43 +0,0 @@ -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - [Collection(SharedConnectionFixture.Key)] - public class HyperLogLog : TestBase - { - public HyperLogLog(ITestOutputHelper output, SharedConnectionFixture fixture) : base (output, fixture) { } - - [Fact] - public void SingleKeyLength() - { - using (var conn = Create()) - { - var db = conn.GetDatabase(); - RedisKey key = "hll1"; - - db.HyperLogLogAdd(key, "a"); - db.HyperLogLogAdd(key, "b"); - db.HyperLogLogAdd(key, "c"); - - Assert.True(db.HyperLogLogLength(key) > 0); - } - } - - [Fact] - public void MultiKeyLength() - { - using (var conn = Create()) - { - var db = conn.GetDatabase(); - RedisKey[] keys = { "hll1", "hll2", "hll3" }; - - db.HyperLogLogAdd(keys[0], "a"); - db.HyperLogLogAdd(keys[1], "b"); - db.HyperLogLogAdd(keys[2], "c"); - - Assert.True(db.HyperLogLogLength(keys) > 0); - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/HyperLogLogTests.cs b/tests/StackExchange.Redis.Tests/HyperLogLogTests.cs new file mode 100644 index 000000000..f4c259854 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/HyperLogLogTests.cs @@ -0,0 +1,38 @@ +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +[RunPerProtocol] +public class HyperLogLogTests(ITestOutputHelper output, SharedConnectionFixture fixture) : TestBase(output, fixture) +{ + [Fact] + public async Task SingleKeyLength() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + RedisKey key = "hll1"; + + db.HyperLogLogAdd(key, "a"); + db.HyperLogLogAdd(key, "b"); + db.HyperLogLogAdd(key, "c"); + + Assert.True(db.HyperLogLogLength(key) > 0); + } + + [Fact] + public async Task MultiKeyLength() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + RedisKey[] keys = 
["hll1", "hll2", "hll3"]; + + db.HyperLogLogAdd(keys[0], "a"); + db.HyperLogLogAdd(keys[1], "b"); + db.HyperLogLogAdd(keys[2], "c"); + + Assert.True(db.HyperLogLogLength(keys) > 0); + } +} diff --git a/tests/StackExchange.Redis.Tests/InProcessTestServer.cs b/tests/StackExchange.Redis.Tests/InProcessTestServer.cs new file mode 100644 index 000000000..6f80215dd --- /dev/null +++ b/tests/StackExchange.Redis.Tests/InProcessTestServer.cs @@ -0,0 +1,252 @@ +extern alias respite; +using System; +using System.IO; +using System.IO.Pipelines; +using System.Net; +using System.Net.Sockets; +using System.Text; +using System.Threading; +using System.Threading.Tasks; +using respite::RESPite.Messages; +using StackExchange.Redis.Configuration; +using StackExchange.Redis.Server; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class InProcessTestServer : MemoryCacheRedisServer +{ + private readonly ITestOutputHelper? _log; + public InProcessTestServer(ITestOutputHelper? log = null) + { + RedisVersion = RedisFeatures.v6_0_0; // for client to expect RESP3 + _log = log; + // ReSharper disable once VirtualMemberCallInConstructor + _log?.WriteLine($"Creating in-process server: {ToString()}"); + Tunnel = new InProcTunnel(this); + } + + public Task ConnectAsync(bool withPubSub = true /*, WriteMode writeMode = WriteMode.Default */, TextWriter? log = null) + => ConnectionMultiplexer.ConnectAsync(GetClientConfig(withPubSub /*, writeMode */), log); + + // view request/response highlights in the log + public override TypedRedisValue Execute(RedisClient client, in RedisRequest request) + { + var result = base.Execute(client, in request); + var type = client.ApplyProtocol(result.Type); + if (result.IsNil) + { + Log($"[{client}] {request.Command} (no reply)"); + } + else if (result.IsAggregate) + { + Log($"[{client}] {request.Command} => {(char)type}{result.Span.Length}"); + } + else + { + try + { + var s = result.AsRedisValue().ToString() ?? 
"(null)"; + const int MAX_CHARS = 32; + s = s.Length <= MAX_CHARS ? s : s.Substring(0, MAX_CHARS) + "..."; + Log($"[{client}] {request.Command} => {(char)type}{s}"); + } + catch + { + Log($"[{client}] {request.Command} => {(char)type}"); + } + } + return result; + } + + public ConfigurationOptions GetClientConfig(bool withPubSub = true /*, WriteMode writeMode = WriteMode.Default */) + { + var commands = GetCommands(); + if (!withPubSub) + { + commands.Remove(nameof(RedisCommand.SUBSCRIBE)); + commands.Remove(nameof(RedisCommand.PSUBSCRIBE)); + commands.Remove(nameof(RedisCommand.SSUBSCRIBE)); + commands.Remove(nameof(RedisCommand.UNSUBSCRIBE)); + commands.Remove(nameof(RedisCommand.PUNSUBSCRIBE)); + commands.Remove(nameof(RedisCommand.SUNSUBSCRIBE)); + commands.Remove(nameof(RedisCommand.PUBLISH)); + commands.Remove(nameof(RedisCommand.SPUBLISH)); + } + + var config = new ConfigurationOptions + { + CommandMap = CommandMap.Create(commands), + ConfigurationChannel = "", + TieBreaker = "", + DefaultVersion = RedisVersion, + ConnectTimeout = 10000, + SyncTimeout = 5000, + AsyncTimeout = 5000, + AllowAdmin = true, + Tunnel = Tunnel, + Protocol = TestContext.Current.GetProtocol(), + // WriteMode = (BufferedStreamWriter.WriteMode)writeMode, + }; + if (!string.IsNullOrEmpty(Password)) config.Password = Password; + + /* useful for viewing *outbound* data in the log +#if DEBUG + if (_log is not null) + { + config.OutputLog = msg => + { + lock (_log) + { + _log.WriteLine(msg); + } + }; + } +#endif + */ + + foreach (var endpoint in GetEndPoints()) + { + config.EndPoints.Add(endpoint); + } + return config; + } + + public Tunnel Tunnel { get; } + + public override void Log(string message) + { + _log?.WriteLine(message); + base.Log(message); + } + + protected override void OnMoved(RedisClient client, int hashSlot, Node node) + { + _log?.WriteLine($"[{client}] being redirected: slot {hashSlot} to {node}"); + base.OnMoved(client, hashSlot, node); + } + + protected override void 
OnOutOfBand(RedisClient client, TypedRedisValue message) + { + var type = client.ApplyProtocol(message.Type); + if (message.IsAggregate + && message.Span is { IsEmpty: false } span + && !span[0].IsAggregate) + { + _log?.WriteLine($"[{client}] => {(char)type}{message.Span.Length} {span[0].AsRedisValue()}"); + } + else + { + _log?.WriteLine($"[{client}] => {(char)type}"); + } + + base.OnOutOfBand(client, message); + } + + /* + public override void OnFlush(RedisClient client, int messages, long bytes) + { + if (bytes >= 0) + { + _log?.WriteLine($"[{client}] flushed {messages} messages, {bytes} bytes"); + } + else + { + _log?.WriteLine($"[{client}] flushed {messages} messages"); // bytes not available + } + base.OnFlush(client, messages, bytes); + } + */ + + public override TypedRedisValue OnUnknownCommand(in RedisClient client, in RedisRequest request, ReadOnlySpan command) + { + _log?.WriteLine($"[{client}] unknown command: {Encoding.ASCII.GetString(command)}"); + return base.OnUnknownCommand(in client, in request, command); + } + + public override void OnClientConnected(RedisClient client, object state) + { + if (state is TaskCompletionSource pending) + { + pending.TrySetResult(client); + } + base.OnClientConnected(client, state); + } + + private sealed class InProcTunnel( + InProcessTestServer server, + PipeOptions? pipeOptions = null) : Tunnel + { + public override ValueTask GetSocketConnectEndpointAsync( + EndPoint endpoint, + CancellationToken cancellationToken) + { + if (server.TryGetNode(endpoint, out _)) + { + // server._log?.WriteLine($"Disabling client creation, requested endpoint: {Format.ToString(endpoint)}"); + return default; + } + return base.GetSocketConnectEndpointAsync(endpoint, cancellationToken); + } + + public override ValueTask BeforeAuthenticateAsync( + EndPoint endpoint, + ConnectionType connectionType, + Socket? 
socket, + CancellationToken cancellationToken) + { + if (server.TryGetNode(endpoint, out var node)) + { + var clientToServer = new Pipe(pipeOptions ?? PipeOptions.Default); + var serverToClient = new Pipe(pipeOptions ?? PipeOptions.Default); + var serverSide = new Duplex(clientToServer.Reader, serverToClient.Writer); + + TaskCompletionSource clientTcs = new(TaskCreationOptions.RunContinuationsAsynchronously); + Task.Run(async () => await server.RunClientAsync(serverSide, node: node, state: clientTcs), cancellationToken).RedisFireAndForget(); + if (!clientTcs.Task.Wait(1000)) throw new TimeoutException("Client not connected"); + var client = clientTcs.Task.Result; + server._log?.WriteLine( + $"[{client}] connected ({connectionType} mapped to {server.ServerType} node {node})"); + + var readStream = serverToClient.Reader.AsStream(); + var writeStream = clientToServer.Writer.AsStream(); + var clientSide = new DuplexStream(readStream, writeStream); + return new(clientSide); + } + return base.BeforeAuthenticateAsync(endpoint, connectionType, socket, cancellationToken); + } + + private sealed class Duplex(PipeReader input, PipeWriter output) : IDuplexPipe + { + public PipeReader Input => input; + public PipeWriter Output => output; + + public ValueTask Dispose() + { + input.Complete(); + output.Complete(); + return default; + } + } + } + + /* + + private readonly RespServer _server; + public RespSocketServer(RespServer server) + { + _server = server ?? 
throw new ArgumentNullException(nameof(server)); + server.Shutdown.ContinueWith((_, o) => ((SocketServer)o).Dispose(), this); + } + protected override void OnStarted(EndPoint endPoint) + => _server.Log("Server is listening on " + endPoint); + + protected override Task OnClientConnectedAsync(in ClientConnection client) + => _server.RunClientAsync(client.Transport); + + protected override void Dispose(bool disposing) + { + if (disposing) _server.Dispose(); + } + */ +} diff --git a/tests/StackExchange.Redis.Tests/InfoReplicationCheckTests.cs b/tests/StackExchange.Redis.Tests/InfoReplicationCheckTests.cs new file mode 100644 index 000000000..03b9f5b7f --- /dev/null +++ b/tests/StackExchange.Redis.Tests/InfoReplicationCheckTests.cs @@ -0,0 +1,25 @@ +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class InfoReplicationCheckTests(ITestOutputHelper output) : TestBase(output) +{ + protected override string GetConfiguration() => base.GetConfiguration() + ",configCheckSeconds=2"; + + [Fact] + public async Task Exec() + { + Assert.Skip("need to think about CompletedSynchronously"); + + await using var conn = Create(); + + var parsed = ConfigurationOptions.Parse(conn.Configuration); + Assert.Equal(2, parsed.ConfigCheckSeconds); + var before = conn.GetCounters(); + await Task.Delay(7000).ForAwait(); + var after = conn.GetCounters(); + int done = (int)(after.Interactive.CompletedSynchronously - before.Interactive.CompletedSynchronously); + Assert.True(done >= 2, $"expected >=2, got {done}"); + } +} diff --git a/tests/StackExchange.Redis.Tests/Issues/BgSaveResponse.cs b/tests/StackExchange.Redis.Tests/Issues/BgSaveResponse.cs deleted file mode 100644 index 76de9f55f..000000000 --- a/tests/StackExchange.Redis.Tests/Issues/BgSaveResponse.cs +++ /dev/null @@ -1,22 +0,0 @@ -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests.Issues -{ - public class BgSaveResponse : TestBase - { - public 
BgSaveResponse(ITestOutputHelper output) : base (output) { } - - [Theory (Skip = "We don't need to test this, and it really screws local testing hard.")] - [InlineData(SaveType.BackgroundSave)] - [InlineData(SaveType.BackgroundRewriteAppendOnlyFile)] - public void ShouldntThrowException(SaveType saveType) - { - using (var conn = Create(null, null, true)) - { - var Server = GetServer(conn); - Server.Save(saveType); - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/Issues/BgSaveResponseTests.cs b/tests/StackExchange.Redis.Tests/Issues/BgSaveResponseTests.cs new file mode 100644 index 000000000..15e4c6ef3 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/Issues/BgSaveResponseTests.cs @@ -0,0 +1,19 @@ +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests.Issues; + +public class BgSaveResponseTests(ITestOutputHelper output) : TestBase(output) +{ + [Theory(Skip = "We don't need to test this, and it really screws local testing hard.")] + [InlineData(SaveType.BackgroundSave)] + [InlineData(SaveType.BackgroundRewriteAppendOnlyFile)] + public async Task ShouldntThrowException(SaveType saveType) + { + await using var conn = Create(allowAdmin: true); + + var server = GetServer(conn); + server.Save(saveType); + await Task.Delay(1000); + } +} diff --git a/tests/StackExchange.Redis.Tests/Issues/DefaultDatabase.cs b/tests/StackExchange.Redis.Tests/Issues/DefaultDatabase.cs deleted file mode 100644 index 5166c5a0a..000000000 --- a/tests/StackExchange.Redis.Tests/Issues/DefaultDatabase.cs +++ /dev/null @@ -1,59 +0,0 @@ -using System.IO; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests.Issues -{ - public class DefaultDatabase : TestBase - { - public DefaultDatabase(ITestOutputHelper output) : base (output) { } - - [Fact] - public void UnspecifiedDbId_ReturnsNull() - { - var config = ConfigurationOptions.Parse("localhost"); - Assert.Null(config.DefaultDatabase); - } - - [Fact] - public void 
SpecifiedDbId_ReturnsExpected() - { - var config = ConfigurationOptions.Parse("localhost,defaultDatabase=3"); - Assert.Equal(3, config.DefaultDatabase); - } - - [Fact] - public void ConfigurationOptions_UnspecifiedDefaultDb() - { - var log = new StringWriter(); - try - { - using (var conn = ConnectionMultiplexer.Connect(TestConfig.Current.MasterServerAndPort, log)) { - var db = conn.GetDatabase(); - Assert.Equal(0, db.Database); - } - } - finally - { - Log(log.ToString()); - } - } - - [Fact] - public void ConfigurationOptions_SpecifiedDefaultDb() - { - var log = new StringWriter(); - try - { - using (var conn = ConnectionMultiplexer.Connect($"{TestConfig.Current.MasterServerAndPort},defaultDatabase=3", log)) { - var db = conn.GetDatabase(); - Assert.Equal(3, db.Database); - } - } - finally - { - Log(log.ToString()); - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/Issues/DefaultDatabaseTests.cs b/tests/StackExchange.Redis.Tests/Issues/DefaultDatabaseTests.cs new file mode 100644 index 000000000..9666c91a2 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/Issues/DefaultDatabaseTests.cs @@ -0,0 +1,54 @@ +using System.IO; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests.Issues; + +public class DefaultDatabaseTests(ITestOutputHelper output) : TestBase(output) +{ + [Fact] + public void UnspecifiedDbId_ReturnsNull() + { + var config = ConfigurationOptions.Parse("localhost"); + Assert.Null(config.DefaultDatabase); + } + + [Fact] + public void SpecifiedDbId_ReturnsExpected() + { + var config = ConfigurationOptions.Parse("localhost,defaultDatabase=3"); + Assert.Equal(3, config.DefaultDatabase); + } + + [Fact] + public async Task ConfigurationOptions_UnspecifiedDefaultDb() + { + var log = new StringWriter(); + try + { + await using var conn = await ConnectionMultiplexer.ConnectAsync(TestConfig.Current.PrimaryServerAndPort, log); + var db = conn.GetDatabase(); + Assert.Equal(0, db.Database); + } + finally + { + 
Log(log.ToString()); + } + } + + [Fact] + public async Task ConfigurationOptions_SpecifiedDefaultDb() + { + var log = new StringWriter(); + try + { + await using var conn = await ConnectionMultiplexer.ConnectAsync($"{TestConfig.Current.PrimaryServerAndPort},defaultDatabase=3", log); + var db = conn.GetDatabase(); + Assert.Equal(3, db.Database); + } + finally + { + Log(log.ToString()); + } + } +} diff --git a/tests/StackExchange.Redis.Tests/Issues/Issue10.cs b/tests/StackExchange.Redis.Tests/Issues/Issue10.cs deleted file mode 100644 index 252fefa51..000000000 --- a/tests/StackExchange.Redis.Tests/Issues/Issue10.cs +++ /dev/null @@ -1,31 +0,0 @@ -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests.Issues -{ - public class Issue10 : TestBase - { - public Issue10(ITestOutputHelper output) : base(output) { } - - [Fact] - public void Execute() - { - using (var muxer = Create()) - { - var key = Me(); - var conn = muxer.GetDatabase(); - conn.KeyDeleteAsync(key); // contents: nil - conn.ListLeftPushAsync(key, "abc"); // "abc" - conn.ListLeftPushAsync(key, "def"); // "def", "abc" - conn.ListLeftPushAsync(key, "ghi"); // "ghi", "def", "abc", - conn.ListSetByIndexAsync(key, 1, "jkl"); // "ghi", "jkl", "abc" - - var contents = conn.Wait(conn.ListRangeAsync(key, 0, -1)); - Assert.Equal(3, contents.Length); - Assert.Equal("ghi", contents[0]); - Assert.Equal("jkl", contents[1]); - Assert.Equal("abc", contents[2]); - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/Issues/Issue10Tests.cs b/tests/StackExchange.Redis.Tests/Issues/Issue10Tests.cs new file mode 100644 index 000000000..0a2f3fa8f --- /dev/null +++ b/tests/StackExchange.Redis.Tests/Issues/Issue10Tests.cs @@ -0,0 +1,27 @@ +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests.Issues; + +public class Issue10Tests(ITestOutputHelper output) : TestBase(output) +{ + [Fact] + public async Task Execute() + { + await using var conn = Create(); + + var key = Me(); + 
var db = conn.GetDatabase(); + _ = db.KeyDeleteAsync(key); // contents: nil + _ = db.ListLeftPushAsync(key, "abc"); // "abc" + _ = db.ListLeftPushAsync(key, "def"); // "def", "abc" + _ = db.ListLeftPushAsync(key, "ghi"); // "ghi", "def", "abc", + _ = db.ListSetByIndexAsync(key, 1, "jkl"); // "ghi", "jkl", "abc" + + var contents = await db.ListRangeAsync(key, 0, -1); + Assert.Equal(3, contents.Length); + Assert.Equal("ghi", contents[0]); + Assert.Equal("jkl", contents[1]); + Assert.Equal("abc", contents[2]); + } +} diff --git a/tests/StackExchange.Redis.Tests/Issues/Issue1101.cs b/tests/StackExchange.Redis.Tests/Issues/Issue1101.cs deleted file mode 100644 index 9c1247b7f..000000000 --- a/tests/StackExchange.Redis.Tests/Issues/Issue1101.cs +++ /dev/null @@ -1,193 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Linq; -using System.Threading; -using System.Threading.Tasks; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests.Issues -{ - public class Issue1101 : TestBase - { - public Issue1101(ITestOutputHelper output) : base(output) { } - - static void AssertCounts(ISubscriber pubsub, in RedisChannel channel, - bool has, int handlers, int queues) - { - var aHas = ((RedisSubscriber)pubsub).GetSubscriberCounts(channel, out var ah, out var aq); - Assert.Equal(has, aHas); - Assert.Equal(handlers, ah); - Assert.Equal(queues, aq); - } - [Fact] - public async Task ExecuteWithUnsubscribeViaChannel() - { - using (var muxer = Create()) - { - RedisChannel name = Me(); - var pubsub = muxer.GetSubscriber(); - AssertCounts(pubsub, name, false, 0, 0); - - // subscribe and check we get data - var first = await pubsub.SubscribeAsync(name); - var second = await pubsub.SubscribeAsync(name); - AssertCounts(pubsub, name, true, 0, 2); - List values = new List(); - int i = 0; - first.OnMessage(x => - { - lock (values) { values.Add(x.Message); } - return Task.CompletedTask; - }); - second.OnMessage(_ => Interlocked.Increment(ref i)); - await 
Task.Delay(200); - await pubsub.PublishAsync(name, "abc"); - await UntilCondition(TimeSpan.FromSeconds(10), () => values.Count == 1); - lock (values) - { - Assert.Equal("abc", Assert.Single(values)); - } - var subs = muxer.GetServer(muxer.GetEndPoints().Single()).SubscriptionSubscriberCount(name); - Assert.Equal(1, subs); - Assert.False(first.Completion.IsCompleted, "completed"); - Assert.False(second.Completion.IsCompleted, "completed"); - - await first.UnsubscribeAsync(); - await Task.Delay(200); - await pubsub.PublishAsync(name, "def"); - await UntilCondition(TimeSpan.FromSeconds(10), () => values.Count == 1 && Volatile.Read(ref i) == 2); - lock (values) - { - Assert.Equal("abc", Assert.Single(values)); - } - Assert.Equal(2, Volatile.Read(ref i)); - Assert.True(first.Completion.IsCompleted, "completed"); - Assert.False(second.Completion.IsCompleted, "completed"); - AssertCounts(pubsub, name, true, 0, 1); - - await second.UnsubscribeAsync(); - await Task.Delay(200); - await pubsub.PublishAsync(name, "ghi"); - await UntilCondition(TimeSpan.FromSeconds(10), () => values.Count == 1); - lock (values) - { - Assert.Equal("abc", Assert.Single(values)); - } - Assert.Equal(2, Volatile.Read(ref i)); - Assert.True(first.Completion.IsCompleted, "completed"); - Assert.True(second.Completion.IsCompleted, "completed"); - AssertCounts(pubsub, name, false, 0, 0); - - - subs = muxer.GetServer(muxer.GetEndPoints().Single()).SubscriptionSubscriberCount(name); - Assert.Equal(0, subs); - Assert.True(first.Completion.IsCompleted, "completed"); - Assert.True(second.Completion.IsCompleted, "completed"); - } - } - - [Fact] - public async Task ExecuteWithUnsubscribeViaSubscriber() - { - using (var muxer = Create()) - { - RedisChannel name = Me(); - var pubsub = muxer.GetSubscriber(); - AssertCounts(pubsub, name, false, 0, 0); - - // subscribe and check we get data - var first = await pubsub.SubscribeAsync(name); - var second = await pubsub.SubscribeAsync(name); - AssertCounts(pubsub, name, 
true, 0, 2); - List values = new List(); - int i = 0; - first.OnMessage(x => - { - lock (values) { values.Add(x.Message); } - return Task.CompletedTask; - }); - second.OnMessage(_ => Interlocked.Increment(ref i)); - - await Task.Delay(100); - await pubsub.PublishAsync(name, "abc"); - await UntilCondition(TimeSpan.FromSeconds(10), () => values.Count == 1); - lock (values) - { - Assert.Equal("abc", Assert.Single(values)); - } - var subs = muxer.GetServer(muxer.GetEndPoints().Single()).SubscriptionSubscriberCount(name); - Assert.Equal(1, subs); - Assert.False(first.Completion.IsCompleted, "completed"); - Assert.False(second.Completion.IsCompleted, "completed"); - - await pubsub.UnsubscribeAsync(name); - await Task.Delay(100); - await pubsub.PublishAsync(name, "def"); - await UntilCondition(TimeSpan.FromSeconds(10), () => values.Count == 1); - lock (values) - { - Assert.Equal("abc", Assert.Single(values)); - } - Assert.Equal(1, Volatile.Read(ref i)); - - subs = muxer.GetServer(muxer.GetEndPoints().Single()).SubscriptionSubscriberCount(name); - Assert.Equal(0, subs); - Assert.True(first.Completion.IsCompleted, "completed"); - Assert.True(second.Completion.IsCompleted, "completed"); - AssertCounts(pubsub, name, false, 0, 0); - } - } - - [Fact] - public async Task ExecuteWithUnsubscribeViaClearAll() - { - using (var muxer = Create()) - { - RedisChannel name = Me(); - var pubsub = muxer.GetSubscriber(); - AssertCounts(pubsub, name, false, 0, 0); - - // subscribe and check we get data - var first = await pubsub.SubscribeAsync(name); - var second = await pubsub.SubscribeAsync(name); - AssertCounts(pubsub, name, true, 0, 2); - List values = new List(); - int i = 0; - first.OnMessage(x => - { - lock (values) { values.Add(x.Message); } - return Task.CompletedTask; - }); - second.OnMessage(_ => Interlocked.Increment(ref i)); - await Task.Delay(100); - await pubsub.PublishAsync(name, "abc"); - await UntilCondition(TimeSpan.FromSeconds(10), () => values.Count == 1); - lock 
(values) - { - Assert.Equal("abc", Assert.Single(values)); - } - var subs = muxer.GetServer(muxer.GetEndPoints().Single()).SubscriptionSubscriberCount(name); - Assert.Equal(1, subs); - Assert.False(first.Completion.IsCompleted, "completed"); - Assert.False(second.Completion.IsCompleted, "completed"); - - await pubsub.UnsubscribeAllAsync(); - await Task.Delay(100); - await pubsub.PublishAsync(name, "def"); - await UntilCondition(TimeSpan.FromSeconds(10), () => values.Count == 1); - lock (values) - { - Assert.Equal("abc", Assert.Single(values)); - } - Assert.Equal(1, Volatile.Read(ref i)); - - subs = muxer.GetServer(muxer.GetEndPoints().Single()).SubscriptionSubscriberCount(name); - Assert.Equal(0, subs); - Assert.True(first.Completion.IsCompleted, "completed"); - Assert.True(second.Completion.IsCompleted, "completed"); - AssertCounts(pubsub, name, false, 0, 0); - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/Issues/Issue1101Tests.cs b/tests/StackExchange.Redis.Tests/Issues/Issue1101Tests.cs new file mode 100644 index 000000000..b0d9b9027 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/Issues/Issue1101Tests.cs @@ -0,0 +1,188 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests.Issues; + +public class Issue1101Tests(ITestOutputHelper output) : TestBase(output) +{ + private static void AssertCounts(ISubscriber pubsub, in RedisChannel channel, bool has, int handlers, int queues) + { + if (pubsub.Multiplexer is ConnectionMultiplexer muxer) + { + var aHas = muxer.GetSubscriberCounts(channel, out var ah, out var aq); + Assert.Equal(has, aHas); + Assert.Equal(handlers, ah); + Assert.Equal(queues, aq); + } + } + + [Fact] + public async Task ExecuteWithUnsubscribeViaChannel() + { + await using var conn = Create(log: Writer); + + RedisChannel name = RedisChannel.Literal(Me()); + var pubsub = conn.GetSubscriber(); + 
AssertCounts(pubsub, name, false, 0, 0); + + // subscribe and check we get data + var first = await pubsub.SubscribeAsync(name); + var second = await pubsub.SubscribeAsync(name); + AssertCounts(pubsub, name, true, 0, 2); + var values = new List(); + int i = 0; + first.OnMessage(x => + { + lock (values) { values.Add(x.Message); } + return Task.CompletedTask; + }); + second.OnMessage(_ => Interlocked.Increment(ref i)); + await Task.Delay(200); + await pubsub.PublishAsync(name, "abc"); + await UntilConditionAsync(TimeSpan.FromSeconds(10), () => values.Count == 1); + lock (values) + { + Assert.Equal("abc", Assert.Single(values)); + } + var subs = conn.GetServer(conn.GetEndPoints().Single()).SubscriptionSubscriberCount(name); + Assert.Equal(1, subs); + Assert.False(first.Completion.IsCompleted, "completed"); + Assert.False(second.Completion.IsCompleted, "completed"); + + await first.UnsubscribeAsync(); + await Task.Delay(200); + await pubsub.PublishAsync(name, "def"); + await UntilConditionAsync(TimeSpan.FromSeconds(10), () => values.Count == 1 && Volatile.Read(ref i) == 2); + lock (values) + { + Assert.Equal("abc", Assert.Single(values)); + } + Assert.Equal(2, Volatile.Read(ref i)); + Assert.True(first.Completion.IsCompleted, "completed"); + Assert.False(second.Completion.IsCompleted, "completed"); + AssertCounts(pubsub, name, true, 0, 1); + + await second.UnsubscribeAsync(); + await Task.Delay(200); + await pubsub.PublishAsync(name, "ghi"); + await UntilConditionAsync(TimeSpan.FromSeconds(10), () => values.Count == 1); + lock (values) + { + Assert.Equal("abc", Assert.Single(values)); + } + Assert.Equal(2, Volatile.Read(ref i)); + Assert.True(first.Completion.IsCompleted, "completed"); + Assert.True(second.Completion.IsCompleted, "completed"); + AssertCounts(pubsub, name, false, 0, 0); + + subs = conn.GetServer(conn.GetEndPoints().Single()).SubscriptionSubscriberCount(name); + Assert.Equal(0, subs); + Assert.True(first.Completion.IsCompleted, "completed"); + 
Assert.True(second.Completion.IsCompleted, "completed"); + } + + [Fact] + public async Task ExecuteWithUnsubscribeViaSubscriber() + { + await using var conn = Create(shared: false, log: Writer); + + RedisChannel name = RedisChannel.Literal(Me()); + var pubsub = conn.GetSubscriber(); + AssertCounts(pubsub, name, false, 0, 0); + + // subscribe and check we get data + var first = await pubsub.SubscribeAsync(name); + var second = await pubsub.SubscribeAsync(name); + AssertCounts(pubsub, name, true, 0, 2); + var values = new List(); + int i = 0; + first.OnMessage(x => + { + lock (values) { values.Add(x.Message); } + return Task.CompletedTask; + }); + second.OnMessage(_ => Interlocked.Increment(ref i)); + + await Task.Delay(100); + await pubsub.PublishAsync(name, "abc"); + await UntilConditionAsync(TimeSpan.FromSeconds(10), () => values.Count == 1); + lock (values) + { + Assert.Equal("abc", Assert.Single(values)); + } + var subs = conn.GetServer(conn.GetEndPoints().Single()).SubscriptionSubscriberCount(name); + Assert.Equal(1, subs); + Assert.False(first.Completion.IsCompleted, "completed"); + Assert.False(second.Completion.IsCompleted, "completed"); + + await pubsub.UnsubscribeAsync(name); + await Task.Delay(100); + await pubsub.PublishAsync(name, "def"); + await UntilConditionAsync(TimeSpan.FromSeconds(10), () => values.Count == 1); + lock (values) + { + Assert.Equal("abc", Assert.Single(values)); + } + Assert.Equal(1, Volatile.Read(ref i)); + + subs = conn.GetServer(conn.GetEndPoints().Single()).SubscriptionSubscriberCount(name); + Assert.Equal(0, subs); + Assert.True(first.Completion.IsCompleted, "completed"); + Assert.True(second.Completion.IsCompleted, "completed"); + AssertCounts(pubsub, name, false, 0, 0); + } + + [Fact] + public async Task ExecuteWithUnsubscribeViaClearAll() + { + await using var conn = Create(log: Writer); + + RedisChannel name = RedisChannel.Literal(Me()); + var pubsub = conn.GetSubscriber(); + AssertCounts(pubsub, name, false, 0, 0); + + // 
subscribe and check we get data + var first = await pubsub.SubscribeAsync(name); + var second = await pubsub.SubscribeAsync(name); + AssertCounts(pubsub, name, true, 0, 2); + var values = new List(); + int i = 0; + first.OnMessage(x => + { + lock (values) { values.Add(x.Message); } + return Task.CompletedTask; + }); + second.OnMessage(_ => Interlocked.Increment(ref i)); + await Task.Delay(100); + await pubsub.PublishAsync(name, "abc"); + await UntilConditionAsync(TimeSpan.FromSeconds(10), () => values.Count == 1); + lock (values) + { + Assert.Equal("abc", Assert.Single(values)); + } + var subs = conn.GetServer(conn.GetEndPoints().Single()).SubscriptionSubscriberCount(name); + Assert.Equal(1, subs); + Assert.False(first.Completion.IsCompleted, "completed"); + Assert.False(second.Completion.IsCompleted, "completed"); + + await pubsub.UnsubscribeAllAsync(); + await Task.Delay(100); + await pubsub.PublishAsync(name, "def"); + await UntilConditionAsync(TimeSpan.FromSeconds(10), () => values.Count == 1); + lock (values) + { + Assert.Equal("abc", Assert.Single(values)); + } + Assert.Equal(1, Volatile.Read(ref i)); + + subs = conn.GetServer(conn.GetEndPoints().Single()).SubscriptionSubscriberCount(name); + Assert.Equal(0, subs); + Assert.True(first.Completion.IsCompleted, "completed"); + Assert.True(second.Completion.IsCompleted, "completed"); + AssertCounts(pubsub, name, false, 0, 0); + } +} diff --git a/tests/StackExchange.Redis.Tests/Issues/Issue1103.cs b/tests/StackExchange.Redis.Tests/Issues/Issue1103.cs deleted file mode 100644 index 20f1832e6..000000000 --- a/tests/StackExchange.Redis.Tests/Issues/Issue1103.cs +++ /dev/null @@ -1,69 +0,0 @@ -using System.Globalization; -using Xunit; -using Xunit.Abstractions; -using static StackExchange.Redis.RedisValue; - -namespace StackExchange.Redis.Tests.Issues -{ - public class Issue1103 : TestBase - { - public Issue1103(ITestOutputHelper output) : base(output) { } - - [Theory] - [InlineData(142205255210238005UL, 
(int)StorageType.Int64)] - [InlineData(ulong.MaxValue, (int)StorageType.UInt64)] - [InlineData(ulong.MinValue, (int)StorageType.Int64)] - [InlineData(0x8000000000000000UL, (int)StorageType.UInt64)] - [InlineData(0x8000000000000001UL, (int)StorageType.UInt64)] - [InlineData(0x7FFFFFFFFFFFFFFFUL, (int)StorageType.Int64)] - public void LargeUInt64StoredCorrectly(ulong value, int storageType) - { - RedisKey key = Me(); - using (var muxer = Create()) - { - var db = muxer.GetDatabase(); - RedisValue typed = value; - - // only need UInt64 for 64-bits - Assert.Equal((StorageType)storageType, typed.Type); - db.StringSet(key, typed); - var fromRedis = db.StringGet(key); - - Log($"{fromRedis.Type}: {fromRedis}"); - Assert.Equal(StorageType.Raw, fromRedis.Type); - Assert.Equal(value, (ulong)fromRedis); - Assert.Equal(value.ToString(CultureInfo.InvariantCulture), fromRedis.ToString()); - - var simplified = fromRedis.Simplify(); - Log($"{simplified.Type}: {simplified}"); - Assert.Equal((StorageType)storageType, typed.Type); - Assert.Equal(value, (ulong)simplified); - Assert.Equal(value.ToString(CultureInfo.InvariantCulture), fromRedis.ToString()); - } - } - - [Fact] - public void UnusualRedisValueOddities() // things we found while doing this - { - RedisValue x = 0, y = "0"; - Assert.Equal(x, y); - Assert.Equal(y, x); - - y = "-0"; - Assert.Equal(x, y); - Assert.Equal(y, x); - - y = "-"; // this is the oddness; this used to return true - Assert.NotEqual(x, y); - Assert.NotEqual(y, x); - - y = "+"; - Assert.NotEqual(x, y); - Assert.NotEqual(y, x); - - y = "."; - Assert.NotEqual(x, y); - Assert.NotEqual(y, x); - } - } -} diff --git a/tests/StackExchange.Redis.Tests/Issues/Issue1103Tests.cs b/tests/StackExchange.Redis.Tests/Issues/Issue1103Tests.cs new file mode 100644 index 000000000..ab4042042 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/Issues/Issue1103Tests.cs @@ -0,0 +1,65 @@ +using System.Globalization; +using System.Threading.Tasks; +using Xunit; +using static 
StackExchange.Redis.RedisValue; + +namespace StackExchange.Redis.Tests.Issues; + +public class Issue1103Tests(ITestOutputHelper output) : TestBase(output) +{ + [Theory] + [InlineData(142205255210238005UL, (int)StorageType.Int64)] + [InlineData(ulong.MaxValue, (int)StorageType.UInt64)] + [InlineData(ulong.MinValue, (int)StorageType.Int64)] + [InlineData(0x8000000000000000UL, (int)StorageType.UInt64)] + [InlineData(0x8000000000000001UL, (int)StorageType.UInt64)] + [InlineData(0x7FFFFFFFFFFFFFFFUL, (int)StorageType.Int64)] + public async Task LargeUInt64StoredCorrectly(ulong value, int storageType) + { + await using var conn = Create(); + + RedisKey key = Me(); + var db = conn.GetDatabase(); + RedisValue typed = value; + + // only need UInt64 for 64-bits + Assert.Equal((StorageType)storageType, typed.Type); + db.StringSet(key, typed); + var fromRedis = db.StringGet(key); + + Log($"{fromRedis.Type}: {fromRedis}"); + Assert.Equal(StorageType.Raw, fromRedis.Type); + Assert.Equal(value, (ulong)fromRedis); + Assert.Equal(value.ToString(CultureInfo.InvariantCulture), fromRedis.ToString()); + + var simplified = fromRedis.Simplify(); + Log($"{simplified.Type}: {simplified}"); + Assert.Equal((StorageType)storageType, typed.Type); + Assert.Equal(value, (ulong)simplified); + Assert.Equal(value.ToString(CultureInfo.InvariantCulture), fromRedis.ToString()); + } + + [Fact] + public void UnusualRedisValueOddities() // things we found while doing this + { + RedisValue x = 0, y = "0"; + Assert.Equal(x, y); + Assert.Equal(y, x); + + y = "-0"; + Assert.Equal(x, y); + Assert.Equal(y, x); + + y = "-"; // this is the oddness; this used to return true + Assert.NotEqual(x, y); + Assert.NotEqual(y, x); + + y = "+"; + Assert.NotEqual(x, y); + Assert.NotEqual(y, x); + + y = "."; + Assert.NotEqual(x, y); + Assert.NotEqual(y, x); + } +} diff --git a/tests/StackExchange.Redis.Tests/Issues/Issue182.cs b/tests/StackExchange.Redis.Tests/Issues/Issue182.cs deleted file mode 100644 index 
1c541ce99..000000000 --- a/tests/StackExchange.Redis.Tests/Issues/Issue182.cs +++ /dev/null @@ -1,81 +0,0 @@ -using System; -using System.Linq; -using System.Threading.Tasks; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests.Issues -{ - public class Issue182 : TestBase - { - protected override string GetConfiguration() => $"{TestConfig.Current.MasterServerAndPort},responseTimeout=10000"; - - public Issue182(ITestOutputHelper output) : base (output) { } - - [FactLongRunning] - public async Task SetMembers() - { - using (var conn = Create(syncTimeout: 20000)) - { - conn.ConnectionFailed += (s, a) => - { - Log(a.FailureType.ToString()); - Log(a.Exception.Message); - Log(a.Exception.StackTrace); - }; - var db = conn.GetDatabase(); - - var key = Me(); - const int count = (int)5e6; - var len = await db.SetLengthAsync(key).ForAwait(); - - if (len != count) - { - await db.KeyDeleteAsync(key).ForAwait(); - foreach (var _ in Enumerable.Range(0, count)) - db.SetAdd(key, Guid.NewGuid().ToByteArray(), CommandFlags.FireAndForget); - - Assert.Equal(count, await db.SetLengthAsync(key).ForAwait()); // SCARD for set - } - var result = await db.SetMembersAsync(key).ForAwait(); - Assert.Equal(count, result.Length); // SMEMBERS result length - } - } - - [FactLongRunning] - public async Task SetUnion() - { - using (var conn = Create(syncTimeout: 10000)) - { - var db = conn.GetDatabase(); - - var key1 = Me() + ":1"; - var key2 = Me() + ":2"; - var dstkey = Me() + ":dst"; - - const int count = (int)5e6; - - var len1 = await db.SetLengthAsync(key1).ForAwait(); - var len2 = await db.SetLengthAsync(key2).ForAwait(); - await db.KeyDeleteAsync(dstkey).ForAwait(); - - if (len1 != count || len2 != count) - { - await db.KeyDeleteAsync(key1).ForAwait(); - await db.KeyDeleteAsync(key2).ForAwait(); - - foreach (var _ in Enumerable.Range(0, count)) - { - db.SetAdd(key1, Guid.NewGuid().ToByteArray(), CommandFlags.FireAndForget); - db.SetAdd(key2, 
Guid.NewGuid().ToByteArray(), CommandFlags.FireAndForget); - } - Assert.Equal(count, await db.SetLengthAsync(key1).ForAwait()); // SCARD for set 1 - Assert.Equal(count, await db.SetLengthAsync(key2).ForAwait()); // SCARD for set 2 - } - await db.SetCombineAndStoreAsync(SetOperation.Union, dstkey, key1, key2).ForAwait(); - var dstLen = db.SetLength(dstkey); - Assert.Equal(count * 2, dstLen); // SCARD for destination set - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/Issues/Issue182Tests.cs b/tests/StackExchange.Redis.Tests/Issues/Issue182Tests.cs new file mode 100644 index 000000000..e60332603 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/Issues/Issue182Tests.cs @@ -0,0 +1,77 @@ +using System; +using System.Linq; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests.Issues; + +public class Issue182Tests(ITestOutputHelper output) : TestBase(output) +{ + protected override string GetConfiguration() => $"{TestConfig.Current.PrimaryServerAndPort},responseTimeout=10000"; + + [Fact] + public async Task SetMembers() + { + Skip.UnlessLongRunning(); + await using var conn = Create(syncTimeout: 20000); + + conn.ConnectionFailed += (s, a) => + { + Log(a.FailureType.ToString()); + Log(a.Exception?.Message); + Log(a.Exception?.StackTrace); + }; + var db = conn.GetDatabase(); + + var key = Me(); + const int count = (int)5e6; + var len = await db.SetLengthAsync(key).ForAwait(); + + if (len != count) + { + await db.KeyDeleteAsync(key).ForAwait(); + foreach (var _ in Enumerable.Range(0, count)) + db.SetAdd(key, Guid.NewGuid().ToByteArray(), CommandFlags.FireAndForget); + + Assert.Equal(count, await db.SetLengthAsync(key).ForAwait()); // SCARD for set + } + var result = await db.SetMembersAsync(key).ForAwait(); + Assert.Equal(count, result.Length); // SMEMBERS result length + } + + [Fact] + public async Task SetUnion() + { + Skip.UnlessLongRunning(); + await using var conn = Create(syncTimeout: 10000); + + var db = conn.GetDatabase(); 
+ + var key1 = Me() + ":1"; + var key2 = Me() + ":2"; + var dstkey = Me() + ":dst"; + + const int count = (int)5e6; + + var len1 = await db.SetLengthAsync(key1).ForAwait(); + var len2 = await db.SetLengthAsync(key2).ForAwait(); + await db.KeyDeleteAsync(dstkey).ForAwait(); + + if (len1 != count || len2 != count) + { + await db.KeyDeleteAsync(key1).ForAwait(); + await db.KeyDeleteAsync(key2).ForAwait(); + + foreach (var _ in Enumerable.Range(0, count)) + { + db.SetAdd(key1, Guid.NewGuid().ToByteArray(), CommandFlags.FireAndForget); + db.SetAdd(key2, Guid.NewGuid().ToByteArray(), CommandFlags.FireAndForget); + } + Assert.Equal(count, await db.SetLengthAsync(key1).ForAwait()); // SCARD for set 1 + Assert.Equal(count, await db.SetLengthAsync(key2).ForAwait()); // SCARD for set 2 + } + await db.SetCombineAndStoreAsync(SetOperation.Union, dstkey, key1, key2).ForAwait(); + var dstLen = db.SetLength(dstkey); + Assert.Equal(count * 2, dstLen); // SCARD for destination set + } +} diff --git a/tests/StackExchange.Redis.Tests/Issues/Issue2164Tests.cs b/tests/StackExchange.Redis.Tests/Issues/Issue2164Tests.cs new file mode 100644 index 000000000..b52e9f627 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/Issues/Issue2164Tests.cs @@ -0,0 +1,55 @@ +namespace StackExchange.Redis.Tests.Issues +{ + public class Issue2164Tests + { + [Fact] + public void LoadSimpleScript() + { + LuaScript.Prepare("return 42"); + } + [Fact] + public void LoadComplexScript() + { + LuaScript.Prepare(@" +------------------------------------------------------------------------------- +-- API definitions +------------------------------------------------------------------------------- +local MessageStoreAPI = {} + +MessageStoreAPI.confirmPendingDelivery = function(smscMessageId, smscDeliveredAt, smscMessageState) + local messageId = redis.call('hget', ""smId:"" .. smscMessageId, 'mId') + if not messageId then + return nil + end + -- delete pending delivery + redis.call('del', ""smId:"" .. 
smscMessageId) + + local mIdK = 'm:'..messageId + + local result = redis.call('hsetnx', mIdK, 'sState', smscMessageState) + if result == 1 then + redis.call('hset', mIdK, 'sDlvAt', smscDeliveredAt) + redis.call('zrem', ""msg.validUntil"", messageId) + return redis.call('hget', mIdK, 'payload') + else + return nil + end +end + + +------------------------------------------------------------------------------- +-- Function lookup +------------------------------------------------------------------------------- + +-- None of the function calls accept keys +if #KEYS > 0 then error('No Keys should be provided') end + +-- The first argument must be the function that we intend to call, and it must +-- exist +local command_name = assert(table.remove(ARGV, 1), 'Must provide a command as first argument') +local command = assert(MessageStoreAPI[command_name], 'Unknown command ' .. command_name) + +return command(unpack(ARGV))"); + } + } +} diff --git a/tests/StackExchange.Redis.Tests/Issues/Issue2176Tests.cs b/tests/StackExchange.Redis.Tests/Issues/Issue2176Tests.cs new file mode 100644 index 000000000..39edd91d1 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/Issues/Issue2176Tests.cs @@ -0,0 +1,78 @@ +using System.Collections.Generic; +using System.Linq; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests.Issues +{ + public class Issue2176Tests(ITestOutputHelper output) : TestBase(output) + { + [Fact] + public async Task Execute_Batch() + { + await using var conn = Create(); + var db = conn.GetDatabase(); + + var me = Me(); + var key = me + ":1"; + var key2 = me + ":2"; + var keyIntersect = me + ":result"; + + db.KeyDelete(key); + db.KeyDelete(key2); + db.KeyDelete(keyIntersect); + db.SortedSetAdd(key, "a", 1345); + + var tasks = new List(); + var batch = db.CreateBatch(); + tasks.Add(batch.SortedSetAddAsync(key2, "a", 4567)); + tasks.Add(batch.SortedSetCombineAndStoreAsync(SetOperation.Intersect, keyIntersect, [key, key2])); + var 
rangeByRankTask = batch.SortedSetRangeByRankAsync(keyIntersect); + tasks.Add(rangeByRankTask); + batch.Execute(); + + await Task.WhenAll(tasks.ToArray()); + + var rangeByRankSortedSetValues = rangeByRankTask.Result; + + int size = rangeByRankSortedSetValues.Length; + Assert.Equal(1, size); + string firstRedisValue = rangeByRankSortedSetValues.FirstOrDefault().ToString(); + Assert.Equal("a", firstRedisValue); + } + + [Fact] + public async Task Execute_Transaction() + { + await using var conn = Create(); + var db = conn.GetDatabase(); + + var me = Me(); + var key = me + ":1"; + var key2 = me + ":2"; + var keyIntersect = me + ":result"; + + db.KeyDelete(key); + db.KeyDelete(key2); + db.KeyDelete(keyIntersect); + db.SortedSetAdd(key, "a", 1345); + + var tasks = new List(); + var batch = db.CreateTransaction(); + tasks.Add(batch.SortedSetAddAsync(key2, "a", 4567)); + tasks.Add(batch.SortedSetCombineAndStoreAsync(SetOperation.Intersect, keyIntersect, [key, key2])); + var rangeByRankTask = batch.SortedSetRangeByRankAsync(keyIntersect); + tasks.Add(rangeByRankTask); + batch.Execute(); + + await Task.WhenAll(tasks.ToArray()); + + var rangeByRankSortedSetValues = rangeByRankTask.Result; + + int size = rangeByRankSortedSetValues.Length; + Assert.Equal(1, size); + string firstRedisValue = rangeByRankSortedSetValues.FirstOrDefault().ToString(); + Assert.Equal("a", firstRedisValue); + } + } +} diff --git a/tests/StackExchange.Redis.Tests/Issues/Issue2392Tests.cs b/tests/StackExchange.Redis.Tests/Issues/Issue2392Tests.cs new file mode 100644 index 000000000..39df94021 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/Issues/Issue2392Tests.cs @@ -0,0 +1,43 @@ +using System; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests.Issues +{ + public class Issue2392Tests(ITestOutputHelper output) : TestBase(output) + { + [Fact] + public async Task Execute() + { + var options = new ConfigurationOptions() + { + BacklogPolicy = new() + { + 
QueueWhileDisconnected = true, + AbortPendingOnConnectionFailure = false, + }, + AbortOnConnectFail = false, + ConnectTimeout = 1, + ConnectRetry = 0, + AsyncTimeout = 1, + SyncTimeout = 1, + AllowAdmin = true, + }; + options.EndPoints.Add("127.0.0.1:1234"); + + await using var conn = await ConnectionMultiplexer.ConnectAsync(options, Writer); + var key = Me(); + var db = conn.GetDatabase(); + var server = conn.GetServerSnapshot()[0]; + + // Fail the connection + conn.AllowConnect = false; + server.SimulateConnectionFailure(SimulatedFailureType.All); + Assert.False(conn.IsConnected); + + await db.StringGetAsync(key, flags: CommandFlags.FireAndForget); + var ex = await Assert.ThrowsAnyAsync(() => db.StringGetAsync(key).WithTimeout(5000)); + Assert.True(ex is RedisTimeoutException or RedisConnectionException); + } + } +} diff --git a/tests/StackExchange.Redis.Tests/Issues/Issue2418.cs b/tests/StackExchange.Redis.Tests/Issues/Issue2418.cs new file mode 100644 index 000000000..db38b1325 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/Issues/Issue2418.cs @@ -0,0 +1,39 @@ +using System.Linq; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests.Issues; + +public class Issue2418(ITestOutputHelper output, SharedConnectionFixture? 
fixture = null) : TestBase(output, fixture) +{ + [Fact] + public async Task Execute() + { + await using var conn = Create(); + var db = conn.GetDatabase(); + + RedisKey key = Me(); + RedisValue someInt = 12; + Assert.False(someInt.IsNullOrEmpty, nameof(someInt.IsNullOrEmpty) + " before"); + Assert.True(someInt.IsInteger, nameof(someInt.IsInteger) + " before"); + await db.HashSetAsync(key, [new HashEntry("some_int", someInt)]); + + // check we can fetch it + var entry = await db.HashGetAllAsync(key); + Assert.NotEmpty(entry); + Assert.Single(entry); + foreach (var pair in entry) + { + Log($"'{pair.Name}'='{pair.Value}'"); + } + + // filter with LINQ + Assert.True(entry.Any(x => x.Name == "some_int"), "Any"); + someInt = entry.FirstOrDefault(x => x.Name == "some_int").Value; + Log($"found via Any: '{someInt}'"); + Assert.False(someInt.IsNullOrEmpty, nameof(someInt.IsNullOrEmpty) + " after"); + Assert.True(someInt.TryParse(out int i)); + Assert.Equal(12, i); + Assert.Equal(12, (int)someInt); + } +} diff --git a/tests/StackExchange.Redis.Tests/Issues/Issue25.cs b/tests/StackExchange.Redis.Tests/Issues/Issue25.cs deleted file mode 100644 index e4a2ac218..000000000 --- a/tests/StackExchange.Redis.Tests/Issues/Issue25.cs +++ /dev/null @@ -1,49 +0,0 @@ -using System; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests.Issues -{ - public class Issue25 : TestBase - { - public Issue25(ITestOutputHelper output) : base (output) { } - - [Fact] - public void CaseInsensitive() - { - var options = ConfigurationOptions.Parse("ssl=true"); - Assert.True(options.Ssl); - Assert.Equal("ssl=True", options.ToString()); - - options = ConfigurationOptions.Parse("SSL=TRUE"); - Assert.True(options.Ssl); - Assert.Equal("ssl=True", options.ToString()); - } - - [Fact] - public void UnkonwnKeywordHandling_Ignore() - { - ConfigurationOptions.Parse("ssl2=true", true); - } - - [Fact] - public void UnkonwnKeywordHandling_ExplicitFail() - { - var ex = Assert.Throws(() => { - 
ConfigurationOptions.Parse("ssl2=true", false); - }); - Assert.StartsWith("Keyword 'ssl2' is not supported", ex.Message); - Assert.Equal("ssl2", ex.ParamName); - } - - [Fact] - public void UnkonwnKeywordHandling_ImplicitFail() - { - var ex = Assert.Throws(() => { - ConfigurationOptions.Parse("ssl2=true"); - }); - Assert.StartsWith("Keyword 'ssl2' is not supported", ex.Message); - Assert.Equal("ssl2", ex.ParamName); - } - } -} diff --git a/tests/StackExchange.Redis.Tests/Issues/Issue2507.cs b/tests/StackExchange.Redis.Tests/Issues/Issue2507.cs new file mode 100644 index 000000000..f77e43e29 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/Issues/Issue2507.cs @@ -0,0 +1,40 @@ +using System.Linq; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests.Issues; + +[Collection(NonParallelCollection.Name)] +public class Issue2507(ITestOutputHelper output, SharedConnectionFixture? fixture = null) : TestBase(output, fixture) +{ + [Fact(Explicit = true)] // note this may show as Inconclusive, depending on the runner + public async Task Execute() + { + await using var conn = Create(shared: false); + var db = conn.GetDatabase(); + var pubsub = conn.GetSubscriber(); + var queue = await pubsub.SubscribeAsync(RedisChannel.Literal("__redis__:invalidate")); + await Task.Delay(100); + var connectionId = conn.GetConnectionId(conn.GetEndPoints().Single(), ConnectionType.Subscription); + if (connectionId is null) Assert.Skip("Connection id not available"); + + string baseKey = Me(); + RedisKey key1 = baseKey + "abc", + key2 = baseKey + "ghi", + key3 = baseKey + "mno"; + + await db.StringSetAsync([new(key1, "def"), new(key2, "jkl"), new(key3, "pqr")]); + // this is not supported, but: we want it to at least not fail + await db.ExecuteAsync("CLIENT", "TRACKING", "on", "REDIRECT", connectionId!.Value, "BCAST"); + await db.KeyDeleteAsync([key1, key2, key3]); + await Task.Delay(100); + queue.Unsubscribe(); + Assert.True(queue.TryRead(out var message), "Queue 1 
Read failed"); + Assert.Equal(key1, message.Message); + Assert.True(queue.TryRead(out message), "Queue 2 Read failed"); + Assert.Equal(key2, message.Message); + Assert.True(queue.TryRead(out message), "Queue 3 Read failed"); + Assert.Equal(key3, message.Message); + Assert.False(queue.TryRead(out message), "Queue 4 Read succeeded"); + } +} diff --git a/tests/StackExchange.Redis.Tests/Issues/Issue25Tests.cs b/tests/StackExchange.Redis.Tests/Issues/Issue25Tests.cs new file mode 100644 index 000000000..05dc4d57c --- /dev/null +++ b/tests/StackExchange.Redis.Tests/Issues/Issue25Tests.cs @@ -0,0 +1,41 @@ +using System; +using Xunit; + +namespace StackExchange.Redis.Tests.Issues; + +public class Issue25Tests(ITestOutputHelper output) : TestBase(output) +{ + [Fact] + public void CaseInsensitive() + { + var options = ConfigurationOptions.Parse("ssl=true"); + Assert.True(options.Ssl); + Assert.Equal("ssl=True", options.ToString()); + + options = ConfigurationOptions.Parse("SSL=TRUE"); + Assert.True(options.Ssl); + Assert.Equal("ssl=True", options.ToString()); + } + + [Fact] + public void UnkonwnKeywordHandling_Ignore() + { + ConfigurationOptions.Parse("ssl2=true", true); + } + + [Fact] + public void UnkonwnKeywordHandling_ExplicitFail() + { + var ex = Assert.Throws(() => ConfigurationOptions.Parse("ssl2=true", false)); + Assert.StartsWith("Keyword 'ssl2' is not supported", ex.Message); + Assert.Equal("ssl2", ex.ParamName); + } + + [Fact] + public void UnkonwnKeywordHandling_ImplicitFail() + { + var ex = Assert.Throws(() => ConfigurationOptions.Parse("ssl2=true")); + Assert.StartsWith("Keyword 'ssl2' is not supported", ex.Message); + Assert.Equal("ssl2", ex.ParamName); + } +} diff --git a/tests/StackExchange.Redis.Tests/Issues/Issue2653.cs b/tests/StackExchange.Redis.Tests/Issues/Issue2653.cs new file mode 100644 index 000000000..d304ff44a --- /dev/null +++ b/tests/StackExchange.Redis.Tests/Issues/Issue2653.cs @@ -0,0 +1,16 @@ +using Xunit; + +namespace 
StackExchange.Redis.Tests.Issues; + +public class Issue2653 +{ + [Theory] + [InlineData(null, "")] + [InlineData("", "")] + [InlineData("abcdef", "abcdef")] + [InlineData("abc.def", "abc.def")] + [InlineData("abc d \t ef", "abc-d-ef")] + [InlineData(" abc\r\ndef\n", "abc-def")] + public void CheckLibraySanitization(string? input, string expected) + => Assert.Equal(expected, ServerEndPoint.ClientInfoSanitize(input)); +} diff --git a/tests/StackExchange.Redis.Tests/Issues/Issue2763Tests.cs b/tests/StackExchange.Redis.Tests/Issues/Issue2763Tests.cs new file mode 100644 index 000000000..699076118 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/Issues/Issue2763Tests.cs @@ -0,0 +1,43 @@ +using System; +using System.Collections.Generic; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests.Issues +{ + public class Issue2763Tests(ITestOutputHelper output) : TestBase(output) + { + [Fact] + public async Task Execute() + { + await using var conn = Create(); + var subscriber = conn.GetSubscriber(); + + static void Handler(RedisChannel c, RedisValue v) { } + + const int COUNT = 1000; + RedisChannel channel = RedisChannel.Literal("CHANNEL:TEST"); + + List subscribes = new List(COUNT); + for (int i = 0; i < COUNT; i++) + subscribes.Add(() => subscriber.Subscribe(channel, Handler)); + Parallel.ForEach(subscribes, action => action()); + + Assert.Equal(COUNT, CountSubscriptionsForChannel(subscriber, channel)); + + List unsubscribes = new List(COUNT); + for (int i = 0; i < COUNT; i++) + unsubscribes.Add(() => subscriber.Unsubscribe(channel, Handler)); + Parallel.ForEach(unsubscribes, action => action()); + + Assert.Equal(0, CountSubscriptionsForChannel(subscriber, channel)); + } + + private static int CountSubscriptionsForChannel(ISubscriber subscriber, RedisChannel channel) + { + ConnectionMultiplexer connMultiplexer = (ConnectionMultiplexer)subscriber.Multiplexer; + connMultiplexer.GetSubscriberCounts(channel, out int handlers, out int _); + return 
handlers; + } + } +} diff --git a/tests/StackExchange.Redis.Tests/Issues/Issue6.cs b/tests/StackExchange.Redis.Tests/Issues/Issue6.cs deleted file mode 100644 index 9b9776099..000000000 --- a/tests/StackExchange.Redis.Tests/Issues/Issue6.cs +++ /dev/null @@ -1,21 +0,0 @@ -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests.Issues -{ - public class Issue6 : TestBase - { - public Issue6(ITestOutputHelper output) : base (output) { } - - [Fact] - public void ShouldWorkWithoutEchoOrPing() - { - using(var conn = Create(proxy: Proxy.Twemproxy)) - { - Log("config: " + conn.Configuration); - var db = conn.GetDatabase(); - var time = db.Ping(); - Log("ping time: " + time); - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/Issues/Issue6Tests.cs b/tests/StackExchange.Redis.Tests/Issues/Issue6Tests.cs new file mode 100644 index 000000000..c7c6385c0 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/Issues/Issue6Tests.cs @@ -0,0 +1,18 @@ +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests.Issues; + +public class Issue6Tests(ITestOutputHelper output) : TestBase(output) +{ + [Fact] + public async Task ShouldWorkWithoutEchoOrPing() + { + await using var conn = Create(proxy: Proxy.Twemproxy); + + Log("config: " + conn.Configuration); + var db = conn.GetDatabase(); + var time = await db.PingAsync(); + Log("ping time: " + time); + } +} diff --git a/tests/StackExchange.Redis.Tests/Issues/Massive Delete.cs b/tests/StackExchange.Redis.Tests/Issues/Massive Delete.cs deleted file mode 100644 index 6194e99dd..000000000 --- a/tests/StackExchange.Redis.Tests/Issues/Massive Delete.cs +++ /dev/null @@ -1,80 +0,0 @@ -using System; -using System.Diagnostics; -using System.Threading; -using System.Threading.Tasks; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests.Issues -{ - public class Massive_Delete : TestBase - { - public Massive_Delete(ITestOutputHelper output) : base(output) { } - - private void Prep(int db, string key) 
- { - var prefix = Me(); - using (var muxer = Create(allowAdmin: true)) - { - Skip.IfMissingDatabase(muxer, db); - GetServer(muxer).FlushDatabase(db); - Task last = null; - var conn = muxer.GetDatabase(db); - for (int i = 0; i < 10000; i++) - { - string iKey = prefix + i; - conn.StringSetAsync(iKey, iKey); - last = conn.SetAddAsync(key, iKey); - } - conn.Wait(last); - } - } - - [FactLongRunning] - public async Task ExecuteMassiveDelete() - { - var dbId = TestConfig.GetDedicatedDB(); - var key = Me(); - Prep(dbId, key); - var watch = Stopwatch.StartNew(); - using (var muxer = Create()) - using (var throttle = new SemaphoreSlim(1)) - { - var conn = muxer.GetDatabase(dbId); - var originally = await conn.SetLengthAsync(key).ForAwait(); - int keepChecking = 1; - Task last = null; - while (Volatile.Read(ref keepChecking) == 1) - { - throttle.Wait(); // acquire - var x = conn.SetPopAsync(key).ContinueWith(task => - { - throttle.Release(); - if (task.IsCompleted) - { - if ((string)task.Result == null) - { - Volatile.Write(ref keepChecking, 0); - } - else - { - last = conn.KeyDeleteAsync((string)task.Result); - } - } - }); - GC.KeepAlive(x); - } - if (last != null) - { - await last; - } - watch.Stop(); - long remaining = await conn.SetLengthAsync(key).ForAwait(); - Log("From {0} to {1}; {2}ms", originally, remaining, - watch.ElapsedMilliseconds); - - var counters = GetServer(muxer).GetCounters(); - Log("Completions: {0} sync, {1} async", counters.Interactive.CompletedSynchronously, counters.Interactive.CompletedAsynchronously); - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/Issues/MassiveDeleteTests.cs b/tests/StackExchange.Redis.Tests/Issues/MassiveDeleteTests.cs new file mode 100644 index 000000000..94590a186 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/Issues/MassiveDeleteTests.cs @@ -0,0 +1,74 @@ +using System; +using System.Diagnostics; +using System.Threading; +using System.Threading.Tasks; +using Xunit; + +namespace 
StackExchange.Redis.Tests.Issues; + +public class MassiveDeleteTests(ITestOutputHelper output) : TestBase(output) +{ + private async Task Prep(int dbId, string key) + { + await using var conn = Create(allowAdmin: true); + + var prefix = Me(); + Skip.IfMissingDatabase(conn, dbId); + GetServer(conn).FlushDatabase(dbId); + Task? last = null; + var db = conn.GetDatabase(dbId); + for (int i = 0; i < 10000; i++) + { + string iKey = prefix + i; + _ = db.StringSetAsync(iKey, iKey); + last = db.SetAddAsync(key, iKey); + } + await last!; + } + + [Fact] + public async Task ExecuteMassiveDelete() + { + Skip.UnlessLongRunning(); + var dbId = TestConfig.GetDedicatedDB(); + var key = Me(); + await Prep(dbId, key); + var watch = Stopwatch.StartNew(); + await using var conn = Create(); + using var throttle = new SemaphoreSlim(1); + var db = conn.GetDatabase(dbId); + var originally = await db.SetLengthAsync(key).ForAwait(); + int keepChecking = 1; + Task? last = null; + while (Volatile.Read(ref keepChecking) == 1) + { + throttle.Wait(); // acquire + var x = db.SetPopAsync(key).ContinueWith(task => + { + throttle.Release(); + if (task.IsCompleted) + { + if ((string?)task.Result == null) + { + Volatile.Write(ref keepChecking, 0); + } + else + { + last = db.KeyDeleteAsync((string?)task.Result); + } + } + }); + GC.KeepAlive(x); + } + if (last != null) + { + await last; + } + watch.Stop(); + long remaining = await db.SetLengthAsync(key).ForAwait(); + Log($"From {originally} to {remaining}; {watch.ElapsedMilliseconds}ms"); + + var counters = GetServer(conn).GetCounters(); + Log("Completions: {0} sync, {1} async", counters.Interactive.CompletedSynchronously, counters.Interactive.CompletedAsynchronously); + } +} diff --git a/tests/StackExchange.Redis.Tests/Issues/SO10504853.cs b/tests/StackExchange.Redis.Tests/Issues/SO10504853.cs deleted file mode 100644 index d7b7bb7df..000000000 --- a/tests/StackExchange.Redis.Tests/Issues/SO10504853.cs +++ /dev/null @@ -1,91 +0,0 @@ -using System; 
-using System.Diagnostics; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests.Issues -{ - public class SO10504853 : TestBase - { - public SO10504853(ITestOutputHelper output) : base(output) { } - - [Fact] - public void LoopLotsOfTrivialStuff() - { - var key = Me(); - Trace.WriteLine("### init"); - using (var muxer = Create()) - { - var conn = muxer.GetDatabase(); - conn.KeyDelete(key, CommandFlags.FireAndForget); - } - const int COUNT = 2; - for (int i = 0; i < COUNT; i++) - { - Trace.WriteLine("### incr:" + i); - using (var muxer = Create()) - { - var conn = muxer.GetDatabase(); - Assert.Equal(i + 1, conn.StringIncrement(key)); - } - } - Trace.WriteLine("### close"); - using (var muxer = Create()) - { - var conn = muxer.GetDatabase(); - Assert.Equal(COUNT, (long)conn.StringGet(key)); - } - } - - [Fact] - public void ExecuteWithEmptyStartingPoint() - { - using (var muxer = Create()) - { - var conn = muxer.GetDatabase(); - var key = Me(); - var task = new { priority = 3 }; - conn.KeyDeleteAsync(key); - conn.HashSetAsync(key, "something else", "abc"); - conn.HashSetAsync(key, "priority", task.priority.ToString()); - - var taskResult = conn.HashGetAsync(key, "priority"); - - conn.Wait(taskResult); - - var priority = int.Parse(taskResult.Result); - - Assert.Equal(3, priority); - } - } - - [Fact] - public void ExecuteWithNonHashStartingPoint() - { - var key = Me(); - Assert.Throws(() => - { - using (var muxer = Create()) - { - var conn = muxer.GetDatabase(); - var task = new { priority = 3 }; - conn.KeyDeleteAsync(key); - conn.StringSetAsync(key, "not a hash"); - conn.HashSetAsync(key, "priority", task.priority.ToString()); - - var taskResult = conn.HashGetAsync(key, "priority"); - - try - { - conn.Wait(taskResult); - Assert.True(false, "Should throw a WRONGTYPE"); - } - catch (AggregateException ex) - { - throw ex.InnerExceptions[0]; - } - } - }); // WRONGTYPE Operation against a key holding the wrong kind of value - } - } -} diff --git 
a/tests/StackExchange.Redis.Tests/Issues/SO10504853Tests.cs b/tests/StackExchange.Redis.Tests/Issues/SO10504853Tests.cs new file mode 100644 index 000000000..7d4276e9d --- /dev/null +++ b/tests/StackExchange.Redis.Tests/Issues/SO10504853Tests.cs @@ -0,0 +1,84 @@ +using System; +using System.Diagnostics; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests.Issues; + +public class SO10504853Tests(ITestOutputHelper output) : TestBase(output) +{ + [Fact] + public async Task LoopLotsOfTrivialStuff() + { + var key = Me(); + Trace.WriteLine("### init"); + await using (var conn = Create()) + { + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + } + const int COUNT = 2; + for (int i = 0; i < COUNT; i++) + { + Trace.WriteLine("### incr:" + i); + await using var conn = Create(); + var db = conn.GetDatabase(); + Assert.Equal(i + 1, db.StringIncrement(key)); + } + Trace.WriteLine("### close"); + await using (var conn = Create()) + { + var db = conn.GetDatabase(); + Assert.Equal(COUNT, (long)db.StringGet(key)); + } + } + + [Fact] + public async Task ExecuteWithEmptyStartingPoint() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + var key = Me(); + var task = new { priority = 3 }; + _ = db.KeyDeleteAsync(key); + _ = db.HashSetAsync(key, "something else", "abc"); + _ = db.HashSetAsync(key, "priority", task.priority.ToString()); + + var taskResult = db.HashGetAsync(key, "priority"); + + await taskResult; + + var priority = int.Parse(taskResult.Result!); + + Assert.Equal(3, priority); + } + + [Fact] + public async Task ExecuteWithNonHashStartingPoint() + { + var key = Me(); + await Assert.ThrowsAsync(async () => + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + var task = new { priority = 3 }; + _ = db.KeyDeleteAsync(key); + _ = db.StringSetAsync(key, "not a hash"); + _ = db.HashSetAsync(key, "priority", task.priority.ToString()); + + var taskResult = db.HashGetAsync(key, 
"priority"); + + try + { + db.Wait(taskResult); + Assert.Fail("Should throw a WRONGTYPE"); + } + catch (AggregateException ex) + { + throw ex.InnerExceptions[0]; + } + }); // WRONGTYPE Operation against a key holding the wrong kind of value + } +} diff --git a/tests/StackExchange.Redis.Tests/Issues/SO10825542.cs b/tests/StackExchange.Redis.Tests/Issues/SO10825542.cs deleted file mode 100644 index 5f12ebe42..000000000 --- a/tests/StackExchange.Redis.Tests/Issues/SO10825542.cs +++ /dev/null @@ -1,34 +0,0 @@ -using System; -using System.Text; -using System.Threading.Tasks; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests.Issues -{ - public class SO10825542 : TestBase - { - public SO10825542(ITestOutputHelper output) : base(output) { } - - [Fact] - public async Task Execute() - { - using (var muxer = Create()) - { - var key = Me(); - - var con = muxer.GetDatabase(); - // set the field value and expiration - _ = con.HashSetAsync(key, "field1", Encoding.UTF8.GetBytes("hello world")); - _ = con.KeyExpireAsync(key, TimeSpan.FromSeconds(7200)); - _ = con.HashSetAsync(key, "field2", "fooobar"); - var result = await con.HashGetAllAsync(key).ForAwait(); - - Assert.Equal(2, result.Length); - var dict = result.ToStringDictionary(); - Assert.Equal("hello world", dict["field1"]); - Assert.Equal("fooobar", dict["field2"]); - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/Issues/SO10825542Tests.cs b/tests/StackExchange.Redis.Tests/Issues/SO10825542Tests.cs new file mode 100644 index 000000000..493f4ec1b --- /dev/null +++ b/tests/StackExchange.Redis.Tests/Issues/SO10825542Tests.cs @@ -0,0 +1,28 @@ +using System; +using System.Text; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests.Issues; + +public class SO10825542Tests(ITestOutputHelper output) : TestBase(output) +{ + [Fact] + public async Task Execute() + { + await using var conn = Create(); + var key = Me(); + + var db = conn.GetDatabase(); + // set the 
field value and expiration + _ = db.HashSetAsync(key, "field1", Encoding.UTF8.GetBytes("hello world")); + _ = db.KeyExpireAsync(key, TimeSpan.FromSeconds(7200)); + _ = db.HashSetAsync(key, "field2", "fooobar"); + var result = await db.HashGetAllAsync(key).ForAwait(); + + Assert.Equal(2, result.Length); + var dict = result.ToStringDictionary(); + Assert.Equal("hello world", dict["field1"]); + Assert.Equal("fooobar", dict["field2"]); + } +} diff --git a/tests/StackExchange.Redis.Tests/Issues/SO11766033.cs b/tests/StackExchange.Redis.Tests/Issues/SO11766033.cs deleted file mode 100644 index 7ea6154ff..000000000 --- a/tests/StackExchange.Redis.Tests/Issues/SO11766033.cs +++ /dev/null @@ -1,41 +0,0 @@ -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests.Issues -{ - public class SO11766033 : TestBase - { - public SO11766033(ITestOutputHelper output) : base(output) { } - - [Fact] - public void TestNullString() - { - using (var muxer = Create()) - { - var redis = muxer.GetDatabase(); - const string expectedTestValue = null; - var uid = Me(); - redis.StringSetAsync(uid, "abc"); - redis.StringSetAsync(uid, expectedTestValue); - string testValue = redis.StringGet(uid); - Assert.Null(testValue); - } - } - - [Fact] - public void TestEmptyString() - { - using (var muxer = Create()) - { - var redis = muxer.GetDatabase(); - const string expectedTestValue = ""; - var uid = Me(); - - redis.StringSetAsync(uid, expectedTestValue); - string testValue = redis.StringGet(uid); - - Assert.Equal(expectedTestValue, testValue); - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/Issues/SO11766033Tests.cs b/tests/StackExchange.Redis.Tests/Issues/SO11766033Tests.cs new file mode 100644 index 000000000..65cef55a7 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/Issues/SO11766033Tests.cs @@ -0,0 +1,36 @@ +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests.Issues; + +public class SO11766033Tests(ITestOutputHelper output) : 
TestBase(output) +{ + [Fact] + public async Task TestNullString() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + const string? expectedTestValue = null; + var uid = Me(); + _ = db.StringSetAsync(uid, "abc"); + _ = db.StringSetAsync(uid, expectedTestValue); + string? testValue = db.StringGet(uid); + Assert.Null(testValue); + } + + [Fact] + public async Task TestEmptyString() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + const string expectedTestValue = ""; + var uid = Me(); + + _ = db.StringSetAsync(uid, expectedTestValue); + string? testValue = db.StringGet(uid); + + Assert.Equal(expectedTestValue, testValue); + } +} diff --git a/tests/StackExchange.Redis.Tests/Issues/SO22786599.cs b/tests/StackExchange.Redis.Tests/Issues/SO22786599.cs deleted file mode 100644 index 893870550..000000000 --- a/tests/StackExchange.Redis.Tests/Issues/SO22786599.cs +++ /dev/null @@ -1,37 +0,0 @@ -using System.Diagnostics; -using System.Linq; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests.Issues -{ - public class SO22786599 : TestBase - { - public SO22786599(ITestOutputHelper output) : base(output) { } - - [Fact] - public void Execute() - { - string CurrentIdsSetDbKey = Me() + ".x"; - string CurrentDetailsSetDbKey = Me() + ".y"; - - RedisValue[] stringIds = Enumerable.Range(1, 750).Select(i => (RedisValue)(i + " id")).ToArray(); - RedisValue[] stringDetails = Enumerable.Range(1, 750).Select(i => (RedisValue)(i + " detail")).ToArray(); - - using (var conn = Create()) - { - var db = conn.GetDatabase(); - var tran = db.CreateTransaction(); - - tran.SetAddAsync(CurrentIdsSetDbKey, stringIds); - tran.SetAddAsync(CurrentDetailsSetDbKey, stringDetails); - - var watch = Stopwatch.StartNew(); - var isOperationSuccessful = tran.Execute(); - watch.Stop(); - Log("{0}ms", watch.ElapsedMilliseconds); - Assert.True(isOperationSuccessful); - } - } - } -} diff --git 
a/tests/StackExchange.Redis.Tests/Issues/SO22786599Tests.cs b/tests/StackExchange.Redis.Tests/Issues/SO22786599Tests.cs new file mode 100644 index 000000000..0fc653991 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/Issues/SO22786599Tests.cs @@ -0,0 +1,33 @@ +using System.Diagnostics; +using System.Linq; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests.Issues; + +public class SO22786599Tests(ITestOutputHelper output) : TestBase(output) +{ + [Fact] + public async Task Execute() + { + string currentIdsSetDbKey = Me() + ".x"; + string currentDetailsSetDbKey = Me() + ".y"; + + RedisValue[] stringIds = Enumerable.Range(1, 750).Select(i => (RedisValue)(i + " id")).ToArray(); + RedisValue[] stringDetails = Enumerable.Range(1, 750).Select(i => (RedisValue)(i + " detail")).ToArray(); + + await using var conn = Create(); + + var db = conn.GetDatabase(); + var tran = db.CreateTransaction(); + + _ = tran.SetAddAsync(currentIdsSetDbKey, stringIds); + _ = tran.SetAddAsync(currentDetailsSetDbKey, stringDetails); + + var watch = Stopwatch.StartNew(); + var isOperationSuccessful = tran.Execute(); + watch.Stop(); + Log("{0}ms", watch.ElapsedMilliseconds); + Assert.True(isOperationSuccessful); + } +} diff --git a/tests/StackExchange.Redis.Tests/Issues/SO23949477.cs b/tests/StackExchange.Redis.Tests/Issues/SO23949477.cs deleted file mode 100644 index 25435443f..000000000 --- a/tests/StackExchange.Redis.Tests/Issues/SO23949477.cs +++ /dev/null @@ -1,40 +0,0 @@ -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests.Issues -{ - public class SO23949477 : TestBase - { - public SO23949477(ITestOutputHelper output) : base (output) { } - - [Fact] - public void Execute() - { - using (var conn = Create()) - { - var db = conn.GetDatabase(); - RedisKey key = Me(); - db.KeyDelete(key, CommandFlags.FireAndForget); - db.SortedSetAdd(key, "c", 3, When.Always, CommandFlags.FireAndForget); - db.SortedSetAdd(key, - new[] { - new 
SortedSetEntry("a", 1), - new SortedSetEntry("b", 2), - new SortedSetEntry("d", 4), - new SortedSetEntry("e", 5) - }, - When.Always, - CommandFlags.FireAndForget); - var pairs = db.SortedSetRangeByScoreWithScores( - key, order: Order.Descending, take: 3); - Assert.Equal(3, pairs.Length); - Assert.Equal(5, pairs[0].Score); - Assert.Equal("e", pairs[0].Element); - Assert.Equal(4, pairs[1].Score); - Assert.Equal("d", pairs[1].Element); - Assert.Equal(3, pairs[2].Score); - Assert.Equal("c", pairs[2].Element); - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/Issues/SO23949477Tests.cs b/tests/StackExchange.Redis.Tests/Issues/SO23949477Tests.cs new file mode 100644 index 000000000..92277289a --- /dev/null +++ b/tests/StackExchange.Redis.Tests/Issues/SO23949477Tests.cs @@ -0,0 +1,37 @@ +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests.Issues; + +public class SO23949477Tests(ITestOutputHelper output) : TestBase(output) +{ + [Fact] + public async Task Execute() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + RedisKey key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + db.SortedSetAdd(key, "c", 3, When.Always, CommandFlags.FireAndForget); + db.SortedSetAdd( + key, + [ + new SortedSetEntry("a", 1), + new SortedSetEntry("b", 2), + new SortedSetEntry("d", 4), + new SortedSetEntry("e", 5), + ], + When.Always, + CommandFlags.FireAndForget); + var pairs = db.SortedSetRangeByScoreWithScores( + key, order: Order.Descending, take: 3); + Assert.Equal(3, pairs.Length); + Assert.Equal(5, pairs[0].Score); + Assert.Equal("e", pairs[0].Element); + Assert.Equal(4, pairs[1].Score); + Assert.Equal("d", pairs[1].Element); + Assert.Equal(3, pairs[2].Score); + Assert.Equal("c", pairs[2].Element); + } +} diff --git a/tests/StackExchange.Redis.Tests/Issues/SO24807536.cs b/tests/StackExchange.Redis.Tests/Issues/SO24807536.cs deleted file mode 100644 index 54c6483c9..000000000 --- 
a/tests/StackExchange.Redis.Tests/Issues/SO24807536.cs +++ /dev/null @@ -1,49 +0,0 @@ -using System; -using System.Threading.Tasks; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests.Issues -{ - public class SO24807536 : TestBase - { - public SO24807536(ITestOutputHelper output) : base (output) { } - - [Fact] - public async Task Exec() - { - var key = Me(); - using(var conn = Create()) - { - var cache = conn.GetDatabase(); - - // setup some data - cache.KeyDelete(key, CommandFlags.FireAndForget); - cache.HashSet(key, "full", "some value", flags: CommandFlags.FireAndForget); - cache.KeyExpire(key, TimeSpan.FromSeconds(1), CommandFlags.FireAndForget); - - // test while exists - var keyExists = cache.KeyExists(key); - var ttl = cache.KeyTimeToLive(key); - var fullWait = cache.HashGetAsync(key, "full", flags: CommandFlags.None); - Assert.True(keyExists, "key exists"); - Assert.NotNull(ttl); - Assert.Equal("some value", fullWait.Result); - - // wait for expiry - await Task.Delay(2000).ForAwait(); - - // test once expired - keyExists = cache.KeyExists(key); - ttl = cache.KeyTimeToLive(key); - fullWait = cache.HashGetAsync(key, "full", flags: CommandFlags.None); - - Assert.False(keyExists); - Assert.Null(ttl); - var r = await fullWait; - Assert.True(r.IsNull); - Assert.Null((string)r); - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/Issues/SO24807536Tests.cs b/tests/StackExchange.Redis.Tests/Issues/SO24807536Tests.cs new file mode 100644 index 000000000..ddff810c0 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/Issues/SO24807536Tests.cs @@ -0,0 +1,44 @@ +using System; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests.Issues; + +public class SO24807536Tests(ITestOutputHelper output) : TestBase(output) +{ + [Fact] + public async Task Exec() + { + await using var conn = Create(); + + var key = Me(); + var db = conn.GetDatabase(); + + // setup some data + db.KeyDelete(key, 
CommandFlags.FireAndForget); + db.HashSet(key, "full", "some value", flags: CommandFlags.FireAndForget); + db.KeyExpire(key, TimeSpan.FromSeconds(2), CommandFlags.FireAndForget); + + // test while exists + var keyExists = db.KeyExists(key); + var ttl = db.KeyTimeToLive(key); + var fullWait = db.HashGetAsync(key, "full", flags: CommandFlags.None); + Assert.True(keyExists, "key exists"); + Assert.NotNull(ttl); + Assert.Equal("some value", await fullWait); + + // wait for expiry + await UntilConditionAsync(TimeSpan.FromSeconds(10), () => !db.KeyExists(key)).ForAwait(); + + // test once expired + keyExists = db.KeyExists(key); + ttl = db.KeyTimeToLive(key); + fullWait = db.HashGetAsync(key, "full", flags: CommandFlags.None); + + Assert.False(keyExists); + Assert.Null(ttl); + var r = await fullWait; + Assert.True(r.IsNull); + Assert.Null((string?)r); + } +} diff --git a/tests/StackExchange.Redis.Tests/Issues/SO25113323.cs b/tests/StackExchange.Redis.Tests/Issues/SO25113323.cs deleted file mode 100644 index 1afc1ab69..000000000 --- a/tests/StackExchange.Redis.Tests/Issues/SO25113323.cs +++ /dev/null @@ -1,41 +0,0 @@ -using System; -using System.Threading.Tasks; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests.Issues -{ - public class SO25113323 : TestBase - { - public SO25113323(ITestOutputHelper output) : base (output) { } - - [Fact] - public async Task SetExpirationToPassed() - { - var key = Me(); - using (var conn = Create()) - { - // Given - var cache = conn.GetDatabase(); - cache.KeyDelete(key, CommandFlags.FireAndForget); - cache.HashSet(key, "full", "test", When.NotExists, CommandFlags.PreferMaster); - - await Task.Delay(2000).ForAwait(); - - // When - var expiresOn = DateTime.UtcNow.AddSeconds(-2); - - var firstResult = cache.KeyExpire(key, expiresOn, CommandFlags.PreferMaster); - var secondResult = cache.KeyExpire(key, expiresOn, CommandFlags.PreferMaster); - var exists = cache.KeyExists(key); - var ttl = cache.KeyTimeToLive(key); 
- - // Then - Assert.True(firstResult); // could set the first time, but this nukes the key - Assert.False(secondResult); // can't set, since nuked - Assert.False(exists); // does not exist since nuked - Assert.Null(ttl); // no expiry since nuked - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/Issues/SO25113323Tests.cs b/tests/StackExchange.Redis.Tests/Issues/SO25113323Tests.cs new file mode 100644 index 000000000..00bc9836b --- /dev/null +++ b/tests/StackExchange.Redis.Tests/Issues/SO25113323Tests.cs @@ -0,0 +1,36 @@ +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests.Issues; + +public class SO25113323Tests(ITestOutputHelper output) : TestBase(output) +{ + [Fact] + public async Task SetExpirationToPassed() + { + await using var conn = Create(); + + // Given + var key = Me(); + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + db.HashSet(key, "full", "test", When.NotExists, CommandFlags.PreferMaster); + + await Task.Delay(2000).ForAwait(); + + // When + var serverTime = GetServer(conn).Time(); + var expiresOn = serverTime.AddSeconds(-2); + + var firstResult = db.KeyExpire(key, expiresOn, CommandFlags.PreferMaster); + var secondResult = db.KeyExpire(key, expiresOn, CommandFlags.PreferMaster); + var exists = db.KeyExists(key); + var ttl = db.KeyTimeToLive(key); + + // Then + Assert.True(firstResult); // could set the first time, but this nukes the key + Assert.False(secondResult); // can't set, since nuked + Assert.False(exists); // does not exist since nuked + Assert.Null(ttl); // no expiry since nuked + } +} diff --git a/tests/StackExchange.Redis.Tests/Issues/SO25567566.cs b/tests/StackExchange.Redis.Tests/Issues/SO25567566.cs deleted file mode 100644 index b656849f6..000000000 --- a/tests/StackExchange.Redis.Tests/Issues/SO25567566.cs +++ /dev/null @@ -1,75 +0,0 @@ -using System; -using System.Threading.Tasks; -using Xunit; -using Xunit.Abstractions; - -namespace 
StackExchange.Redis.Tests.Issues -{ - public class SO25567566 : TestBase - { - protected override string GetConfiguration() => TestConfig.Current.MasterServerAndPort; - public SO25567566(ITestOutputHelper output) : base(output) { } - - [FactLongRunning] - public async Task Execute() - { - using (var conn = ConnectionMultiplexer.Connect(GetConfiguration())) // Create()) - { - for (int i = 0; i < 100; i++) - { - Assert.Equal("ok", await DoStuff(conn).ForAwait()); - } - } - } - - private async Task DoStuff(ConnectionMultiplexer conn) - { - var db = conn.GetDatabase(); - - var timeout = Task.Delay(5000); - var key = Me(); - var key2 = key + "2"; - var len = db.ListLengthAsync(key); - - if (await Task.WhenAny(timeout, len).ForAwait() != len) - { - return "Timeout getting length"; - } - - if ((await len.ForAwait()) == 0) - { - db.ListRightPush(key, "foo", flags: CommandFlags.FireAndForget); - } - var tran = db.CreateTransaction(); - var x = tran.ListRightPopLeftPushAsync(key, key2); - var y = tran.SetAddAsync(key + "set", "bar"); - var z = tran.KeyExpireAsync(key2, TimeSpan.FromSeconds(60)); - timeout = Task.Delay(5000); - - var exec = tran.ExecuteAsync(); - // SWAP THESE TWO - bool ok = await Task.WhenAny(exec, timeout).ForAwait() == exec; - //bool ok = true; - - if (ok) - { - if (await exec.ForAwait()) - { - await Task.WhenAll(x, y, z).ForAwait(); - - var db2 = conn.GetDatabase(); - db2.HashGet(key + "hash", "whatever"); - return "ok"; - } - else - { - return "Transaction aborted"; - } - } - else - { - return "Timeout during exec"; - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/Issues/SO25567566Tests.cs b/tests/StackExchange.Redis.Tests/Issues/SO25567566Tests.cs new file mode 100644 index 000000000..6d00a705e --- /dev/null +++ b/tests/StackExchange.Redis.Tests/Issues/SO25567566Tests.cs @@ -0,0 +1,70 @@ +using System; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests.Issues; + +public class SO25567566Tests(ITestOutputHelper 
output) : TestBase(output) +{ + [Fact] + public async Task Execute() + { + Skip.UnlessLongRunning(); + await using var conn = await ConnectionMultiplexer.ConnectAsync(GetConfiguration()); + + for (int i = 0; i < 100; i++) + { + Assert.Equal("ok", await DoStuff(conn).ForAwait()); + } + } + + private async Task DoStuff(ConnectionMultiplexer conn) + { + var db = conn.GetDatabase(); + + var timeout = Task.Delay(5000); + var key = Me(); + var key2 = key + "2"; + var len = db.ListLengthAsync(key); + + if (await Task.WhenAny(timeout, len).ForAwait() != len) + { + return "Timeout getting length"; + } + + if ((await len.ForAwait()) == 0) + { + db.ListRightPush(key, "foo", flags: CommandFlags.FireAndForget); + } + var tran = db.CreateTransaction(); + var x = tran.ListRightPopLeftPushAsync(key, key2); + var y = tran.SetAddAsync(key + "set", "bar"); + var z = tran.KeyExpireAsync(key2, TimeSpan.FromSeconds(60)); + timeout = Task.Delay(5000); + + var exec = tran.ExecuteAsync(); + // SWAP THESE TWO + // bool ok = true; + bool ok = await Task.WhenAny(exec, timeout).ForAwait() == exec; + + if (ok) + { + if (await exec.ForAwait()) + { + await Task.WhenAll(x, y, z).ForAwait(); + + var db2 = conn.GetDatabase(); + db2.HashGet(key + "hash", "whatever"); + return "ok"; + } + else + { + return "Transaction aborted"; + } + } + else + { + return "Timeout during exec"; + } + } +} diff --git a/tests/StackExchange.Redis.Tests/KeyAndValueTests.cs b/tests/StackExchange.Redis.Tests/KeyAndValueTests.cs new file mode 100644 index 000000000..6d37fbe7a --- /dev/null +++ b/tests/StackExchange.Redis.Tests/KeyAndValueTests.cs @@ -0,0 +1,182 @@ +using System; +using System.Collections.Generic; +using System.Globalization; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class KeyAndValueTests +{ + [Fact] + public void TestValues() + { + RedisValue @default = default(RedisValue); + CheckNull(@default); + + RedisValue nullString = (string?)null; + CheckNull(nullString); + + RedisValue 
nullBlob = (byte[]?)null; + CheckNull(nullBlob); + + RedisValue emptyString = ""; + CheckNotNull(emptyString); + + RedisValue emptyBlob = Array.Empty(); + CheckNotNull(emptyBlob); + + RedisValue a0 = new string('a', 1); + CheckNotNull(a0); + RedisValue a1 = new string('a', 1); + CheckNotNull(a1); + RedisValue b0 = new[] { (byte)'b' }; + CheckNotNull(b0); + RedisValue b1 = new[] { (byte)'b' }; + CheckNotNull(b1); + + RedisValue i4 = 1; + CheckNotNull(i4); + RedisValue i8 = 1L; + CheckNotNull(i8); + + RedisValue bool1 = true; + CheckNotNull(bool1); + RedisValue bool2 = false; + CheckNotNull(bool2); + RedisValue bool3 = true; + CheckNotNull(bool3); + + CheckSame(a0, a0); + CheckSame(a1, a1); + CheckSame(a0, a1); + + CheckSame(b0, b0); + CheckSame(b1, b1); + CheckSame(b0, b1); + + CheckSame(i4, i4); + CheckSame(i8, i8); + CheckSame(i4, i8); + + CheckSame(bool1, bool3); + CheckNotSame(bool1, bool2); + } + + internal static void CheckSame(RedisValue x, RedisValue y) + { + if (x.TryParse(out double value) && double.IsNaN(value)) + { + // NaN has atypical equality rules + Assert.True(y.TryParse(out value) && double.IsNaN(value)); + return; + } + Assert.True(Equals(x, y), "Equals(x, y)"); + Assert.True(Equals(y, x), "Equals(y, x)"); + Assert.True(EqualityComparer.Default.Equals(x, y), "EQ(x,y)"); + Assert.True(EqualityComparer.Default.Equals(y, x), "EQ(y,x)"); + Assert.True(x == y, "x==y"); + Assert.True(y == x, "y==x"); + Assert.False(x != y, "x!=y"); + Assert.False(y != x, "y!=x"); + Assert.True(x.Equals(y), "x.EQ(y)"); + Assert.True(y.Equals(x), "y.EQ(x)"); + Assert.True(x.GetHashCode() == y.GetHashCode(), "GetHashCode"); + } + + private static void CheckNotSame(RedisValue x, RedisValue y) + { + Assert.False(Equals(x, y)); + Assert.False(Equals(y, x)); + Assert.False(EqualityComparer.Default.Equals(x, y)); + Assert.False(EqualityComparer.Default.Equals(y, x)); + Assert.False(x == y); + Assert.False(y == x); + Assert.True(x != y); + Assert.True(y != x); + 
Assert.False(x.Equals(y)); + Assert.False(y.Equals(x)); + Assert.False(x.GetHashCode() == y.GetHashCode()); // well, very unlikely + } + + private static void CheckNotNull(RedisValue value) + { + Assert.False(value.IsNull); + Assert.NotNull((byte[]?)value); + Assert.NotNull((string?)value); + Assert.NotEqual(-1, value.GetHashCode()); + + Assert.NotNull((string?)value); + Assert.NotNull((byte[]?)value); + + CheckSame(value, value); + CheckNotSame(value, default(RedisValue)); + CheckNotSame(value, (string?)null); + CheckNotSame(value, (byte[]?)null); + } + + internal static void CheckNull(RedisValue value) + { + Assert.True(value.IsNull); + Assert.True(value.IsNullOrEmpty); + Assert.False(value.IsInteger); + Assert.Equal(-1, value.GetHashCode()); + + Assert.Null((string?)value); + Assert.Null((byte[]?)value); + + Assert.Equal(0, (int)value); + Assert.Equal(0L, (long)value); + + CheckSame(value, value); + // CheckSame(value, default(RedisValue)); + // CheckSame(value, (string)null); + // CheckSame(value, (byte[])null); + } + + [Fact] + public void ValuesAreConvertible() + { + RedisValue val = 123; + object o = val; + byte[] blob = (byte[])Convert.ChangeType(o, typeof(byte[])); + + Assert.Equal(3, blob.Length); + Assert.Equal((byte)'1', blob[0]); + Assert.Equal((byte)'2', blob[1]); + Assert.Equal((byte)'3', blob[2]); + + Assert.Equal(123, Convert.ToDouble(o)); + + IConvertible c = (IConvertible)o; + // ReSharper disable RedundantCast + Assert.Equal((short)123, c.ToInt16(CultureInfo.InvariantCulture)); + Assert.Equal((int)123, c.ToInt32(CultureInfo.InvariantCulture)); + Assert.Equal(123L, c.ToInt64(CultureInfo.InvariantCulture)); + Assert.Equal(123F, c.ToSingle(CultureInfo.InvariantCulture)); + Assert.Equal("123", c.ToString(CultureInfo.InvariantCulture)); + Assert.Equal(123D, c.ToDouble(CultureInfo.InvariantCulture)); + Assert.Equal(123M, c.ToDecimal(CultureInfo.InvariantCulture)); + Assert.Equal((ushort)123, c.ToUInt16(CultureInfo.InvariantCulture)); + 
Assert.Equal(123U, c.ToUInt32(CultureInfo.InvariantCulture)); + Assert.Equal(123UL, c.ToUInt64(CultureInfo.InvariantCulture)); + + blob = (byte[])c.ToType(typeof(byte[]), CultureInfo.InvariantCulture); + Assert.Equal(3, blob.Length); + Assert.Equal((byte)'1', blob[0]); + Assert.Equal((byte)'2', blob[1]); + Assert.Equal((byte)'3', blob[2]); + } + + [Fact] + public void CanBeDynamic() + { + RedisValue val = "abc"; + object o = val; + dynamic d = o; + byte[] blob = (byte[])d; // could be in a try/catch + Assert.Equal(3, blob.Length); + Assert.Equal((byte)'a', blob[0]); + Assert.Equal((byte)'b', blob[1]); + Assert.Equal((byte)'c', blob[2]); + } +} diff --git a/tests/StackExchange.Redis.Tests/KeyIdleAsyncTests.cs b/tests/StackExchange.Redis.Tests/KeyIdleAsyncTests.cs new file mode 100644 index 000000000..598e84d93 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/KeyIdleAsyncTests.cs @@ -0,0 +1,49 @@ +using System; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +[RunPerProtocol] +public class KeyIdleAsyncTests(ITestOutputHelper output, SharedConnectionFixture fixture) : TestBase(output, fixture) +{ + [Fact] + public async Task IdleTimeAsync() + { + await using var conn = Create(); + + RedisKey key = Me(); + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + db.StringSet(key, "new value", flags: CommandFlags.FireAndForget); + await Task.Delay(2000).ForAwait(); + var idleTime = await db.KeyIdleTimeAsync(key).ForAwait(); + Assert.True(idleTime > TimeSpan.Zero, "First check"); + + db.StringSet(key, "new value2", flags: CommandFlags.FireAndForget); + var idleTime2 = await db.KeyIdleTimeAsync(key).ForAwait(); + Assert.True(idleTime2 < idleTime, "Second check"); + + db.KeyDelete(key); + var idleTime3 = await db.KeyIdleTimeAsync(key).ForAwait(); + Assert.Null(idleTime3); + } + + [Fact] + public async Task TouchIdleTimeAsync() + { + await using var conn = Create(require: RedisFeatures.v3_2_1); + + RedisKey 
key = Me(); + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + db.StringSet(key, "new value", flags: CommandFlags.FireAndForget); + await Task.Delay(2000).ForAwait(); + var idleTime = await db.KeyIdleTimeAsync(key).ForAwait(); + Assert.True(idleTime > TimeSpan.Zero, "First check"); + + Assert.True(await db.KeyTouchAsync(key).ForAwait(), "Second check"); + var idleTime1 = await db.KeyIdleTimeAsync(key).ForAwait(); + Assert.True(idleTime1 < idleTime, "Third check"); + } +} diff --git a/tests/StackExchange.Redis.Tests/KeyIdleTests.cs b/tests/StackExchange.Redis.Tests/KeyIdleTests.cs new file mode 100644 index 000000000..deec1efb4 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/KeyIdleTests.cs @@ -0,0 +1,49 @@ +using System; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +[RunPerProtocol] +public class KeyIdleTests(ITestOutputHelper output, SharedConnectionFixture fixture) : TestBase(output, fixture) +{ + [Fact] + public async Task IdleTime() + { + await using var conn = Create(); + + RedisKey key = Me(); + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + db.StringSet(key, "new value", flags: CommandFlags.FireAndForget); + await Task.Delay(2000).ForAwait(); + var idleTime = db.KeyIdleTime(key); + Assert.True(idleTime > TimeSpan.Zero); + + db.StringSet(key, "new value2", flags: CommandFlags.FireAndForget); + var idleTime2 = db.KeyIdleTime(key); + Assert.True(idleTime2 < idleTime); + + db.KeyDelete(key); + var idleTime3 = db.KeyIdleTime(key); + Assert.Null(idleTime3); + } + + [Fact] + public async Task TouchIdleTime() + { + await using var conn = Create(require: RedisFeatures.v3_2_1); + + RedisKey key = Me(); + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + db.StringSet(key, "new value", flags: CommandFlags.FireAndForget); + await Task.Delay(2000).ForAwait(); + var idleTime = db.KeyIdleTime(key); + Assert.True(idleTime > 
TimeSpan.Zero, "First check"); + + Assert.True(db.KeyTouch(key), "Second check"); + var idleTime1 = db.KeyIdleTime(key); + Assert.True(idleTime1 < idleTime, "Third check"); + } +} diff --git a/tests/StackExchange.Redis.Tests/KeyNotificationTests.cs b/tests/StackExchange.Redis.Tests/KeyNotificationTests.cs new file mode 100644 index 000000000..0a70aa739 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/KeyNotificationTests.cs @@ -0,0 +1,698 @@ +using System; +using System.Buffers; +using System.Text; +using Xunit; +using Xunit.Sdk; + +namespace StackExchange.Redis.Tests; + +public class KeyNotificationTests(ITestOutputHelper log) +{ + [Theory] + [InlineData("foo", "foo")] + [InlineData("__foo__", "__foo__")] + [InlineData("__keyspace@4__:", "__keyspace@4__:")] // not long enough + [InlineData("__keyspace@4__:f", "f")] + [InlineData("__keyspace@4__:fo", "fo")] + [InlineData("__keyspace@4__:foo", "foo")] + [InlineData("__keyspace@42__:foo", "foo")] // check multi-char db + [InlineData("__keyevent@4__:foo", "__keyevent@4__:foo")] // key-event + [InlineData("__keyevent@42__:foo", "__keyevent@42__:foo")] // key-event + public void RoutingSpan_StripKeySpacePrefix(string raw, string routed) + { + ReadOnlySpan srcBytes = Encoding.UTF8.GetBytes(raw); + var strippedBytes = RedisChannel.StripKeySpacePrefix(srcBytes); + var result = Encoding.UTF8.GetString(strippedBytes); + Assert.Equal(routed, result); + } + + [Fact] + public void Keyspace_Del_ParsesCorrectly() + { + // __keyspace@1__:mykey with payload "del" + var channel = RedisChannel.Literal("__keyspace@1__:mykey"); + Assert.False(channel.IgnoreChannelPrefix); // because constructed manually + RedisValue value = "del"; + + Assert.True(KeyNotification.TryParse(in channel, in value, out var notification)); + + Assert.True(notification.IsKeySpace); + Assert.False(notification.IsKeyEvent); + Assert.Equal(1, notification.Database); + Assert.Equal(KeyNotificationType.Del, notification.Type); + 
Assert.True(notification.IsType("del"u8)); + Assert.Equal("mykey", (string?)notification.GetKey()); + Assert.Equal(5, notification.GetKeyByteCount()); + Assert.Equal(5, notification.GetKeyMaxByteCount()); + Assert.Equal(5, notification.GetKeyCharCount()); + Assert.Equal(6, notification.GetKeyMaxCharCount()); + } + + [Fact] + public void Keyevent_Del_ParsesCorrectly() + { + // __keyevent@42__:del with value "mykey" + var channel = RedisChannel.Literal("__keyevent@42__:del"); + RedisValue value = "mykey"; + + Assert.True(KeyNotification.TryParse(in channel, in value, out var notification)); + + Assert.False(notification.IsKeySpace); + Assert.True(notification.IsKeyEvent); + Assert.Equal(42, notification.Database); + Assert.Equal(KeyNotificationType.Del, notification.Type); + Assert.True(notification.IsType("del"u8)); + Assert.Equal("mykey", (string?)notification.GetKey()); + Assert.Equal(5, notification.GetKeyByteCount()); + Assert.Equal(18, notification.GetKeyMaxByteCount()); + Assert.Equal(5, notification.GetKeyCharCount()); + Assert.Equal(5, notification.GetKeyMaxCharCount()); + } + + [Fact] + public void Keyspace_Set_ParsesCorrectly() + { + var channel = RedisChannel.Literal("__keyspace@0__:testkey"); + RedisValue value = "set"; + + Assert.True(KeyNotification.TryParse(in channel, in value, out var notification)); + + Assert.True(notification.IsKeySpace); + Assert.Equal(0, notification.Database); + Assert.Equal(KeyNotificationType.Set, notification.Type); + Assert.True(notification.IsType("set"u8)); + Assert.Equal("testkey", (string?)notification.GetKey()); + Assert.Equal(7, notification.GetKeyByteCount()); + Assert.Equal(7, notification.GetKeyMaxByteCount()); + Assert.Equal(7, notification.GetKeyCharCount()); + Assert.Equal(8, notification.GetKeyMaxCharCount()); + } + + [Fact] + public void Keyevent_Expire_ParsesCorrectly() + { + var channel = RedisChannel.Literal("__keyevent@5__:expire"); + RedisValue value = "session:12345"; + + 
Assert.True(KeyNotification.TryParse(in channel, in value, out var notification)); + + Assert.True(notification.IsKeyEvent); + Assert.Equal(5, notification.Database); + Assert.Equal(KeyNotificationType.Expire, notification.Type); + Assert.True(notification.IsType("expire"u8)); + Assert.Equal("session:12345", (string?)notification.GetKey()); + Assert.Equal(13, notification.GetKeyByteCount()); + Assert.Equal(42, notification.GetKeyMaxByteCount()); + Assert.Equal(13, notification.GetKeyCharCount()); + Assert.Equal(13, notification.GetKeyMaxCharCount()); + } + + [Fact] + public void Keyspace_Expired_ParsesCorrectly() + { + var channel = RedisChannel.Literal("__keyspace@3__:cache:item"); + RedisValue value = "expired"; + + Assert.True(KeyNotification.TryParse(in channel, in value, out var notification)); + + Assert.True(notification.IsKeySpace); + Assert.Equal(3, notification.Database); + Assert.Equal(KeyNotificationType.Expired, notification.Type); + Assert.True(notification.IsType("expired"u8)); + Assert.Equal("cache:item", (string?)notification.GetKey()); + Assert.Equal(10, notification.GetKeyByteCount()); + Assert.Equal(10, notification.GetKeyMaxByteCount()); + Assert.Equal(10, notification.GetKeyCharCount()); + Assert.Equal(11, notification.GetKeyMaxCharCount()); + } + + [Fact] + public void Keyevent_LPush_ParsesCorrectly() + { + var channel = RedisChannel.Literal("__keyevent@0__:lpush"); + RedisValue value = "queue:tasks"; + + Assert.True(KeyNotification.TryParse(in channel, in value, out var notification)); + + Assert.True(notification.IsKeyEvent); + Assert.Equal(0, notification.Database); + Assert.Equal(KeyNotificationType.LPush, notification.Type); + Assert.True(notification.IsType("lpush"u8)); + Assert.Equal("queue:tasks", (string?)notification.GetKey()); + Assert.Equal(11, notification.GetKeyByteCount()); + Assert.Equal(36, notification.GetKeyMaxByteCount()); + Assert.Equal(11, notification.GetKeyCharCount()); + Assert.Equal(11, 
notification.GetKeyMaxCharCount()); + } + + [Fact] + public void Keyspace_HSet_ParsesCorrectly() + { + var channel = RedisChannel.Literal("__keyspace@2__:user:1000"); + RedisValue value = "hset"; + + Assert.True(KeyNotification.TryParse(in channel, in value, out var notification)); + + Assert.True(notification.IsKeySpace); + Assert.Equal(2, notification.Database); + Assert.Equal(KeyNotificationType.HSet, notification.Type); + Assert.True(notification.IsType("hset"u8)); + Assert.Equal("user:1000", (string?)notification.GetKey()); + Assert.Equal(9, notification.GetKeyByteCount()); + Assert.Equal(9, notification.GetKeyMaxByteCount()); + Assert.Equal(9, notification.GetKeyCharCount()); + Assert.Equal(10, notification.GetKeyMaxCharCount()); + } + + [Fact] + public void Keyevent_ZAdd_ParsesCorrectly() + { + var channel = RedisChannel.Literal("__keyevent@7__:zadd"); + RedisValue value = "leaderboard"; + + Assert.True(KeyNotification.TryParse(in channel, in value, out var notification)); + + Assert.True(notification.IsKeyEvent); + Assert.Equal(7, notification.Database); + Assert.Equal(KeyNotificationType.ZAdd, notification.Type); + Assert.True(notification.IsType("zadd"u8)); + Assert.Equal("leaderboard", (string?)notification.GetKey()); + Assert.Equal(11, notification.GetKeyByteCount()); + Assert.Equal(36, notification.GetKeyMaxByteCount()); + Assert.Equal(11, notification.GetKeyCharCount()); + Assert.Equal(11, notification.GetKeyMaxCharCount()); + } + + [Fact] + public void CustomEventWithUnusualValue_Works() + { + var channel = RedisChannel.Literal("__keyevent@7__:flooble"); + RedisValue value = 17.5; + + Assert.True(KeyNotification.TryParse(in channel, in value, out var notification)); + + Assert.True(notification.IsKeyEvent); + Assert.Equal(7, notification.Database); + Assert.Equal(KeyNotificationType.Unknown, notification.Type); + Assert.False(notification.IsType("zadd"u8)); + Assert.True(notification.IsType("flooble"u8)); + Assert.Equal("17.5", 
(string?)notification.GetKey()); + Assert.Equal(4, notification.GetKeyByteCount()); + Assert.Equal(40, notification.GetKeyMaxByteCount()); + Assert.Equal(4, notification.GetKeyCharCount()); + Assert.Equal(40, notification.GetKeyMaxCharCount()); + } + + [Fact] + public void TryCopyKey_WorksCorrectly() + { + var channel = RedisChannel.Literal("__keyspace@0__:testkey"); + RedisValue value = "set"; + + Assert.True(KeyNotification.TryParse(in channel, in value, out var notification)); + + var lease = ArrayPool.Shared.Rent(20); + Span buffer = lease.AsSpan(0, 20); + Assert.True(notification.TryCopyKey(buffer, out var bytesWritten)); + Assert.Equal(7, bytesWritten); + Assert.Equal("testkey", Encoding.UTF8.GetString(lease, 0, bytesWritten)); + ArrayPool.Shared.Return(lease); + } + + [Fact] + public void TryCopyKey_FailsWithSmallBuffer() + { + var channel = RedisChannel.Literal("__keyspace@0__:testkey"); + RedisValue value = "set"; + + Assert.True(KeyNotification.TryParse(in channel, in value, out var notification)); + + Span buffer = stackalloc byte[3]; // too small + Assert.False(notification.TryCopyKey(buffer, out var bytesWritten)); + Assert.Equal(0, bytesWritten); + } + + [Fact] + public void InvalidChannel_ReturnsFalse() + { + var channel = RedisChannel.Literal("regular:channel"); + RedisValue value = "data"; + + Assert.False(KeyNotification.TryParse(in channel, in value, out var notification)); + } + + [Fact] + public void InvalidKeyspaceChannel_MissingDelimiter_ReturnsFalse() + { + var channel = RedisChannel.Literal("__keyspace@0__"); // missing the key part + RedisValue value = "set"; + + Assert.False(KeyNotification.TryParse(in channel, in value, out var notification)); + } + + [Fact] + public void Keyspace_UnknownEventType_ReturnsUnknown() + { + var channel = RedisChannel.Literal("__keyspace@0__:mykey"); + RedisValue value = "unknownevent"; + + Assert.True(KeyNotification.TryParse(in channel, in value, out var notification)); + + 
Assert.True(notification.IsKeySpace); + Assert.Equal(0, notification.Database); + Assert.Equal(KeyNotificationType.Unknown, notification.Type); + Assert.False(notification.IsType("del"u8)); + Assert.Equal("mykey", (string?)notification.GetKey()); + } + + [Fact] + public void Keyevent_UnknownEventType_ReturnsUnknown() + { + var channel = RedisChannel.Literal("__keyevent@0__:unknownevent"); + RedisValue value = "mykey"; + + Assert.True(KeyNotification.TryParse(in channel, in value, out var notification)); + + Assert.True(notification.IsKeyEvent); + Assert.Equal(0, notification.Database); + Assert.Equal(KeyNotificationType.Unknown, notification.Type); + Assert.False(notification.IsType("del"u8)); + Assert.Equal("mykey", (string?)notification.GetKey()); + } + + [Fact] + public void Keyspace_WithColonInKey_ParsesCorrectly() + { + var channel = RedisChannel.Literal("__keyspace@0__:user:session:12345"); + RedisValue value = "del"; + + Assert.True(KeyNotification.TryParse(in channel, in value, out var notification)); + + Assert.True(notification.IsKeySpace); + Assert.Equal(0, notification.Database); + Assert.Equal(KeyNotificationType.Del, notification.Type); + Assert.True(notification.IsType("del"u8)); + Assert.Equal("user:session:12345", (string?)notification.GetKey()); + } + + [Fact] + public void Keyevent_Evicted_ParsesCorrectly() + { + var channel = RedisChannel.Literal("__keyevent@1__:evicted"); + RedisValue value = "cache:old"; + + Assert.True(KeyNotification.TryParse(in channel, in value, out var notification)); + + Assert.True(notification.IsKeyEvent); + Assert.Equal(1, notification.Database); + Assert.Equal(KeyNotificationType.Evicted, notification.Type); + Assert.True(notification.IsType("evicted"u8)); + Assert.Equal("cache:old", (string?)notification.GetKey()); + } + + [Fact] + public void Keyspace_New_ParsesCorrectly() + { + var channel = RedisChannel.Literal("__keyspace@0__:newkey"); + RedisValue value = "new"; + + Assert.True(KeyNotification.TryParse(in 
channel, in value, out var notification)); + + Assert.True(notification.IsKeySpace); + Assert.Equal(0, notification.Database); + Assert.Equal(KeyNotificationType.New, notification.Type); + Assert.True(notification.IsType("new"u8)); + Assert.Equal("newkey", (string?)notification.GetKey()); + } + + [Fact] + public void Keyevent_XGroupCreate_ParsesCorrectly() + { + var channel = RedisChannel.Literal("__keyevent@0__:xgroup-create"); + RedisValue value = "mystream"; + + Assert.True(KeyNotification.TryParse(in channel, in value, out var notification)); + + Assert.True(notification.IsKeyEvent); + Assert.Equal(0, notification.Database); + Assert.Equal(KeyNotificationType.XGroupCreate, notification.Type); + Assert.True(notification.IsType("xgroup-create"u8)); + Assert.Equal("mystream", (string?)notification.GetKey()); + } + + [Fact] + public void Keyspace_TypeChanged_ParsesCorrectly() + { + var channel = RedisChannel.Literal("__keyspace@0__:mykey"); + RedisValue value = "type_changed"; + + Assert.True(KeyNotification.TryParse(in channel, in value, out var notification)); + + Assert.True(notification.IsKeySpace); + Assert.Equal(0, notification.Database); + Assert.Equal(KeyNotificationType.TypeChanged, notification.Type); + Assert.True(notification.IsType("type_changed"u8)); + Assert.Equal("mykey", (string?)notification.GetKey()); + } + + [Fact] + public void Keyevent_HighDatabaseNumber_ParsesCorrectly() + { + var channel = RedisChannel.Literal("__keyevent@999__:set"); + RedisValue value = "testkey"; + + Assert.True(KeyNotification.TryParse(in channel, in value, out var notification)); + + Assert.True(notification.IsKeyEvent); + Assert.Equal(999, notification.Database); + Assert.Equal(KeyNotificationType.Set, notification.Type); + Assert.True(notification.IsType("set"u8)); + Assert.Equal("testkey", (string?)notification.GetKey()); + } + + [Fact] + public void Keyevent_NonIntegerDatabase_ParsesWellEnough() + { + var channel = RedisChannel.Literal("__keyevent@abc__:set"); + 
RedisValue value = "testkey"; + + Assert.True(KeyNotification.TryParse(in channel, in value, out var notification)); + + Assert.True(notification.IsKeyEvent); + Assert.Equal(-1, notification.Database); + Assert.Equal(KeyNotificationType.Set, notification.Type); + Assert.True(notification.IsType("set"u8)); + Assert.Equal("testkey", (string?)notification.GetKey()); + } + + [Fact] + public void DefaultKeyNotification_HasExpectedProperties() + { + var notification = default(KeyNotification); + + Assert.False(notification.IsKeySpace); + Assert.False(notification.IsKeyEvent); + Assert.Equal(-1, notification.Database); + Assert.Equal(KeyNotificationType.Unknown, notification.Type); + Assert.False(notification.IsType("del"u8)); + Assert.True(notification.GetKey().IsNull); + Assert.Equal(0, notification.GetKeyByteCount()); + Assert.Equal(0, notification.GetKeyMaxByteCount()); + Assert.Equal(0, notification.GetKeyCharCount()); + Assert.Equal(0, notification.GetKeyMaxCharCount()); + Assert.True(notification.GetChannel().IsNull); + Assert.True(notification.GetValue().IsNull); + + // TryCopyKey should return false and write 0 bytes + Span buffer = stackalloc byte[10]; + Assert.False(notification.TryCopyKey(buffer, out var bytesWritten)); + Assert.Equal(0, bytesWritten); + } + + [Theory] + [InlineData("append", KeyNotificationType.Append)] + [InlineData("copy", KeyNotificationType.Copy)] + [InlineData("del", KeyNotificationType.Del)] + [InlineData("expire", KeyNotificationType.Expire)] + [InlineData("hdel", KeyNotificationType.HDel)] + [InlineData("hexpired", KeyNotificationType.HExpired)] + [InlineData("hincrbyfloat", KeyNotificationType.HIncrByFloat)] + [InlineData("hincrby", KeyNotificationType.HIncrBy)] + [InlineData("hpersist", KeyNotificationType.HPersist)] + [InlineData("hset", KeyNotificationType.HSet)] + [InlineData("incrbyfloat", KeyNotificationType.IncrByFloat)] + [InlineData("incrby", KeyNotificationType.IncrBy)] + [InlineData("linsert", KeyNotificationType.LInsert)] 
+ [InlineData("lpop", KeyNotificationType.LPop)] + [InlineData("lpush", KeyNotificationType.LPush)] + [InlineData("lrem", KeyNotificationType.LRem)] + [InlineData("lset", KeyNotificationType.LSet)] + [InlineData("ltrim", KeyNotificationType.LTrim)] + [InlineData("move_from", KeyNotificationType.MoveFrom)] + [InlineData("move_to", KeyNotificationType.MoveTo)] + [InlineData("persist", KeyNotificationType.Persist)] + [InlineData("rename_from", KeyNotificationType.RenameFrom)] + [InlineData("rename_to", KeyNotificationType.RenameTo)] + [InlineData("restore", KeyNotificationType.Restore)] + [InlineData("rpop", KeyNotificationType.RPop)] + [InlineData("rpush", KeyNotificationType.RPush)] + [InlineData("sadd", KeyNotificationType.SAdd)] + [InlineData("set", KeyNotificationType.Set)] + [InlineData("setrange", KeyNotificationType.SetRange)] + [InlineData("sortstore", KeyNotificationType.SortStore)] + [InlineData("srem", KeyNotificationType.SRem)] + [InlineData("spop", KeyNotificationType.SPop)] + [InlineData("xadd", KeyNotificationType.XAdd)] + [InlineData("xdel", KeyNotificationType.XDel)] + [InlineData("xgroup-createconsumer", KeyNotificationType.XGroupCreateConsumer)] + [InlineData("xgroup-create", KeyNotificationType.XGroupCreate)] + [InlineData("xgroup-delconsumer", KeyNotificationType.XGroupDelConsumer)] + [InlineData("xgroup-destroy", KeyNotificationType.XGroupDestroy)] + [InlineData("xgroup-setid", KeyNotificationType.XGroupSetId)] + [InlineData("xsetid", KeyNotificationType.XSetId)] + [InlineData("xtrim", KeyNotificationType.XTrim)] + [InlineData("zadd", KeyNotificationType.ZAdd)] + [InlineData("zdiffstore", KeyNotificationType.ZDiffStore)] + [InlineData("zinterstore", KeyNotificationType.ZInterStore)] + [InlineData("zunionstore", KeyNotificationType.ZUnionStore)] + [InlineData("zincr", KeyNotificationType.ZIncr)] + [InlineData("zrembyrank", KeyNotificationType.ZRemByRank)] + [InlineData("zrembyscore", KeyNotificationType.ZRemByScore)] + [InlineData("zrem", 
KeyNotificationType.ZRem)] + [InlineData("expired", KeyNotificationType.Expired)] + [InlineData("evicted", KeyNotificationType.Evicted)] + [InlineData("new", KeyNotificationType.New)] + [InlineData("overwritten", KeyNotificationType.Overwritten)] + [InlineData("type_changed", KeyNotificationType.TypeChanged)] + public unsafe void FastHashParse_AllKnownValues_ParseCorrectly(string raw, KeyNotificationType parsed) + { + var arr = ArrayPool.Shared.Rent(Encoding.UTF8.GetMaxByteCount(raw.Length)); + int bytes; + fixed (byte* bPtr = arr) // encode into the buffer + { + fixed (char* cPtr = raw) + { + bytes = Encoding.UTF8.GetBytes(cPtr, raw.Length, bPtr, arr.Length); + } + } + + var result = KeyNotificationTypeMetadata.Parse(arr.AsSpan(0, bytes)); + log.WriteLine($"Parsed '{raw}' as {result}"); + Assert.Equal(parsed, result); + + // and the other direction: + var fetchedBytes = KeyNotificationTypeMetadata.GetRawBytes(parsed); + string fetched; + fixed (byte* bPtr = fetchedBytes) + { + fetched = Encoding.UTF8.GetString(bPtr, fetchedBytes.Length); + } + + log.WriteLine($"Fetched '{raw}'"); + Assert.Equal(raw, fetched); + + ArrayPool.Shared.Return(arr); + } + + [Fact] + public void CreateKeySpaceNotification_Valid() + { + var channel = RedisChannel.KeySpaceSingleKey("abc", 42); + Assert.Equal("__keyspace@42__:abc", channel.ToString()); + Assert.False(channel.IsMultiNode); + Assert.True(channel.IsKeyRouted); + Assert.False(channel.IsSharded); + Assert.False(channel.IsPattern); + Assert.True(channel.IgnoreChannelPrefix); + } + + [Theory] + [InlineData(null, null, "__keyspace@*__:*")] + [InlineData("abc*", null, "__keyspace@*__:abc*")] + [InlineData(null, 42, "__keyspace@42__:*")] + [InlineData("abc*", 42, "__keyspace@42__:abc*")] + public void CreateKeySpaceNotificationPattern(string? pattern, int? 
database, string expected) + { + var channel = RedisChannel.KeySpacePattern(pattern, database); + Assert.Equal(expected, channel.ToString()); + Assert.True(channel.IsMultiNode); + Assert.False(channel.IsKeyRouted); + Assert.False(channel.IsSharded); + Assert.True(channel.IsPattern); + Assert.True(channel.IgnoreChannelPrefix); + } + + [Theory] + [InlineData("abc", null, "__keyspace@*__:abc*")] + [InlineData("abc", 42, "__keyspace@42__:abc*")] + public void CreateKeySpaceNotificationPrefix_Key(string prefix, int? database, string expected) + { + var channel = RedisChannel.KeySpacePrefix((RedisKey)prefix, database); + Assert.Equal(expected, channel.ToString()); + Assert.True(channel.IsMultiNode); + Assert.False(channel.IsKeyRouted); + Assert.False(channel.IsSharded); + Assert.True(channel.IsPattern); + Assert.True(channel.IgnoreChannelPrefix); + } + + [Theory] + [InlineData("abc", null, "__keyspace@*__:abc*")] + [InlineData("abc", 42, "__keyspace@42__:abc*")] + public void CreateKeySpaceNotificationPrefix_Span(string prefix, int? database, string expected) + { + var channel = RedisChannel.KeySpacePrefix((ReadOnlySpan)Encoding.UTF8.GetBytes(prefix), database); + Assert.Equal(expected, channel.ToString()); + Assert.True(channel.IsMultiNode); + Assert.False(channel.IsKeyRouted); + Assert.False(channel.IsSharded); + Assert.True(channel.IsPattern); + Assert.True(channel.IgnoreChannelPrefix); + } + + [Theory] + [InlineData("a?bc", null)] + [InlineData("a?bc", 42)] + [InlineData("a*bc", null)] + [InlineData("a*bc", 42)] + [InlineData("a[bc", null)] + [InlineData("a[bc", 42)] + public void CreateKeySpaceNotificationPrefix_DisallowGlob(string prefix, int? 
database) + { + var bytes = Encoding.UTF8.GetBytes(prefix); + var ex = Assert.Throws(() => + RedisChannel.KeySpacePrefix((RedisKey)bytes, database)); + Assert.StartsWith("The supplied key contains pattern characters, but patterns are not supported in this context.", ex.Message); + + ex = Assert.Throws(() => + RedisChannel.KeySpacePrefix((ReadOnlySpan)bytes, database)); + Assert.StartsWith("The supplied key contains pattern characters, but patterns are not supported in this context.", ex.Message); + } + + [Theory] + [InlineData(KeyNotificationType.Set, null, "__keyevent@*__:set", true)] + [InlineData(KeyNotificationType.XGroupCreate, null, "__keyevent@*__:xgroup-create", true)] + [InlineData(KeyNotificationType.Set, 42, "__keyevent@42__:set", false)] + [InlineData(KeyNotificationType.XGroupCreate, 42, "__keyevent@42__:xgroup-create", false)] + public void CreateKeyEventNotification(KeyNotificationType type, int? database, string expected, bool isPattern) + { + var channel = RedisChannel.KeyEvent(type, database); + Assert.Equal(expected, channel.ToString()); + Assert.True(channel.IsMultiNode); + Assert.False(channel.IsKeyRouted); + Assert.False(channel.IsSharded); + Assert.True(channel.IgnoreChannelPrefix); + if (isPattern) + { + Assert.True(channel.IsPattern); + } + else + { + Assert.False(channel.IsPattern); + } + } + + [Theory] + [InlineData("abc", "__keyspace@42__:abc")] + [InlineData("a*bc", "__keyspace@42__:a*bc")] // pattern-like is allowed, since not using PSUBSCRIBE + public void Cannot_KeyRoute_KeySpace_SingleKeyIsKeyRouted(string key, string pattern) + { + var channel = RedisChannel.KeySpaceSingleKey(key, 42); + Assert.Equal(pattern, channel.ToString()); + Assert.False(channel.IsMultiNode); + Assert.False(channel.IsPattern); + Assert.False(channel.IsSharded); + Assert.True(channel.IgnoreChannelPrefix); + Assert.True(channel.IsKeyRouted); + Assert.True(channel.WithKeyRouting().IsKeyRouted); // no change, still key-routed + Assert.Equal(RedisCommand.PUBLISH, 
channel.GetPublishCommand()); + } + + [Fact] + public void Cannot_KeyRoute_KeySpacePattern() + { + var channel = RedisChannel.KeySpacePattern("abc", 42); + Assert.True(channel.IsMultiNode); + Assert.False(channel.IsKeyRouted); + Assert.True(channel.IgnoreChannelPrefix); + Assert.StartsWith("Key routing is not supported for multi-node channels", Assert.Throws(() => channel.WithKeyRouting()).Message); + Assert.StartsWith("Publishing is not supported for multi-node channels", Assert.Throws(() => channel.GetPublishCommand()).Message); + } + + [Fact] + public void Cannot_KeyRoute_KeyEvent() + { + var channel = RedisChannel.KeyEvent(KeyNotificationType.Set, 42); + Assert.True(channel.IsMultiNode); + Assert.False(channel.IsKeyRouted); + Assert.True(channel.IgnoreChannelPrefix); + Assert.StartsWith("Key routing is not supported for multi-node channels", Assert.Throws(() => channel.WithKeyRouting()).Message); + Assert.StartsWith("Publishing is not supported for multi-node channels", Assert.Throws(() => channel.GetPublishCommand()).Message); + } + + [Fact] + public void Cannot_KeyRoute_KeyEvent_Custom() + { + var channel = RedisChannel.KeyEvent("foo"u8, 42); + Assert.True(channel.IsMultiNode); + Assert.False(channel.IsKeyRouted); + Assert.True(channel.IgnoreChannelPrefix); + Assert.StartsWith("Key routing is not supported for multi-node channels", Assert.Throws(() => channel.WithKeyRouting()).Message); + Assert.StartsWith("Publishing is not supported for multi-node channels", Assert.Throws(() => channel.GetPublishCommand()).Message); + } + + [Fact] + public void KeyEventPrefix_KeySpacePrefix_Length_Matches() + { + // this is a sanity check for the parsing step in KeyNotification.TryParse + Assert.Equal(KeyNotificationChannels.KeySpacePrefix.Length, KeyNotificationChannels.KeyEventPrefix.Length); + } + + [Theory] + [InlineData(false)] + [InlineData(true)] + public void KeyNotificationKeyStripping(bool asString) + { + Span blob = stackalloc byte[32]; + Span clob = stackalloc 
char[32]; + + RedisChannel channel = RedisChannel.Literal("__keyevent@0__:sadd"); + RedisValue value = asString ? "mykey:abc" : "mykey:abc"u8.ToArray(); + KeyNotification.TryParse(in channel, in value, out var notification); + Assert.Equal("mykey:abc", (string?)notification.GetKey()); + Assert.True(notification.KeyStartsWith("mykey:"u8)); + Assert.Equal(0, notification.KeyOffset); + + Assert.Equal(9, notification.GetKeyByteCount()); + Assert.Equal(asString ? 30 : 9, notification.GetKeyMaxByteCount()); + Assert.Equal(9, notification.GetKeyCharCount()); + Assert.Equal(asString ? 9 : 10, notification.GetKeyMaxCharCount()); + + Assert.True(notification.TryCopyKey(blob, out var bytesWritten)); + Assert.Equal(9, bytesWritten); + Assert.Equal("mykey:abc", Encoding.UTF8.GetString(blob.Slice(0, bytesWritten))); + + Assert.True(notification.TryCopyKey(clob, out var charsWritten)); + Assert.Equal(9, charsWritten); + Assert.Equal("mykey:abc", clob.Slice(0, charsWritten).ToString()); + + // now with a prefix + notification = notification.WithKeySlice("mykey:"u8.Length); + Assert.Equal("abc", (string?)notification.GetKey()); + Assert.False(notification.KeyStartsWith("mykey:"u8)); + Assert.Equal(6, notification.KeyOffset); + + Assert.Equal(3, notification.GetKeyByteCount()); + Assert.Equal(asString ? 24 : 3, notification.GetKeyMaxByteCount()); + Assert.Equal(3, notification.GetKeyCharCount()); + Assert.Equal(asString ? 
3 : 4, notification.GetKeyMaxCharCount()); + + Assert.True(notification.TryCopyKey(blob, out bytesWritten)); + Assert.Equal(3, bytesWritten); + Assert.Equal("abc", Encoding.UTF8.GetString(blob.Slice(0, bytesWritten))); + + Assert.True(notification.TryCopyKey(clob, out charsWritten)); + Assert.Equal(3, charsWritten); + Assert.Equal("abc", clob.Slice(0, charsWritten).ToString()); + } +} diff --git a/tests/StackExchange.Redis.Tests/KeyPrefixedBatchTests.cs b/tests/StackExchange.Redis.Tests/KeyPrefixedBatchTests.cs new file mode 100644 index 000000000..e92a7a227 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/KeyPrefixedBatchTests.cs @@ -0,0 +1,26 @@ +using System.Text; +using NSubstitute; +using StackExchange.Redis.KeyspaceIsolation; +using Xunit; + +namespace StackExchange.Redis.Tests; + +[Collection(nameof(SubstituteDependentCollection))] +public sealed class KeyPrefixedBatchTests +{ + private readonly IBatch mock; + private readonly KeyPrefixedBatch prefixed; + + public KeyPrefixedBatchTests() + { + mock = Substitute.For(); + prefixed = new KeyPrefixedBatch(mock, Encoding.UTF8.GetBytes("prefix:")); + } + + [Fact] + public void Execute() + { + prefixed.Execute(); + mock.Received(1).Execute(); + } +} diff --git a/tests/StackExchange.Redis.Tests/KeyPrefixedDatabaseTests.cs b/tests/StackExchange.Redis.Tests/KeyPrefixedDatabaseTests.cs new file mode 100644 index 000000000..0b781123c --- /dev/null +++ b/tests/StackExchange.Redis.Tests/KeyPrefixedDatabaseTests.cs @@ -0,0 +1,1794 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Linq.Expressions; +using System.Net; +using System.Text; +using NSubstitute; +using StackExchange.Redis.KeyspaceIsolation; +using Xunit; + +namespace StackExchange.Redis.Tests; + +[CollectionDefinition(nameof(SubstituteDependentCollection), DisableParallelization = true)] +public class SubstituteDependentCollection { } + +[Collection(nameof(SubstituteDependentCollection))] +public sealed class 
KeyPrefixedDatabaseTests +{ + private readonly IDatabase mock; + private readonly IDatabase prefixed; + + internal static RedisKey[] IsKeys(params RedisKey[] expected) => IsRaw(expected); + internal static RedisValue[] IsValues(params RedisValue[] expected) => IsRaw(expected); + private static T[] IsRaw(T[] expected) + { + Expression> lambda = actual => actual.Length == expected.Length && expected.SequenceEqual(actual); + return Arg.Is(lambda); + } + + public KeyPrefixedDatabaseTests() + { + mock = Substitute.For(); + prefixed = new KeyPrefixedDatabase(mock, Encoding.UTF8.GetBytes("prefix:")); + } + + [Fact] + public void CreateBatch() + { + object asyncState = new(); + IBatch innerBatch = Substitute.For(); + mock.CreateBatch(asyncState).Returns(innerBatch); + IBatch wrappedBatch = prefixed.CreateBatch(asyncState); + mock.Received().CreateBatch(asyncState); + Assert.IsType(wrappedBatch); + Assert.Same(innerBatch, ((KeyPrefixedBatch)wrappedBatch).Inner); + } + + [Fact] + public void CreateTransaction() + { + object asyncState = new(); + ITransaction innerTransaction = Substitute.For(); + mock.CreateTransaction(asyncState).Returns(innerTransaction); + ITransaction wrappedTransaction = prefixed.CreateTransaction(asyncState); + mock.Received().CreateTransaction(asyncState); + Assert.IsType(wrappedTransaction); + Assert.Same(innerTransaction, ((KeyPrefixedTransaction)wrappedTransaction).Inner); + } + + [Fact] + public void DebugObject() + { + prefixed.DebugObject("key", CommandFlags.None); + mock.Received().DebugObject("prefix:key", CommandFlags.None); + } + + [Fact] + public void Get_Database() + { + mock.Database.Returns(123); + Assert.Equal(123, prefixed.Database); + } + + [Fact] + public void HashDecrement_1() + { + prefixed.HashDecrement("key", "hashField", 123, CommandFlags.None); + mock.Received().HashDecrement("prefix:key", "hashField", 123, CommandFlags.None); + } + + [Fact] + public void HashDecrement_2() + { + prefixed.HashDecrement("key", "hashField", 1.23, 
CommandFlags.None); + mock.Received().HashDecrement("prefix:key", "hashField", 1.23, CommandFlags.None); + } + + [Fact] + public void HashDelete_1() + { + prefixed.HashDelete("key", "hashField", CommandFlags.None); + mock.Received().HashDelete("prefix:key", "hashField", CommandFlags.None); + } + + [Fact] + public void HashDelete_2() + { + RedisValue[] hashFields = Array.Empty(); + prefixed.HashDelete("key", hashFields, CommandFlags.None); + mock.Received().HashDelete("prefix:key", hashFields, CommandFlags.None); + } + + [Fact] + public void HashExists() + { + prefixed.HashExists("key", "hashField", CommandFlags.None); + mock.Received().HashExists("prefix:key", "hashField", CommandFlags.None); + } + + [Fact] + public void HashGet_1() + { + prefixed.HashGet("key", "hashField", CommandFlags.None); + mock.Received().HashGet("prefix:key", "hashField", CommandFlags.None); + } + + [Fact] + public void HashGet_2() + { + RedisValue[] hashFields = Array.Empty(); + prefixed.HashGet("key", hashFields, CommandFlags.None); + mock.Received().HashGet("prefix:key", hashFields, CommandFlags.None); + } + + [Fact] + public void HashGetAll() + { + prefixed.HashGetAll("key", CommandFlags.None); + mock.Received().HashGetAll("prefix:key", CommandFlags.None); + } + + [Fact] + public void HashIncrement_1() + { + prefixed.HashIncrement("key", "hashField", 123, CommandFlags.None); + mock.Received().HashIncrement("prefix:key", "hashField", 123, CommandFlags.None); + } + + [Fact] + public void HashIncrement_2() + { + prefixed.HashIncrement("key", "hashField", 1.23, CommandFlags.None); + mock.Received().HashIncrement("prefix:key", "hashField", 1.23, CommandFlags.None); + } + + [Fact] + public void HashKeys() + { + prefixed.HashKeys("key", CommandFlags.None); + mock.Received().HashKeys("prefix:key", CommandFlags.None); + } + + [Fact] + public void HashLength() + { + prefixed.HashLength("key", CommandFlags.None); + mock.Received().HashLength("prefix:key", CommandFlags.None); + } + + [Fact] + 
public void HashScan() + { + prefixed.HashScan("key", "pattern", 123, flags: CommandFlags.None); + mock.Received().HashScan("prefix:key", "pattern", 123, CommandFlags.None); + } + + [Fact] + public void HashScan_Full() + { + prefixed.HashScan("key", "pattern", 123, 42, 64, flags: CommandFlags.None); + mock.Received().HashScan("prefix:key", "pattern", 123, 42, 64, CommandFlags.None); + } + + [Fact] + public void HashScanNoValues() + { + prefixed.HashScanNoValues("key", "pattern", 123, flags: CommandFlags.None); + mock.Received().HashScanNoValues("prefix:key", "pattern", 123, flags: CommandFlags.None); + } + + [Fact] + public void HashScanNoValues_Full() + { + prefixed.HashScanNoValues("key", "pattern", 123, 42, 64, flags: CommandFlags.None); + mock.Received().HashScanNoValues("prefix:key", "pattern", 123, 42, 64, CommandFlags.None); + } + + [Fact] + public void HashSet_1() + { + HashEntry[] hashFields = Array.Empty(); + prefixed.HashSet("key", hashFields, CommandFlags.None); + mock.Received().HashSet("prefix:key", hashFields, CommandFlags.None); + } + + [Fact] + public void HashSet_2() + { + prefixed.HashSet("key", "hashField", "value", When.Exists, CommandFlags.None); + mock.Received().HashSet("prefix:key", "hashField", "value", When.Exists, CommandFlags.None); + } + + [Fact] + public void HashStringLength() + { + prefixed.HashStringLength("key", "field", CommandFlags.None); + mock.Received().HashStringLength("prefix:key", "field", CommandFlags.None); + } + + [Fact] + public void HashValues() + { + prefixed.HashValues("key", CommandFlags.None); + mock.Received().HashValues("prefix:key", CommandFlags.None); + } + + [Fact] + public void HyperLogLogAdd_1() + { + prefixed.HyperLogLogAdd("key", "value", CommandFlags.None); + mock.Received().HyperLogLogAdd("prefix:key", "value", CommandFlags.None); + } + + [Fact] + public void HyperLogLogAdd_2() + { + RedisValue[] values = Array.Empty(); + prefixed.HyperLogLogAdd("key", values, CommandFlags.None); + 
mock.Received().HyperLogLogAdd("prefix:key", values, CommandFlags.None); + } + + [Fact] + public void HyperLogLogLength() + { + prefixed.HyperLogLogLength("key", CommandFlags.None); + mock.Received().HyperLogLogLength("prefix:key", CommandFlags.None); + } + + [Fact] + public void HyperLogLogMerge_1() + { + prefixed.HyperLogLogMerge("destination", "first", "second", CommandFlags.None); + mock.Received().HyperLogLogMerge("prefix:destination", "prefix:first", "prefix:second", CommandFlags.None); + } + + [Fact] + public void HyperLogLogMerge_2() + { + prefixed.HyperLogLogMerge("destination", ["a", "b"], CommandFlags.None); + mock.Received().HyperLogLogMerge("prefix:destination", IsKeys(["prefix:a", "prefix:b"]), CommandFlags.None); + } + + [Fact] + public void IdentifyEndpoint() + { + prefixed.IdentifyEndpoint("key", CommandFlags.None); + mock.Received().IdentifyEndpoint("prefix:key", CommandFlags.None); + } + + [Fact] + public void KeyCopy() + { + prefixed.KeyCopy("key", "destination", flags: CommandFlags.None); + mock.Received().KeyCopy("prefix:key", "prefix:destination", -1, false, CommandFlags.None); + } + + [Fact] + public void KeyDelete_1() + { + prefixed.KeyDelete("key", CommandFlags.None); + mock.Received().KeyDelete("prefix:key", CommandFlags.None); + } + + [Fact] + public void KeyDelete_2() + { + prefixed.KeyDelete(["a", "b"], CommandFlags.None); + mock.Received().KeyDelete(IsKeys(["prefix:a", "prefix:b"]), CommandFlags.None); + } + + [Fact] + public void KeyDump() + { + prefixed.KeyDump("key", CommandFlags.None); + mock.Received().KeyDump("prefix:key", CommandFlags.None); + } + + [Fact] + public void KeyEncoding() + { + prefixed.KeyEncoding("key", CommandFlags.None); + mock.Received().KeyEncoding("prefix:key", CommandFlags.None); + } + + [Fact] + public void KeyExists() + { + prefixed.KeyExists("key", CommandFlags.None); + mock.Received().KeyExists("prefix:key", CommandFlags.None); + } + + [Fact] + public void KeyExpire_1() + { + TimeSpan expiry = 
TimeSpan.FromSeconds(123); + prefixed.KeyExpire("key", expiry, CommandFlags.None); + mock.Received().KeyExpire("prefix:key", expiry, CommandFlags.None); + } + + [Fact] + public void KeyExpire_2() + { + DateTime expiry = DateTime.Now; + prefixed.KeyExpire("key", expiry, CommandFlags.None); + mock.Received().KeyExpire("prefix:key", expiry, CommandFlags.None); + } + + [Fact] + public void KeyExpire_3() + { + TimeSpan expiry = TimeSpan.FromSeconds(123); + prefixed.KeyExpire("key", expiry, ExpireWhen.HasNoExpiry, CommandFlags.None); + mock.Received().KeyExpire("prefix:key", expiry, ExpireWhen.HasNoExpiry, CommandFlags.None); + } + + [Fact] + public void KeyExpire_4() + { + DateTime expiry = DateTime.Now; + prefixed.KeyExpire("key", expiry, ExpireWhen.HasNoExpiry, CommandFlags.None); + mock.Received().KeyExpire("prefix:key", expiry, ExpireWhen.HasNoExpiry, CommandFlags.None); + } + + [Fact] + public void KeyExpireTime() + { + prefixed.KeyExpireTime("key", CommandFlags.None); + mock.Received().KeyExpireTime("prefix:key", CommandFlags.None); + } + + [Fact] + public void KeyFrequency() + { + prefixed.KeyFrequency("key", CommandFlags.None); + mock.Received().KeyFrequency("prefix:key", CommandFlags.None); + } + + [Fact] + public void KeyMigrate() + { + EndPoint toServer = new IPEndPoint(IPAddress.Loopback, 123); + prefixed.KeyMigrate("key", toServer, 123, 456, MigrateOptions.Copy, CommandFlags.None); + mock.Received().KeyMigrate("prefix:key", toServer, 123, 456, MigrateOptions.Copy, CommandFlags.None); + } + + [Fact] + public void KeyMove() + { + prefixed.KeyMove("key", 123, CommandFlags.None); + mock.Received().KeyMove("prefix:key", 123, CommandFlags.None); + } + + [Fact] + public void KeyPersist() + { + prefixed.KeyPersist("key", CommandFlags.None); + mock.Received().KeyPersist("prefix:key", CommandFlags.None); + } + + [Fact] + public void KeyRandom() + { + Assert.Throws(() => prefixed.KeyRandom()); + } + + [Fact] + public void KeyRefCount() + { + 
prefixed.KeyRefCount("key", CommandFlags.None); + mock.Received().KeyRefCount("prefix:key", CommandFlags.None); + } + + [Fact] + public void KeyRename() + { + prefixed.KeyRename("key", "newKey", When.Exists, CommandFlags.None); + mock.Received().KeyRename("prefix:key", "prefix:newKey", When.Exists, CommandFlags.None); + } + + [Fact] + public void KeyRestore() + { + byte[] value = Array.Empty(); + TimeSpan expiry = TimeSpan.FromSeconds(123); + prefixed.KeyRestore("key", value, expiry, CommandFlags.None); + mock.Received().KeyRestore("prefix:key", value, expiry, CommandFlags.None); + } + + [Fact] + public void KeyTimeToLive() + { + prefixed.KeyTimeToLive("key", CommandFlags.None); + mock.Received().KeyTimeToLive("prefix:key", CommandFlags.None); + } + + [Fact] + public void KeyType() + { + prefixed.KeyType("key", CommandFlags.None); + mock.Received().KeyType("prefix:key", CommandFlags.None); + } + + [Fact] + public void ListGetByIndex() + { + prefixed.ListGetByIndex("key", 123, CommandFlags.None); + mock.Received().ListGetByIndex("prefix:key", 123, CommandFlags.None); + } + + [Fact] + public void ListInsertAfter() + { + prefixed.ListInsertAfter("key", "pivot", "value", CommandFlags.None); + mock.Received().ListInsertAfter("prefix:key", "pivot", "value", CommandFlags.None); + } + + [Fact] + public void ListInsertBefore() + { + prefixed.ListInsertBefore("key", "pivot", "value", CommandFlags.None); + mock.Received().ListInsertBefore("prefix:key", "pivot", "value", CommandFlags.None); + } + + [Fact] + public void ListLeftPop() + { + prefixed.ListLeftPop("key", CommandFlags.None); + mock.Received().ListLeftPop("prefix:key", CommandFlags.None); + } + + [Fact] + public void ListLeftPop_1() + { + prefixed.ListLeftPop("key", 123, CommandFlags.None); + mock.Received().ListLeftPop("prefix:key", 123, CommandFlags.None); + } + + [Fact] + public void ListLeftPush_1() + { + prefixed.ListLeftPush("key", "value", When.Exists, CommandFlags.None); + 
mock.Received().ListLeftPush("prefix:key", "value", When.Exists, CommandFlags.None); + } + + [Fact] + public void ListLeftPush_2() + { + RedisValue[] values = Array.Empty(); + prefixed.ListLeftPush("key", values, CommandFlags.None); + mock.Received().ListLeftPush("prefix:key", values, CommandFlags.None); + } + + [Fact] + public void ListLeftPush_3() + { + RedisValue[] values = ["value1", "value2"]; + prefixed.ListLeftPush("key", values, When.Exists, CommandFlags.None); + mock.Received().ListLeftPush("prefix:key", values, When.Exists, CommandFlags.None); + } + + [Fact] + public void ListLength() + { + prefixed.ListLength("key", CommandFlags.None); + mock.Received().ListLength("prefix:key", CommandFlags.None); + } + + [Fact] + public void ListMove() + { + prefixed.ListMove("key", "destination", ListSide.Left, ListSide.Right, CommandFlags.None); + mock.Received().ListMove("prefix:key", "prefix:destination", ListSide.Left, ListSide.Right, CommandFlags.None); + } + + [Fact] + public void ListRange() + { + prefixed.ListRange("key", 123, 456, CommandFlags.None); + mock.Received().ListRange("prefix:key", 123, 456, CommandFlags.None); + } + + [Fact] + public void ListRemove() + { + prefixed.ListRemove("key", "value", 123, CommandFlags.None); + mock.Received().ListRemove("prefix:key", "value", 123, CommandFlags.None); + } + + [Fact] + public void ListRightPop() + { + prefixed.ListRightPop("key", CommandFlags.None); + mock.Received().ListRightPop("prefix:key", CommandFlags.None); + } + + [Fact] + public void ListRightPop_1() + { + prefixed.ListRightPop("key", 123, CommandFlags.None); + mock.Received().ListRightPop("prefix:key", 123, CommandFlags.None); + } + + [Fact] + public void ListRightPopLeftPush() + { + prefixed.ListRightPopLeftPush("source", "destination", CommandFlags.None); + mock.Received().ListRightPopLeftPush("prefix:source", "prefix:destination", CommandFlags.None); + } + + [Fact] + public void ListRightPush_1() + { + prefixed.ListRightPush("key", "value", 
When.Exists, CommandFlags.None); + mock.Received().ListRightPush("prefix:key", "value", When.Exists, CommandFlags.None); + } + + [Fact] + public void ListRightPush_2() + { + RedisValue[] values = Array.Empty(); + prefixed.ListRightPush("key", values, CommandFlags.None); + mock.Received().ListRightPush("prefix:key", values, CommandFlags.None); + } + + [Fact] + public void ListRightPush_3() + { + RedisValue[] values = ["value1", "value2"]; + prefixed.ListRightPush("key", values, When.Exists, CommandFlags.None); + mock.Received().ListRightPush("prefix:key", values, When.Exists, CommandFlags.None); + } + + [Fact] + public void ListSetByIndex() + { + prefixed.ListSetByIndex("key", 123, "value", CommandFlags.None); + mock.Received().ListSetByIndex("prefix:key", 123, "value", CommandFlags.None); + } + + [Fact] + public void ListTrim() + { + prefixed.ListTrim("key", 123, 456, CommandFlags.None); + mock.Received().ListTrim("prefix:key", 123, 456, CommandFlags.None); + } + + [Fact] + public void LockExtend() + { + TimeSpan expiry = TimeSpan.FromSeconds(123); + prefixed.LockExtend("key", "value", expiry, CommandFlags.None); + mock.Received().LockExtend("prefix:key", "value", expiry, CommandFlags.None); + } + + [Fact] + public void LockQuery() + { + prefixed.LockQuery("key", CommandFlags.None); + mock.Received().LockQuery("prefix:key", CommandFlags.None); + } + + [Fact] + public void LockRelease() + { + prefixed.LockRelease("key", "value", CommandFlags.None); + mock.Received().LockRelease("prefix:key", "value", CommandFlags.None); + } + + [Fact] + public void LockTake() + { + TimeSpan expiry = TimeSpan.FromSeconds(123); + prefixed.LockTake("key", "value", expiry, CommandFlags.None); + mock.Received().LockTake("prefix:key", "value", expiry, CommandFlags.None); + } + + [Fact] + public void Publish() + { +#pragma warning disable CS0618 + prefixed.Publish("channel", "message", CommandFlags.None); + mock.Received().Publish("prefix:channel", "message", CommandFlags.None); +#pragma 
warning restore CS0618 + } + + [Fact] + public void ScriptEvaluate_1() + { + byte[] hash = Array.Empty(); + RedisValue[] values = Array.Empty(); + RedisKey[] keys = ["a", "b"]; + prefixed.ScriptEvaluate(hash, keys, values, CommandFlags.None); + mock.Received().ScriptEvaluate(hash, IsKeys(["prefix:a", "prefix:b"]), values, CommandFlags.None); + } + + [Fact] + public void ScriptEvaluate_2() + { + RedisValue[] values = Array.Empty(); + RedisKey[] keys = ["a", "b"]; + prefixed.ScriptEvaluate(script: "script", keys: keys, values: values, flags: CommandFlags.None); + mock.Received().ScriptEvaluate(script: "script", keys: IsKeys(["prefix:a", "prefix:b"]), values: values, flags: CommandFlags.None); + } + + [Fact] + public void SetAdd_1() + { + prefixed.SetAdd("key", "value", CommandFlags.None); + mock.Received().SetAdd("prefix:key", "value", CommandFlags.None); + } + + [Fact] + public void SetAdd_2() + { + RedisValue[] values = Array.Empty(); + prefixed.SetAdd("key", values, CommandFlags.None); + mock.Received().SetAdd("prefix:key", values, CommandFlags.None); + } + + [Fact] + public void SetCombine_1() + { + prefixed.SetCombine(SetOperation.Intersect, "first", "second", CommandFlags.None); + mock.Received().SetCombine(SetOperation.Intersect, "prefix:first", "prefix:second", CommandFlags.None); + } + + [Fact] + public void SetCombine_2() + { + RedisKey[] keys = ["a", "b"]; + prefixed.SetCombine(SetOperation.Intersect, keys, CommandFlags.None); + mock.Received().SetCombine(SetOperation.Intersect, IsKeys(["prefix:a", "prefix:b"]), CommandFlags.None); + } + + [Fact] + public void SetCombineAndStore_1() + { + prefixed.SetCombineAndStore(SetOperation.Intersect, "destination", "first", "second", CommandFlags.None); + mock.Received().SetCombineAndStore(SetOperation.Intersect, "prefix:destination", "prefix:first", "prefix:second", CommandFlags.None); + } + + [Fact] + public void SetCombineAndStore_2() + { + RedisKey[] keys = ["a", "b"]; + 
prefixed.SetCombineAndStore(SetOperation.Intersect, "destination", keys, CommandFlags.None); + mock.Received().SetCombineAndStore(SetOperation.Intersect, "prefix:destination", IsKeys(["prefix:a", "prefix:b"]), CommandFlags.None); + } + + [Fact] + public void SetContains() + { + prefixed.SetContains("key", "value", CommandFlags.None); + mock.Received().SetContains("prefix:key", "value", CommandFlags.None); + } + + [Fact] + public void SetContains_2() + { + RedisValue[] values = ["value1", "value2"]; + prefixed.SetContains("key", values, CommandFlags.None); + mock.Received().SetContains("prefix:key", values, CommandFlags.None); + } + + [Fact] + public void SetIntersectionLength() + { + prefixed.SetIntersectionLength(["key1", "key2"]); + mock.Received().SetIntersectionLength(IsKeys(["prefix:key1", "prefix:key2"]), 0, CommandFlags.None); + } + + [Fact] + public void SetLength() + { + prefixed.SetLength("key", CommandFlags.None); + mock.Received().SetLength("prefix:key", CommandFlags.None); + } + + [Fact] + public void SetMembers() + { + prefixed.SetMembers("key", CommandFlags.None); + mock.Received().SetMembers("prefix:key", CommandFlags.None); + } + + [Fact] + public void SetMove() + { + prefixed.SetMove("source", "destination", "value", CommandFlags.None); + mock.Received().SetMove("prefix:source", "prefix:destination", "value", CommandFlags.None); + } + + [Fact] + public void SetPop_1() + { + prefixed.SetPop("key", CommandFlags.None); + mock.Received().SetPop("prefix:key", CommandFlags.None); + + prefixed.SetPop("key", 5, CommandFlags.None); + mock.Received().SetPop("prefix:key", 5, CommandFlags.None); + } + + [Fact] + public void SetPop_2() + { + prefixed.SetPop("key", 5, CommandFlags.None); + mock.Received().SetPop("prefix:key", 5, CommandFlags.None); + } + + [Fact] + public void SetRandomMember() + { + prefixed.SetRandomMember("key", CommandFlags.None); + mock.Received().SetRandomMember("prefix:key", CommandFlags.None); + } + + [Fact] + public void 
SetRandomMembers() + { + prefixed.SetRandomMembers("key", 123, CommandFlags.None); + mock.Received().SetRandomMembers("prefix:key", 123, CommandFlags.None); + } + + [Fact] + public void SetRemove_1() + { + prefixed.SetRemove("key", "value", CommandFlags.None); + mock.Received().SetRemove("prefix:key", "value", CommandFlags.None); + } + + [Fact] + public void SetRemove_2() + { + RedisValue[] values = Array.Empty(); + prefixed.SetRemove("key", values, CommandFlags.None); + mock.Received().SetRemove("prefix:key", values, CommandFlags.None); + } + + [Fact] + public void SetScan() + { + prefixed.SetScan("key", "pattern", 123, flags: CommandFlags.None); + mock.Received().SetScan("prefix:key", "pattern", 123, CommandFlags.None); + } + + [Fact] + public void SetScan_Full() + { + prefixed.SetScan("key", "pattern", 123, 42, 64, flags: CommandFlags.None); + mock.Received().SetScan("prefix:key", "pattern", 123, 42, 64, CommandFlags.None); + } + + [Fact] + public void Sort() + { + RedisValue[] get = ["a", "#"]; + + prefixed.Sort("key", 123, 456, Order.Descending, SortType.Alphabetic, "nosort", get, CommandFlags.None); + prefixed.Sort("key", 123, 456, Order.Descending, SortType.Alphabetic, "by", get, CommandFlags.None); + + mock.Received().Sort("prefix:key", 123, 456, Order.Descending, SortType.Alphabetic, "nosort", IsValues(["prefix:a", "#"]), CommandFlags.None); + mock.Received().Sort("prefix:key", 123, 456, Order.Descending, SortType.Alphabetic, "prefix:by", IsValues(["prefix:a", "#"]), CommandFlags.None); + } + + [Fact] + public void SortAndStore() + { + RedisValue[] get = ["a", "#"]; + + prefixed.SortAndStore("destination", "key", 123, 456, Order.Descending, SortType.Alphabetic, "nosort", get, CommandFlags.None); + prefixed.SortAndStore("destination", "key", 123, 456, Order.Descending, SortType.Alphabetic, "by", get, CommandFlags.None); + + mock.Received().SortAndStore("prefix:destination", "prefix:key", 123, 456, Order.Descending, SortType.Alphabetic, "nosort", 
IsValues(["prefix:a", "#"]), CommandFlags.None); + mock.Received().SortAndStore("prefix:destination", "prefix:key", 123, 456, Order.Descending, SortType.Alphabetic, "prefix:by", IsValues(["prefix:a", "#"]), CommandFlags.None); + } + + [Fact] + public void SortedSetAdd_1() + { + prefixed.SortedSetAdd("key", "member", 1.23, When.Exists, CommandFlags.None); + mock.Received().SortedSetAdd("prefix:key", "member", 1.23, When.Exists, CommandFlags.None); + } + + [Fact] + public void SortedSetAdd_2() + { + SortedSetEntry[] values = Array.Empty(); + prefixed.SortedSetAdd("key", values, When.Exists, CommandFlags.None); + mock.Received().SortedSetAdd("prefix:key", values, When.Exists, CommandFlags.None); + } + + [Fact] + public void SortedSetAdd_3() + { + SortedSetEntry[] values = Array.Empty(); + prefixed.SortedSetAdd("key", values, SortedSetWhen.GreaterThan, CommandFlags.None); + mock.Received().SortedSetAdd("prefix:key", values, SortedSetWhen.GreaterThan, CommandFlags.None); + } + + [Fact] + public void SortedSetCombine() + { + RedisKey[] keys = ["a", "b"]; + prefixed.SortedSetCombine(SetOperation.Intersect, ["a", "b"]); + mock.Received().SortedSetCombine(SetOperation.Intersect, IsKeys(["prefix:a", "prefix:b"]), null, Aggregate.Sum, CommandFlags.None); + } + + [Fact] + public void SortedSetCombineWithScores() + { + prefixed.SortedSetCombineWithScores(SetOperation.Intersect, ["a", "b"]); + mock.Received().SortedSetCombineWithScores(SetOperation.Intersect, IsKeys("prefix:a", "prefix:b"), null, Aggregate.Sum, CommandFlags.None); + } + + [Fact] + public void SortedSetCombineAndStore_1() + { + prefixed.SortedSetCombineAndStore(SetOperation.Intersect, "destination", "first", "second", Aggregate.Max, CommandFlags.None); + mock.Received().SortedSetCombineAndStore(SetOperation.Intersect, "prefix:destination", "prefix:first", "prefix:second", Aggregate.Max, CommandFlags.None); + } + + [Fact] + public void SortedSetCombineAndStore_2() + { + RedisKey[] keys = ["a", "b"]; + 
prefixed.SetCombineAndStore(SetOperation.Intersect, "destination", keys, CommandFlags.None); + mock.Received().SetCombineAndStore(SetOperation.Intersect, "prefix:destination", IsKeys("prefix:a", "prefix:b"), CommandFlags.None); + } + + [Fact] + public void SortedSetDecrement() + { + prefixed.SortedSetDecrement("key", "member", 1.23, CommandFlags.None); + mock.Received().SortedSetDecrement("prefix:key", "member", 1.23, CommandFlags.None); + } + + [Fact] + public void SortedSetIncrement() + { + prefixed.SortedSetIncrement("key", "member", 1.23, CommandFlags.None); + mock.Received().SortedSetIncrement("prefix:key", "member", 1.23, CommandFlags.None); + } + + [Fact] + public void SortedSetIntersectionLength() + { + prefixed.SortedSetIntersectionLength(["a", "b"], 1, CommandFlags.None); + mock.Received().SortedSetIntersectionLength(IsKeys("prefix:a", "prefix:b"), 1, CommandFlags.None); + } + + [Fact] + public void SortedSetLength() + { + prefixed.SortedSetLength("key", 1.23, 1.23, Exclude.Start, CommandFlags.None); + mock.Received().SortedSetLength("prefix:key", 1.23, 1.23, Exclude.Start, CommandFlags.None); + } + + [Fact] + public void SortedSetRandomMember() + { + prefixed.SortedSetRandomMember("key", CommandFlags.None); + mock.Received().SortedSetRandomMember("prefix:key", CommandFlags.None); + } + + [Fact] + public void SortedSetRandomMembers() + { + prefixed.SortedSetRandomMembers("key", 2, CommandFlags.None); + mock.Received().SortedSetRandomMembers("prefix:key", 2, CommandFlags.None); + } + + [Fact] + public void SortedSetRandomMembersWithScores() + { + prefixed.SortedSetRandomMembersWithScores("key", 2, CommandFlags.None); + mock.Received().SortedSetRandomMembersWithScores("prefix:key", 2, CommandFlags.None); + } + + [Fact] + public void SortedSetLengthByValue() + { + prefixed.SortedSetLengthByValue("key", "min", "max", Exclude.Start, CommandFlags.None); + mock.Received().SortedSetLengthByValue("prefix:key", "min", "max", Exclude.Start, CommandFlags.None); + } + 
+ [Fact] + public void SortedSetRangeByRank() + { + prefixed.SortedSetRangeByRank("key", 123, 456, Order.Descending, CommandFlags.None); + mock.Received().SortedSetRangeByRank("prefix:key", 123, 456, Order.Descending, CommandFlags.None); + } + + [Fact] + public void SortedSetRangeByRankWithScores() + { + prefixed.SortedSetRangeByRankWithScores("key", 123, 456, Order.Descending, CommandFlags.None); + mock.Received().SortedSetRangeByRankWithScores("prefix:key", 123, 456, Order.Descending, CommandFlags.None); + } + + [Fact] + public void SortedSetRangeByScore() + { + prefixed.SortedSetRangeByScore("key", 1.23, 1.23, Exclude.Start, Order.Descending, 123, 456, CommandFlags.None); + mock.Received().SortedSetRangeByScore("prefix:key", 1.23, 1.23, Exclude.Start, Order.Descending, 123, 456, CommandFlags.None); + } + + [Fact] + public void SortedSetRangeByScoreWithScores() + { + prefixed.SortedSetRangeByScoreWithScores("key", 1.23, 1.23, Exclude.Start, Order.Descending, 123, 456, CommandFlags.None); + mock.Received().SortedSetRangeByScoreWithScores("prefix:key", 1.23, 1.23, Exclude.Start, Order.Descending, 123, 456, CommandFlags.None); + } + + [Fact] + public void SortedSetRangeByValue() + { + prefixed.SortedSetRangeByValue("key", "min", "max", Exclude.Start, 123, 456, CommandFlags.None); + mock.Received().SortedSetRangeByValue("prefix:key", "min", "max", Exclude.Start, Order.Ascending, 123, 456, CommandFlags.None); + } + + [Fact] + public void SortedSetRangeByValueDesc() + { + prefixed.SortedSetRangeByValue("key", "min", "max", Exclude.Start, Order.Descending, 123, 456, CommandFlags.None); + mock.Received().SortedSetRangeByValue("prefix:key", "min", "max", Exclude.Start, Order.Descending, 123, 456, CommandFlags.None); + } + + [Fact] + public void SortedSetRank() + { + prefixed.SortedSetRank("key", "member", Order.Descending, CommandFlags.None); + mock.Received().SortedSetRank("prefix:key", "member", Order.Descending, CommandFlags.None); + } + + [Fact] + public void 
SortedSetRemove_1() + { + prefixed.SortedSetRemove("key", "member", CommandFlags.None); + mock.Received().SortedSetRemove("prefix:key", "member", CommandFlags.None); + } + + [Fact] + public void SortedSetRemove_2() + { + RedisValue[] members = Array.Empty(); + prefixed.SortedSetRemove("key", members, CommandFlags.None); + mock.Received().SortedSetRemove("prefix:key", members, CommandFlags.None); + } + + [Fact] + public void SortedSetRemoveRangeByRank() + { + prefixed.SortedSetRemoveRangeByRank("key", 123, 456, CommandFlags.None); + mock.Received().SortedSetRemoveRangeByRank("prefix:key", 123, 456, CommandFlags.None); + } + + [Fact] + public void SortedSetRemoveRangeByScore() + { + prefixed.SortedSetRemoveRangeByScore("key", 1.23, 1.23, Exclude.Start, CommandFlags.None); + mock.Received().SortedSetRemoveRangeByScore("prefix:key", 1.23, 1.23, Exclude.Start, CommandFlags.None); + } + + [Fact] + public void SortedSetRemoveRangeByValue() + { + prefixed.SortedSetRemoveRangeByValue("key", "min", "max", Exclude.Start, CommandFlags.None); + mock.Received().SortedSetRemoveRangeByValue("prefix:key", "min", "max", Exclude.Start, CommandFlags.None); + } + + [Fact] + public void SortedSetScan() + { + prefixed.SortedSetScan("key", "pattern", 123, flags: CommandFlags.None); + mock.Received().SortedSetScan("prefix:key", "pattern", 123, CommandFlags.None); + } + + [Fact] + public void SortedSetScan_Full() + { + prefixed.SortedSetScan("key", "pattern", 123, 42, 64, flags: CommandFlags.None); + mock.Received().SortedSetScan("prefix:key", "pattern", 123, 42, 64, CommandFlags.None); + } + + [Fact] + public void SortedSetScore() + { + prefixed.SortedSetScore("key", "member", CommandFlags.None); + mock.Received().SortedSetScore("prefix:key", "member", CommandFlags.None); + } + + [Fact] + public void SortedSetScore_Multiple() + { + var values = new RedisValue[] { "member1", "member2" }; + prefixed.SortedSetScores("key", values, CommandFlags.None); + 
mock.Received().SortedSetScores("prefix:key", values, CommandFlags.None); + } + + [Fact] + public void SortedSetUpdate() + { + SortedSetEntry[] values = Array.Empty(); + prefixed.SortedSetUpdate("key", values, SortedSetWhen.GreaterThan, CommandFlags.None); + mock.Received().SortedSetUpdate("prefix:key", values, SortedSetWhen.GreaterThan, CommandFlags.None); + } + + [Fact] + public void StreamAcknowledge_1() + { + prefixed.StreamAcknowledge("key", "group", "0-0", CommandFlags.None); + mock.Received().StreamAcknowledge("prefix:key", "group", "0-0", CommandFlags.None); + } + + [Fact] + public void StreamAcknowledge_2() + { + var messageIds = new RedisValue[] { "0-0", "0-1", "0-2" }; + prefixed.StreamAcknowledge("key", "group", messageIds, CommandFlags.None); + mock.Received().StreamAcknowledge("prefix:key", "group", messageIds, CommandFlags.None); + } + + [Fact] + public void StreamAdd_1() + { + prefixed.StreamAdd("key", "field1", "value1", "*", 1000, true, CommandFlags.None); + mock.Received().StreamAdd("prefix:key", "field1", "value1", "*", 1000, true, CommandFlags.None); + } + + [Fact] + public void StreamAdd_2() + { + var fields = Array.Empty(); + prefixed.StreamAdd("key", fields, "*", 1000, true, CommandFlags.None); + mock.Received().StreamAdd("prefix:key", fields, "*", 1000, true, CommandFlags.None); + } + + [Fact] + public void StreamAutoClaim() + { + prefixed.StreamAutoClaim("key", "group", "consumer", 0, "0-0", 100, CommandFlags.None); + mock.Received().StreamAutoClaim("prefix:key", "group", "consumer", 0, "0-0", 100, CommandFlags.None); + } + + [Fact] + public void StreamAutoClaimIdsOnly() + { + prefixed.StreamAutoClaimIdsOnly("key", "group", "consumer", 0, "0-0", 100, CommandFlags.None); + mock.Received().StreamAutoClaimIdsOnly("prefix:key", "group", "consumer", 0, "0-0", 100, CommandFlags.None); + } + + [Fact] + public void StreamClaimMessages() + { + var messageIds = Array.Empty(); + prefixed.StreamClaim("key", "group", "consumer", 1000, messageIds, 
CommandFlags.None); + mock.Received().StreamClaim("prefix:key", "group", "consumer", 1000, messageIds, CommandFlags.None); + } + + [Fact] + public void StreamClaimMessagesReturningIds() + { + var messageIds = Array.Empty(); + prefixed.StreamClaimIdsOnly("key", "group", "consumer", 1000, messageIds, CommandFlags.None); + mock.Received().StreamClaimIdsOnly("prefix:key", "group", "consumer", 1000, messageIds, CommandFlags.None); + } + + [Fact] + public void StreamConsumerGroupSetPosition() + { + prefixed.StreamConsumerGroupSetPosition("key", "group", StreamPosition.Beginning, CommandFlags.None); + mock.Received().StreamConsumerGroupSetPosition("prefix:key", "group", StreamPosition.Beginning, CommandFlags.None); + } + + [Fact] + public void StreamConsumerInfoGet() + { + prefixed.StreamConsumerInfo("key", "group", CommandFlags.None); + mock.Received().StreamConsumerInfo("prefix:key", "group", CommandFlags.None); + } + + [Fact] + public void StreamCreateConsumerGroup() + { + prefixed.StreamCreateConsumerGroup("key", "group", StreamPosition.Beginning, false, CommandFlags.None); + mock.Received().StreamCreateConsumerGroup("prefix:key", "group", StreamPosition.Beginning, false, CommandFlags.None); + } + + [Fact] + public void StreamGroupInfoGet() + { + prefixed.StreamGroupInfo("key", CommandFlags.None); + mock.Received().StreamGroupInfo("prefix:key", CommandFlags.None); + } + + [Fact] + public void StreamInfoGet() + { + prefixed.StreamInfo("key", CommandFlags.None); + mock.Received().StreamInfo("prefix:key", CommandFlags.None); + } + + [Fact] + public void StreamLength() + { + prefixed.StreamLength("key", CommandFlags.None); + mock.Received().StreamLength("prefix:key", CommandFlags.None); + } + + [Fact] + public void StreamMessagesDelete() + { + var messageIds = Array.Empty(); + prefixed.StreamDelete("key", messageIds, CommandFlags.None); + mock.Received().StreamDelete("prefix:key", messageIds, CommandFlags.None); + } + + [Fact] + public void StreamDeleteConsumer() + { + 
prefixed.StreamDeleteConsumer("key", "group", "consumer", CommandFlags.None); + mock.Received().StreamDeleteConsumer("prefix:key", "group", "consumer", CommandFlags.None); + } + + [Fact] + public void StreamDeleteConsumerGroup() + { + prefixed.StreamDeleteConsumerGroup("key", "group", CommandFlags.None); + mock.Received().StreamDeleteConsumerGroup("prefix:key", "group", CommandFlags.None); + } + + [Fact] + public void StreamPendingInfoGet() + { + prefixed.StreamPending("key", "group", CommandFlags.None); + mock.Received().StreamPending("prefix:key", "group", CommandFlags.None); + } + + [Fact] + public void StreamPendingMessageInfoGet() + { + prefixed.StreamPendingMessages("key", "group", 10, RedisValue.Null, "-", "+", 1000, CommandFlags.None); + mock.Received().StreamPendingMessages("prefix:key", "group", 10, RedisValue.Null, "-", "+", 1000, CommandFlags.None); + } + + [Fact] + public void StreamRange() + { + prefixed.StreamRange("key", "-", "+", null, Order.Ascending, CommandFlags.None); + mock.Received().StreamRange("prefix:key", "-", "+", null, Order.Ascending, CommandFlags.None); + } + + [Fact] + public void StreamRead_1() + { + var streamPositions = Array.Empty(); + prefixed.StreamRead(streamPositions, null, CommandFlags.None); + mock.Received().StreamRead(streamPositions, null, CommandFlags.None); + } + + [Fact] + public void StreamRead_2() + { + prefixed.StreamRead("key", "0-0", null, CommandFlags.None); + mock.Received().StreamRead("prefix:key", "0-0", null, CommandFlags.None); + } + + [Fact] + public void StreamStreamReadGroup_1() + { + prefixed.StreamReadGroup("key", "group", "consumer", "0-0", 10, false, CommandFlags.None); + mock.Received().StreamReadGroup("prefix:key", "group", "consumer", "0-0", 10, false, CommandFlags.None); + } + + [Fact] + public void StreamStreamReadGroup_2() + { + var streamPositions = Array.Empty(); + prefixed.StreamReadGroup(streamPositions, "group", "consumer", 10, false, CommandFlags.None); + 
mock.Received().StreamReadGroup(streamPositions, "group", "consumer", 10, false, CommandFlags.None); + } + + [Fact] + public void StreamTrim() + { + prefixed.StreamTrim("key", 1000, true, CommandFlags.None); + mock.Received().StreamTrim("prefix:key", 1000, true, CommandFlags.None); + } + + [Fact] + public void StreamTrimByMinId() + { + prefixed.StreamTrimByMinId("key", 1111111111); + mock.Received().StreamTrimByMinId("prefix:key", 1111111111); + } + + [Fact] + public void StreamTrimByMinIdWithApproximate() + { + prefixed.StreamTrimByMinId("key", 1111111111, useApproximateMaxLength: true); + mock.Received().StreamTrimByMinId("prefix:key", 1111111111, useApproximateMaxLength: true); + } + + [Fact] + public void StreamTrimByMinIdWithApproximateAndLimit() + { + prefixed.StreamTrimByMinId("key", 1111111111, useApproximateMaxLength: true, limit: 100); + mock.Received().StreamTrimByMinId("prefix:key", 1111111111, useApproximateMaxLength: true, limit: 100); + } + + [Fact] + public void StringAppend() + { + prefixed.StringAppend("key", "value", CommandFlags.None); + mock.Received().StringAppend("prefix:key", "value", CommandFlags.None); + } + + [Fact] + public void StringBitCount() + { + prefixed.StringBitCount("key", 123, 456, CommandFlags.None); + mock.Received().StringBitCount("prefix:key", 123, 456, CommandFlags.None); + } + + [Fact] + public void StringBitCount_2() + { + prefixed.StringBitCount("key", 123, 456, StringIndexType.Byte, CommandFlags.None); + mock.Received().StringBitCount("prefix:key", 123, 456, StringIndexType.Byte, CommandFlags.None); + } + + [Fact] + public void StringBitOperation_1() + { + prefixed.StringBitOperation(Bitwise.Xor, "destination", "first", "second", CommandFlags.None); + mock.Received().StringBitOperation(Bitwise.Xor, "prefix:destination", "prefix:first", "prefix:second", CommandFlags.None); + } + + [Fact] + public void StringBitOperation_2() + { + RedisKey[] keys = ["a", "b"]; + prefixed.StringBitOperation(Bitwise.Xor, "destination", 
keys, CommandFlags.None); + mock.Received().StringBitOperation(Bitwise.Xor, "prefix:destination", IsKeys("prefix:a", "prefix:b"), CommandFlags.None); + } + + [Fact] + public void StringBitOperation_Diff() + { + RedisKey[] keys = ["x", "y1", "y2"]; + prefixed.StringBitOperation(Bitwise.Diff, "destination", keys, CommandFlags.None); + mock.Received().StringBitOperation(Bitwise.Diff, "prefix:destination", IsKeys("prefix:x", "prefix:y1", "prefix:y2"), CommandFlags.None); + } + + [Fact] + public void StringBitOperation_Diff1() + { + RedisKey[] keys = ["x", "y1", "y2"]; + prefixed.StringBitOperation(Bitwise.Diff1, "destination", keys, CommandFlags.None); + mock.Received().StringBitOperation(Bitwise.Diff1, "prefix:destination", IsKeys("prefix:x", "prefix:y1", "prefix:y2"), CommandFlags.None); + } + + [Fact] + public void StringBitOperation_AndOr() + { + RedisKey[] keys = ["x", "y1", "y2"]; + prefixed.StringBitOperation(Bitwise.AndOr, "destination", keys, CommandFlags.None); + mock.Received().StringBitOperation(Bitwise.AndOr, "prefix:destination", IsKeys("prefix:x", "prefix:y1", "prefix:y2"), CommandFlags.None); + } + + [Fact] + public void StringBitOperation_One() + { + RedisKey[] keys = ["a", "b", "c"]; + prefixed.StringBitOperation(Bitwise.One, "destination", keys, CommandFlags.None); + mock.Received().StringBitOperation(Bitwise.One, "prefix:destination", IsKeys("prefix:a", "prefix:b", "prefix:c"), CommandFlags.None); + } + + [Fact] + public void StringBitPosition() + { + prefixed.StringBitPosition("key", true, 123, 456, CommandFlags.None); + mock.Received().StringBitPosition("prefix:key", true, 123, 456, CommandFlags.None); + } + + [Fact] + public void StringBitPosition_2() + { + prefixed.StringBitPosition("key", true, 123, 456, StringIndexType.Byte, CommandFlags.None); + mock.Received().StringBitPosition("prefix:key", true, 123, 456, StringIndexType.Byte, CommandFlags.None); + } + + [Fact] + public void StringDecrement_1() + { + prefixed.StringDecrement("key", 123, 
CommandFlags.None); + mock.Received().StringDecrement("prefix:key", 123, CommandFlags.None); + } + + [Fact] + public void StringDecrement_2() + { + prefixed.StringDecrement("key", 1.23, CommandFlags.None); + mock.Received().StringDecrement("prefix:key", 1.23, CommandFlags.None); + } + + [Fact] + public void StringGet_1() + { + prefixed.StringGet("key", CommandFlags.None); + mock.Received().StringGet("prefix:key", CommandFlags.None); + } + + [Fact] + public void StringGet_2() + { + RedisKey[] keys = ["a", "b"]; + prefixed.StringGet(keys, CommandFlags.None); + mock.Received().StringGet(IsKeys("prefix:a", "prefix:b"), CommandFlags.None); + } + + [Fact] + public void StringGetBit() + { + prefixed.StringGetBit("key", 123, CommandFlags.None); + mock.Received().StringGetBit("prefix:key", 123, CommandFlags.None); + } + + [Fact] + public void StringGetRange() + { + prefixed.StringGetRange("key", 123, 456, CommandFlags.None); + mock.Received().StringGetRange("prefix:key", 123, 456, CommandFlags.None); + } + + [Fact] + public void StringGetSet() + { + prefixed.StringGetSet("key", "value", CommandFlags.None); + mock.Received().StringGetSet("prefix:key", "value", CommandFlags.None); + } + + [Fact] + public void StringGetDelete() + { + prefixed.StringGetDelete("key", CommandFlags.None); + mock.Received().StringGetDelete("prefix:key", CommandFlags.None); + } + + [Fact] + public void StringGetWithExpiry() + { + prefixed.StringGetWithExpiry("key", CommandFlags.None); + mock.Received().StringGetWithExpiry("prefix:key", CommandFlags.None); + } + + [Fact] + public void StringIncrement_1() + { + prefixed.StringIncrement("key", 123, CommandFlags.None); + mock.Received().StringIncrement("prefix:key", 123, CommandFlags.None); + } + + [Fact] + public void StringIncrement_2() + { + prefixed.StringIncrement("key", 1.23, CommandFlags.None); + mock.Received().StringIncrement("prefix:key", 1.23, CommandFlags.None); + } + + [Fact] + public void StringLength() + { + prefixed.StringLength("key", 
CommandFlags.None); + mock.Received().StringLength("prefix:key", CommandFlags.None); + } + + [Fact] + public void StringSet_1() + { + TimeSpan expiry = TimeSpan.FromSeconds(123); + prefixed.StringSet("key", "value", expiry, When.Exists, CommandFlags.None); + mock.Received().StringSet("prefix:key", "value", expiry, When.Exists, CommandFlags.None); + } + + [Fact] + public void StringSet_2() + { + TimeSpan? expiry = null; + prefixed.StringSet("key", "value", expiry, true, When.Exists, CommandFlags.None); + mock.Received().StringSet("prefix:key", "value", expiry, true, When.Exists, CommandFlags.None); + } + + [Fact] + public void StringSet_3() + { + KeyValuePair[] values = [new KeyValuePair("a", "x"), new KeyValuePair("b", "y")]; + Expression[]>> valid = _ => _.Length == 2 && _[0].Key == "prefix:a" && _[0].Value == "x" && _[1].Key == "prefix:b" && _[1].Value == "y"; + prefixed.StringSet(values, When.Exists, CommandFlags.None); + mock.Received().StringSet(Arg.Is(valid), When.Exists, CommandFlags.None); + } + + [Fact] + public void StringSet_Compat() + { + TimeSpan? 
expiry = null; + prefixed.StringSet("key", "value", expiry, When.Exists); + mock.Received().StringSet("prefix:key", "value", expiry, When.Exists); + } + + [Fact] + public void StringSetBit() + { + prefixed.StringSetBit("key", 123, true, CommandFlags.None); + mock.Received().StringSetBit("prefix:key", 123, true, CommandFlags.None); + } + + [Fact] + public void StringSetRange() + { + prefixed.StringSetRange("key", 123, "value", CommandFlags.None); + mock.Received().StringSetRange("prefix:key", 123, "value", CommandFlags.None); + } + + [Fact] + public void Execute_1() + { + prefixed.Execute("CUSTOM", "arg1", (RedisKey)"arg2"); + mock.Received().Execute("CUSTOM", Arg.Is(args => args.Length == 2 && args[0].Equals("arg1") && args[1].Equals((RedisKey)"prefix:arg2")), CommandFlags.None); + } + + [Fact] + public void Execute_2() + { + var args = new List { "arg1", (RedisKey)"arg2" }; + prefixed.Execute("CUSTOM", args, CommandFlags.None); + mock.Received().Execute("CUSTOM", Arg.Is>(a => a.Count == 2 && a.ElementAt(0).Equals("arg1") && a.ElementAt(1).Equals((RedisKey)"prefix:arg2"))!, CommandFlags.None); + } + + [Fact] + public void GeoAdd_1() + { + prefixed.GeoAdd("key", 1.23, 4.56, "member", CommandFlags.None); + mock.Received().GeoAdd("prefix:key", 1.23, 4.56, "member", CommandFlags.None); + } + + [Fact] + public void GeoAdd_2() + { + var geoEntry = new GeoEntry(1.23, 4.56, "member"); + prefixed.GeoAdd("key", geoEntry, CommandFlags.None); + mock.Received().GeoAdd("prefix:key", geoEntry, CommandFlags.None); + } + + [Fact] + public void GeoAdd_3() + { + var geoEntries = new GeoEntry[] { new GeoEntry(1.23, 4.56, "member1") }; + prefixed.GeoAdd("key", geoEntries, CommandFlags.None); + mock.Received().GeoAdd("prefix:key", geoEntries, CommandFlags.None); + } + + [Fact] + public void GeoRemove() + { + prefixed.GeoRemove("key", "member", CommandFlags.None); + mock.Received().GeoRemove("prefix:key", "member", CommandFlags.None); + } + + [Fact] + public void GeoDistance() + { + 
prefixed.GeoDistance("key", "member1", "member2", GeoUnit.Meters, CommandFlags.None); + mock.Received().GeoDistance("prefix:key", "member1", "member2", GeoUnit.Meters, CommandFlags.None); + } + + [Fact] + public void GeoHash_1() + { + prefixed.GeoHash("key", "member", CommandFlags.None); + mock.Received().GeoHash("prefix:key", "member", CommandFlags.None); + } + + [Fact] + public void GeoHash_2() + { + var members = new RedisValue[] { "member1", "member2" }; + prefixed.GeoHash("key", members, CommandFlags.None); + mock.Received().GeoHash("prefix:key", members, CommandFlags.None); + } + + [Fact] + public void GeoPosition_1() + { + prefixed.GeoPosition("key", "member", CommandFlags.None); + mock.Received().GeoPosition("prefix:key", "member", CommandFlags.None); + } + + [Fact] + public void GeoPosition_2() + { + var members = new RedisValue[] { "member1", "member2" }; + prefixed.GeoPosition("key", members, CommandFlags.None); + mock.Received().GeoPosition("prefix:key", members, CommandFlags.None); + } + + [Fact] + public void GeoRadius_1() + { + prefixed.GeoRadius("key", "member", 100, GeoUnit.Meters, 10, Order.Ascending, GeoRadiusOptions.Default, CommandFlags.None); + mock.Received().GeoRadius("prefix:key", "member", 100, GeoUnit.Meters, 10, Order.Ascending, GeoRadiusOptions.Default, CommandFlags.None); + } + + [Fact] + public void GeoRadius_2() + { + prefixed.GeoRadius("key", 1.23, 4.56, 100, GeoUnit.Meters, 10, Order.Ascending, GeoRadiusOptions.Default, CommandFlags.None); + mock.Received().GeoRadius("prefix:key", 1.23, 4.56, 100, GeoUnit.Meters, 10, Order.Ascending, GeoRadiusOptions.Default, CommandFlags.None); + } + + [Fact] + public void GeoSearch_1() + { + var shape = new GeoSearchCircle(100, GeoUnit.Meters); + prefixed.GeoSearch("key", "member", shape, 10, true, Order.Ascending, GeoRadiusOptions.Default, CommandFlags.None); + mock.Received().GeoSearch("prefix:key", "member", shape, 10, true, Order.Ascending, GeoRadiusOptions.Default, CommandFlags.None); + } + 
+ [Fact] + public void GeoSearch_2() + { + var shape = new GeoSearchCircle(100, GeoUnit.Meters); + prefixed.GeoSearch("key", 1.23, 4.56, shape, 10, true, Order.Ascending, GeoRadiusOptions.Default, CommandFlags.None); + mock.Received().GeoSearch("prefix:key", 1.23, 4.56, shape, 10, true, Order.Ascending, GeoRadiusOptions.Default, CommandFlags.None); + } + + [Fact] + public void GeoSearchAndStore_1() + { + var shape = new GeoSearchCircle(100, GeoUnit.Meters); + prefixed.GeoSearchAndStore("source", "destination", "member", shape, 10, true, Order.Ascending, false, CommandFlags.None); + mock.Received().GeoSearchAndStore("prefix:source", "prefix:destination", "member", shape, 10, true, Order.Ascending, false, CommandFlags.None); + } + + [Fact] + public void GeoSearchAndStore_2() + { + var shape = new GeoSearchCircle(100, GeoUnit.Meters); + prefixed.GeoSearchAndStore("source", "destination", 1.23, 4.56, shape, 10, true, Order.Ascending, false, CommandFlags.None); + mock.Received().GeoSearchAndStore("prefix:source", "prefix:destination", 1.23, 4.56, shape, 10, true, Order.Ascending, false, CommandFlags.None); + } + + [Fact] + public void HashFieldExpire_1() + { + var hashFields = new RedisValue[] { "field1", "field2" }; + var expiry = TimeSpan.FromSeconds(60); + prefixed.HashFieldExpire("key", hashFields, expiry, ExpireWhen.Always, CommandFlags.None); + mock.Received().HashFieldExpire("prefix:key", hashFields, expiry, ExpireWhen.Always, CommandFlags.None); + } + + [Fact] + public void HashFieldExpire_2() + { + var hashFields = new RedisValue[] { "field1", "field2" }; + var expiry = DateTime.Now.AddMinutes(1); + prefixed.HashFieldExpire("key", hashFields, expiry, ExpireWhen.Always, CommandFlags.None); + mock.Received().HashFieldExpire("prefix:key", hashFields, expiry, ExpireWhen.Always, CommandFlags.None); + } + + [Fact] + public void HashFieldGetExpireDateTime() + { + var hashFields = new RedisValue[] { "field1", "field2" }; + prefixed.HashFieldGetExpireDateTime("key", 
hashFields, CommandFlags.None); + mock.Received().HashFieldGetExpireDateTime("prefix:key", hashFields, CommandFlags.None); + } + + [Fact] + public void HashFieldPersist() + { + var hashFields = new RedisValue[] { "field1", "field2" }; + prefixed.HashFieldPersist("key", hashFields, CommandFlags.None); + mock.Received().HashFieldPersist("prefix:key", hashFields, CommandFlags.None); + } + + [Fact] + public void HashFieldGetTimeToLive() + { + var hashFields = new RedisValue[] { "field1", "field2" }; + prefixed.HashFieldGetTimeToLive("key", hashFields, CommandFlags.None); + mock.Received().HashFieldGetTimeToLive("prefix:key", hashFields, CommandFlags.None); + } + + [Fact] + public void HashGetLease() + { + prefixed.HashGetLease("key", "field", CommandFlags.None); + mock.Received().HashGetLease("prefix:key", "field", CommandFlags.None); + } + + [Fact] + public void HashFieldGetAndDelete_1() + { + prefixed.HashFieldGetAndDelete("key", "field", CommandFlags.None); + mock.Received().HashFieldGetAndDelete("prefix:key", "field", CommandFlags.None); + } + + [Fact] + public void HashFieldGetAndDelete_2() + { + var hashFields = new RedisValue[] { "field1", "field2" }; + prefixed.HashFieldGetAndDelete("key", hashFields, CommandFlags.None); + mock.Received().HashFieldGetAndDelete("prefix:key", hashFields, CommandFlags.None); + } + + [Fact] + public void HashFieldGetLeaseAndDelete() + { + prefixed.HashFieldGetLeaseAndDelete("key", "field", CommandFlags.None); + mock.Received().HashFieldGetLeaseAndDelete("prefix:key", "field", CommandFlags.None); + } + + [Fact] + public void HashFieldGetAndSetExpiry_1() + { + var expiry = TimeSpan.FromMinutes(5); + prefixed.HashFieldGetAndSetExpiry("key", "field", expiry, false, CommandFlags.None); + mock.Received().HashFieldGetAndSetExpiry("prefix:key", "field", expiry, false, CommandFlags.None); + } + + [Fact] + public void HashFieldGetAndSetExpiry_2() + { + var expiry = DateTime.Now.AddMinutes(5); + prefixed.HashFieldGetAndSetExpiry("key", 
"field", expiry, CommandFlags.None); + mock.Received().HashFieldGetAndSetExpiry("prefix:key", "field", expiry, CommandFlags.None); + } + + [Fact] + public void HashFieldGetLeaseAndSetExpiry_1() + { + var expiry = TimeSpan.FromMinutes(5); + prefixed.HashFieldGetLeaseAndSetExpiry("key", "field", expiry, false, CommandFlags.None); + mock.Received().HashFieldGetLeaseAndSetExpiry("prefix:key", "field", expiry, false, CommandFlags.None); + } + + [Fact] + public void HashFieldGetLeaseAndSetExpiry_2() + { + var expiry = DateTime.Now.AddMinutes(5); + prefixed.HashFieldGetLeaseAndSetExpiry("key", "field", expiry, CommandFlags.None); + mock.Received().HashFieldGetLeaseAndSetExpiry("prefix:key", "field", expiry, CommandFlags.None); + } + [Fact] + public void StringGetLease() + { + prefixed.StringGetLease("key", CommandFlags.None); + mock.Received().StringGetLease("prefix:key", CommandFlags.None); + } + + [Fact] + public void StringGetSetExpiry_1() + { + var expiry = TimeSpan.FromMinutes(5); + prefixed.StringGetSetExpiry("key", expiry, CommandFlags.None); + mock.Received().StringGetSetExpiry("prefix:key", expiry, CommandFlags.None); + } + + [Fact] + public void StringGetSetExpiry_2() + { + var expiry = DateTime.Now.AddMinutes(5); + prefixed.StringGetSetExpiry("key", expiry, CommandFlags.None); + mock.Received().StringGetSetExpiry("prefix:key", expiry, CommandFlags.None); + } + + [Fact] + public void StringSetAndGet_1() + { + var expiry = TimeSpan.FromMinutes(5); + prefixed.StringSetAndGet("key", "value", expiry, When.Always, CommandFlags.None); + mock.Received().StringSetAndGet("prefix:key", "value", expiry, When.Always, CommandFlags.None); + } + + [Fact] + public void StringSetAndGet_2() + { + var expiry = TimeSpan.FromMinutes(5); + prefixed.StringSetAndGet("key", "value", expiry, false, When.Always, CommandFlags.None); + mock.Received().StringSetAndGet("prefix:key", "value", expiry, false, When.Always, CommandFlags.None); + } + [Fact] + public void 
StringLongestCommonSubsequence() + { + prefixed.StringLongestCommonSubsequence("key1", "key2", CommandFlags.None); + mock.Received().StringLongestCommonSubsequence("prefix:key1", "prefix:key2", CommandFlags.None); + } + + [Fact] + public void StringLongestCommonSubsequenceLength() + { + prefixed.StringLongestCommonSubsequenceLength("key1", "key2", CommandFlags.None); + mock.Received().StringLongestCommonSubsequenceLength("prefix:key1", "prefix:key2", CommandFlags.None); + } + + [Fact] + public void StringLongestCommonSubsequenceWithMatches() + { + prefixed.StringLongestCommonSubsequenceWithMatches("key1", "key2", 5, CommandFlags.None); + mock.Received().StringLongestCommonSubsequenceWithMatches("prefix:key1", "prefix:key2", 5, CommandFlags.None); + } + [Fact] + public void IsConnected() + { + prefixed.IsConnected("key", CommandFlags.None); + mock.Received().IsConnected("prefix:key", CommandFlags.None); + } + [Fact] + public void StreamAdd_WithTrimMode_1() + { + prefixed.StreamAdd("key", "field", "value", "*", 1000, false, 100, StreamTrimMode.KeepReferences, CommandFlags.None); + mock.Received().StreamAdd("prefix:key", "field", "value", "*", 1000, false, 100, StreamTrimMode.KeepReferences, CommandFlags.None); + } + + [Fact] + public void StreamAdd_WithTrimMode_2() + { + var fields = new NameValueEntry[] { new NameValueEntry("field", "value") }; + prefixed.StreamAdd("key", fields, "*", 1000, false, 100, StreamTrimMode.KeepReferences, CommandFlags.None); + mock.Received().StreamAdd("prefix:key", fields, "*", 1000, false, 100, StreamTrimMode.KeepReferences, CommandFlags.None); + } + + [Fact] + public void StreamTrim_WithMode() + { + prefixed.StreamTrim("key", 1000, false, 100, StreamTrimMode.KeepReferences, CommandFlags.None); + mock.Received().StreamTrim("prefix:key", 1000, false, 100, StreamTrimMode.KeepReferences, CommandFlags.None); + } + + [Fact] + public void StreamTrimByMinId_WithMode() + { + prefixed.StreamTrimByMinId("key", "1111111111", false, 100, 
StreamTrimMode.KeepReferences, CommandFlags.None); + mock.Received().StreamTrimByMinId("prefix:key", "1111111111", false, 100, StreamTrimMode.KeepReferences, CommandFlags.None); + } + + [Fact] + public void StreamReadGroup_WithNoAck_1() + { + prefixed.StreamReadGroup("key", "group", "consumer", "0-0", 10, true, CommandFlags.None); + mock.Received().StreamReadGroup("prefix:key", "group", "consumer", "0-0", 10, true, CommandFlags.None); + } + + [Fact] + public void StreamReadGroup_WithNoAck_2() + { + var streamPositions = new StreamPosition[] { new StreamPosition("key", "0-0") }; + prefixed.StreamReadGroup(streamPositions, "group", "consumer", 10, true, CommandFlags.None); + mock.Received().StreamReadGroup(streamPositions, "group", "consumer", 10, true, CommandFlags.None); + } + + [Fact] + public void StreamTrim_Simple() + { + prefixed.StreamTrim("key", 1000, true, CommandFlags.None); + mock.Received().StreamTrim("prefix:key", 1000, true, CommandFlags.None); + } + + [Fact] + public void StreamReadGroup_Simple_1() + { + prefixed.StreamReadGroup("key", "group", "consumer", "0-0", 10, CommandFlags.None); + mock.Received().StreamReadGroup("prefix:key", "group", "consumer", "0-0", 10, CommandFlags.None); + } + + [Fact] + public void StreamReadGroup_Simple_2() + { + var streamPositions = new StreamPosition[] { new StreamPosition("key", "0-0") }; + prefixed.StreamReadGroup(streamPositions, "group", "consumer", 10, CommandFlags.None); + mock.Received().StreamReadGroup(streamPositions, "group", "consumer", 10, CommandFlags.None); + } +} diff --git a/tests/StackExchange.Redis.Tests/KeyPrefixedTests.cs b/tests/StackExchange.Redis.Tests/KeyPrefixedTests.cs new file mode 100644 index 000000000..94b54e112 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/KeyPrefixedTests.cs @@ -0,0 +1,1744 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Linq.Expressions; +using System.Net; +using System.Text; +using System.Threading.Tasks; +using 
NSubstitute; +using StackExchange.Redis.KeyspaceIsolation; +using Xunit; +using static StackExchange.Redis.Tests.KeyPrefixedDatabaseTests; // for IsKeys etc + +namespace StackExchange.Redis.Tests +{ + [Collection(nameof(SubstituteDependentCollection))] + public sealed class KeyPrefixedTests + { + private readonly IDatabaseAsync mock; + private readonly KeyPrefixed prefixed; + + public KeyPrefixedTests() + { + mock = Substitute.For(); + prefixed = new KeyPrefixed(mock, Encoding.UTF8.GetBytes("prefix:")); + } + + [Fact] + public async Task DebugObjectAsync() + { + await prefixed.DebugObjectAsync("key", CommandFlags.None); + await mock.Received().DebugObjectAsync("prefix:key", CommandFlags.None); + } + + [Fact] + public async Task HashDecrementAsync_1() + { + await prefixed.HashDecrementAsync("key", "hashField", 123, CommandFlags.None); + await mock.Received().HashDecrementAsync("prefix:key", "hashField", 123, CommandFlags.None); + } + + [Fact] + public async Task HashDecrementAsync_2() + { + await prefixed.HashDecrementAsync("key", "hashField", 1.23, CommandFlags.None); + await mock.Received().HashDecrementAsync("prefix:key", "hashField", 1.23, CommandFlags.None); + } + + [Fact] + public async Task HashDeleteAsync_1() + { + await prefixed.HashDeleteAsync("key", "hashField", CommandFlags.None); + await mock.Received().HashDeleteAsync("prefix:key", "hashField", CommandFlags.None); + } + + [Fact] + public async Task HashDeleteAsync_2() + { + RedisValue[] hashFields = Array.Empty(); + await prefixed.HashDeleteAsync("key", hashFields, CommandFlags.None); + await mock.Received().HashDeleteAsync("prefix:key", hashFields, CommandFlags.None); + } + + [Fact] + public async Task HashExistsAsync() + { + await prefixed.HashExistsAsync("key", "hashField", CommandFlags.None); + await mock.Received().HashExistsAsync("prefix:key", "hashField", CommandFlags.None); + } + + [Fact] + public async Task HashGetAllAsync() + { + await prefixed.HashGetAllAsync("key", CommandFlags.None); + 
await mock.Received().HashGetAllAsync("prefix:key", CommandFlags.None); + } + + [Fact] + public async Task HashGetAsync_1() + { + await prefixed.HashGetAsync("key", "hashField", CommandFlags.None); + await mock.Received().HashGetAsync("prefix:key", "hashField", CommandFlags.None); + } + + [Fact] + public async Task HashGetAsync_2() + { + RedisValue[] hashFields = Array.Empty(); + await prefixed.HashGetAsync("key", hashFields, CommandFlags.None); + await mock.Received().HashGetAsync("prefix:key", hashFields, CommandFlags.None); + } + + [Fact] + public async Task HashIncrementAsync_1() + { + await prefixed.HashIncrementAsync("key", "hashField", 123, CommandFlags.None); + await mock.Received().HashIncrementAsync("prefix:key", "hashField", 123, CommandFlags.None); + } + + [Fact] + public async Task HashIncrementAsync_2() + { + await prefixed.HashIncrementAsync("key", "hashField", 1.23, CommandFlags.None); + await mock.Received().HashIncrementAsync("prefix:key", "hashField", 1.23, CommandFlags.None); + } + + [Fact] + public async Task HashKeysAsync() + { + await prefixed.HashKeysAsync("key", CommandFlags.None); + await mock.Received().HashKeysAsync("prefix:key", CommandFlags.None); + } + + [Fact] + public async Task HashLengthAsync() + { + await prefixed.HashLengthAsync("key", CommandFlags.None); + await mock.Received().HashLengthAsync("prefix:key", CommandFlags.None); + } + + [Fact] + public async Task HashSetAsync_1() + { + HashEntry[] hashFields = Array.Empty(); + await prefixed.HashSetAsync("key", hashFields, CommandFlags.None); + await mock.Received().HashSetAsync("prefix:key", hashFields, CommandFlags.None); + } + + [Fact] + public async Task HashSetAsync_2() + { + await prefixed.HashSetAsync("key", "hashField", "value", When.Exists, CommandFlags.None); + await mock.Received().HashSetAsync("prefix:key", "hashField", "value", When.Exists, CommandFlags.None); + } + + [Fact] + public async Task HashStringLengthAsync() + { + await prefixed.HashStringLengthAsync("key", 
"field", CommandFlags.None); + await mock.Received().HashStringLengthAsync("prefix:key", "field", CommandFlags.None); + } + + [Fact] + public async Task HashValuesAsync() + { + await prefixed.HashValuesAsync("key", CommandFlags.None); + await mock.Received().HashValuesAsync("prefix:key", CommandFlags.None); + } + + [Fact] + public async Task HyperLogLogAddAsync_1() + { + await prefixed.HyperLogLogAddAsync("key", "value", CommandFlags.None); + await mock.Received().HyperLogLogAddAsync("prefix:key", "value", CommandFlags.None); + } + + [Fact] + public async Task HyperLogLogAddAsync_2() + { + var values = Array.Empty(); + await prefixed.HyperLogLogAddAsync("key", values, CommandFlags.None); + await mock.Received().HyperLogLogAddAsync("prefix:key", values, CommandFlags.None); + } + + [Fact] + public async Task HyperLogLogLengthAsync() + { + await prefixed.HyperLogLogLengthAsync("key", CommandFlags.None); + await mock.Received().HyperLogLogLengthAsync("prefix:key", CommandFlags.None); + } + + [Fact] + public async Task HyperLogLogMergeAsync_1() + { + await prefixed.HyperLogLogMergeAsync("destination", "first", "second", CommandFlags.None); + await mock.Received().HyperLogLogMergeAsync("prefix:destination", "prefix:first", "prefix:second", CommandFlags.None); + } + + [Fact] + public async Task HyperLogLogMergeAsync_2() + { + RedisKey[] keys = ["a", "b"]; + await prefixed.HyperLogLogMergeAsync("destination", keys, CommandFlags.None); + await mock.Received().HyperLogLogMergeAsync("prefix:destination", IsKeys("prefix:a", "prefix:b"), CommandFlags.None); + } + + [Fact] + public async Task IdentifyEndpointAsync() + { + await prefixed.IdentifyEndpointAsync("key", CommandFlags.None); + await mock.Received().IdentifyEndpointAsync("prefix:key", CommandFlags.None); + } + + [Fact] + public void IsConnected() + { + prefixed.IsConnected("key", CommandFlags.None); + mock.Received().IsConnected("prefix:key", CommandFlags.None); + } + + [Fact] + public async Task KeyCopyAsync() + { + 
await prefixed.KeyCopyAsync("key", "destination", flags: CommandFlags.None); + await mock.Received().KeyCopyAsync("prefix:key", "prefix:destination", -1, false, CommandFlags.None); + } + + [Fact] + public async Task KeyDeleteAsync_1() + { + await prefixed.KeyDeleteAsync("key", CommandFlags.None); + await mock.Received().KeyDeleteAsync("prefix:key", CommandFlags.None); + } + + [Fact] + public async Task KeyDeleteAsync_2() + { + RedisKey[] keys = ["a", "b"]; + await prefixed.KeyDeleteAsync(keys, CommandFlags.None); + await mock.Received().KeyDeleteAsync(IsKeys("prefix:a", "prefix:b"), CommandFlags.None); + } + + [Fact] + public async Task KeyDumpAsync() + { + await prefixed.KeyDumpAsync("key", CommandFlags.None); + await mock.Received().KeyDumpAsync("prefix:key", CommandFlags.None); + } + + [Fact] + public async Task KeyEncodingAsync() + { + await prefixed.KeyEncodingAsync("key", CommandFlags.None); + await mock.Received().KeyEncodingAsync("prefix:key", CommandFlags.None); + } + + [Fact] + public async Task KeyExistsAsync() + { + await prefixed.KeyExistsAsync("key", CommandFlags.None); + await mock.Received().KeyExistsAsync("prefix:key", CommandFlags.None); + } + + [Fact] + public async Task KeyExpireAsync_1() + { + TimeSpan expiry = TimeSpan.FromSeconds(123); + await prefixed.KeyExpireAsync("key", expiry, CommandFlags.None); + await mock.Received().KeyExpireAsync("prefix:key", expiry, CommandFlags.None); + } + + [Fact] + public async Task KeyExpireAsync_2() + { + DateTime expiry = DateTime.Now; + await prefixed.KeyExpireAsync("key", expiry, CommandFlags.None); + await mock.Received().KeyExpireAsync("prefix:key", expiry, CommandFlags.None); + } + + [Fact] + public async Task KeyExpireAsync_3() + { + TimeSpan expiry = TimeSpan.FromSeconds(123); + await prefixed.KeyExpireAsync("key", expiry, ExpireWhen.HasNoExpiry, CommandFlags.None); + await mock.Received().KeyExpireAsync("prefix:key", expiry, ExpireWhen.HasNoExpiry, CommandFlags.None); + } + + [Fact] + public async 
Task KeyExpireAsync_4() + { + DateTime expiry = DateTime.Now; + await prefixed.KeyExpireAsync("key", expiry, ExpireWhen.HasNoExpiry, CommandFlags.None); + await mock.Received().KeyExpireAsync("prefix:key", expiry, ExpireWhen.HasNoExpiry, CommandFlags.None); + } + + [Fact] + public async Task KeyExpireTimeAsync() + { + await prefixed.KeyExpireTimeAsync("key", CommandFlags.None); + await mock.Received().KeyExpireTimeAsync("prefix:key", CommandFlags.None); + } + + [Fact] + public async Task KeyFrequencyAsync() + { + await prefixed.KeyFrequencyAsync("key", CommandFlags.None); + await mock.Received().KeyFrequencyAsync("prefix:key", CommandFlags.None); + } + + [Fact] + public async Task KeyMigrateAsync() + { + EndPoint toServer = new IPEndPoint(IPAddress.Loopback, 123); + await prefixed.KeyMigrateAsync("key", toServer, 123, 456, MigrateOptions.Copy, CommandFlags.None); + await mock.Received().KeyMigrateAsync("prefix:key", toServer, 123, 456, MigrateOptions.Copy, CommandFlags.None); + } + + [Fact] + public async Task KeyMoveAsync() + { + await prefixed.KeyMoveAsync("key", 123, CommandFlags.None); + await mock.Received().KeyMoveAsync("prefix:key", 123, CommandFlags.None); + } + + [Fact] + public async Task KeyPersistAsync() + { + await prefixed.KeyPersistAsync("key", CommandFlags.None); + await mock.Received().KeyPersistAsync("prefix:key", CommandFlags.None); + } + + [Fact] + public Task KeyRandomAsync() + { + return Assert.ThrowsAsync<NotSupportedException>(() => prefixed.KeyRandomAsync()); + } + + [Fact] + public async Task KeyRefCountAsync() + { + await prefixed.KeyRefCountAsync("key", CommandFlags.None); + await mock.Received().KeyRefCountAsync("prefix:key", CommandFlags.None); + } + + [Fact] + public async Task KeyRenameAsync() + { + await prefixed.KeyRenameAsync("key", "newKey", When.Exists, CommandFlags.None); + await mock.Received().KeyRenameAsync("prefix:key", "prefix:newKey", When.Exists, CommandFlags.None); + } + + [Fact] + public async Task KeyRestoreAsync() + { + byte[] value = 
Array.Empty<byte>(); + TimeSpan expiry = TimeSpan.FromSeconds(123); + await prefixed.KeyRestoreAsync("key", value, expiry, CommandFlags.None); + await mock.Received().KeyRestoreAsync("prefix:key", value, expiry, CommandFlags.None); + } + + [Fact] + public async Task KeyTimeToLiveAsync() + { + await prefixed.KeyTimeToLiveAsync("key", CommandFlags.None); + await mock.Received().KeyTimeToLiveAsync("prefix:key", CommandFlags.None); + } + + [Fact] + public async Task KeyTypeAsync() + { + await prefixed.KeyTypeAsync("key", CommandFlags.None); + await mock.Received().KeyTypeAsync("prefix:key", CommandFlags.None); + } + + [Fact] + public async Task ListGetByIndexAsync() + { + await prefixed.ListGetByIndexAsync("key", 123, CommandFlags.None); + await mock.Received().ListGetByIndexAsync("prefix:key", 123, CommandFlags.None); + } + + [Fact] + public async Task ListInsertAfterAsync() + { + await prefixed.ListInsertAfterAsync("key", "pivot", "value", CommandFlags.None); + await mock.Received().ListInsertAfterAsync("prefix:key", "pivot", "value", CommandFlags.None); + } + + [Fact] + public async Task ListInsertBeforeAsync() + { + await prefixed.ListInsertBeforeAsync("key", "pivot", "value", CommandFlags.None); + await mock.Received().ListInsertBeforeAsync("prefix:key", "pivot", "value", CommandFlags.None); + } + + [Fact] + public async Task ListLeftPopAsync() + { + await prefixed.ListLeftPopAsync("key", CommandFlags.None); + await mock.Received().ListLeftPopAsync("prefix:key", CommandFlags.None); + } + + [Fact] + public async Task ListLeftPopAsync_1() + { + await prefixed.ListLeftPopAsync("key", 123, CommandFlags.None); + await mock.Received().ListLeftPopAsync("prefix:key", 123, CommandFlags.None); + } + + [Fact] + public async Task ListLeftPushAsync_1() + { + await prefixed.ListLeftPushAsync("key", "value", When.Exists, CommandFlags.None); + await mock.Received().ListLeftPushAsync("prefix:key", "value", When.Exists, CommandFlags.None); + } + + [Fact] + public async Task 
ListLeftPushAsync_2() + { + RedisValue[] values = Array.Empty<RedisValue>(); + await prefixed.ListLeftPushAsync("key", values, CommandFlags.None); + await mock.Received().ListLeftPushAsync("prefix:key", values, CommandFlags.None); + } + + [Fact] + public async Task ListLeftPushAsync_3() + { + RedisValue[] values = ["value1", "value2"]; + await prefixed.ListLeftPushAsync("key", values, When.Exists, CommandFlags.None); + await mock.Received().ListLeftPushAsync("prefix:key", values, When.Exists, CommandFlags.None); + } + + [Fact] + public async Task ListLengthAsync() + { + await prefixed.ListLengthAsync("key", CommandFlags.None); + await mock.Received().ListLengthAsync("prefix:key", CommandFlags.None); + } + + [Fact] + public async Task ListMoveAsync() + { + await prefixed.ListMoveAsync("key", "destination", ListSide.Left, ListSide.Right, CommandFlags.None); + await mock.Received().ListMoveAsync("prefix:key", "prefix:destination", ListSide.Left, ListSide.Right, CommandFlags.None); + } + + [Fact] + public async Task ListRangeAsync() + { + await prefixed.ListRangeAsync("key", 123, 456, CommandFlags.None); + await mock.Received().ListRangeAsync("prefix:key", 123, 456, CommandFlags.None); + } + + [Fact] + public async Task ListRemoveAsync() + { + await prefixed.ListRemoveAsync("key", "value", 123, CommandFlags.None); + await mock.Received().ListRemoveAsync("prefix:key", "value", 123, CommandFlags.None); + } + + [Fact] + public async Task ListRightPopAsync() + { + await prefixed.ListRightPopAsync("key", CommandFlags.None); + await mock.Received().ListRightPopAsync("prefix:key", CommandFlags.None); + } + + [Fact] + public async Task ListRightPopAsync_1() + { + await prefixed.ListRightPopAsync("key", 123, CommandFlags.None); + await mock.Received().ListRightPopAsync("prefix:key", 123, CommandFlags.None); + } + + [Fact] + public async Task ListRightPopLeftPushAsync() + { + await prefixed.ListRightPopLeftPushAsync("source", "destination", CommandFlags.None); + await 
mock.Received().ListRightPopLeftPushAsync("prefix:source", "prefix:destination", CommandFlags.None); + } + + [Fact] + public async Task ListRightPushAsync_1() + { + await prefixed.ListRightPushAsync("key", "value", When.Exists, CommandFlags.None); + await mock.Received().ListRightPushAsync("prefix:key", "value", When.Exists, CommandFlags.None); + } + + [Fact] + public async Task ListRightPushAsync_2() + { + RedisValue[] values = Array.Empty<RedisValue>(); + await prefixed.ListRightPushAsync("key", values, CommandFlags.None); + await mock.Received().ListRightPushAsync("prefix:key", values, CommandFlags.None); + } + + [Fact] + public async Task ListRightPushAsync_3() + { + RedisValue[] values = ["value1", "value2"]; + await prefixed.ListRightPushAsync("key", values, When.Exists, CommandFlags.None); + await mock.Received().ListRightPushAsync("prefix:key", values, When.Exists, CommandFlags.None); + } + + [Fact] + public async Task ListSetByIndexAsync() + { + await prefixed.ListSetByIndexAsync("key", 123, "value", CommandFlags.None); + await mock.Received().ListSetByIndexAsync("prefix:key", 123, "value", CommandFlags.None); + } + + [Fact] + public async Task ListTrimAsync() + { + await prefixed.ListTrimAsync("key", 123, 456, CommandFlags.None); + await mock.Received().ListTrimAsync("prefix:key", 123, 456, CommandFlags.None); + } + + [Fact] + public async Task LockExtendAsync() + { + TimeSpan expiry = TimeSpan.FromSeconds(123); + await prefixed.LockExtendAsync("key", "value", expiry, CommandFlags.None); + await mock.Received().LockExtendAsync("prefix:key", "value", expiry, CommandFlags.None); + } + + [Fact] + public async Task LockQueryAsync() + { + await prefixed.LockQueryAsync("key", CommandFlags.None); + await mock.Received().LockQueryAsync("prefix:key", CommandFlags.None); + } + + [Fact] + public async Task LockReleaseAsync() + { + await prefixed.LockReleaseAsync("key", "value", CommandFlags.None); + await mock.Received().LockReleaseAsync("prefix:key", "value", 
CommandFlags.None); + } + + [Fact] + public async Task LockTakeAsync() + { + TimeSpan expiry = TimeSpan.FromSeconds(123); + await prefixed.LockTakeAsync("key", "value", expiry, CommandFlags.None); + await mock.Received().LockTakeAsync("prefix:key", "value", expiry, CommandFlags.None); + } + + [Fact] + public async Task PublishAsync() + { + await prefixed.PublishAsync(RedisChannel.Literal("channel"), "message", CommandFlags.None); + await mock.Received().PublishAsync(RedisChannel.Literal("prefix:channel"), "message", CommandFlags.None); + } + + [Fact] + public async Task ScriptEvaluateAsync_1() + { + byte[] hash = Array.Empty<byte>(); + RedisValue[] values = Array.Empty<RedisValue>(); + RedisKey[] keys = ["a", "b"]; + await prefixed.ScriptEvaluateAsync(hash, keys, values, CommandFlags.None); + await mock.Received().ScriptEvaluateAsync(hash, IsKeys("prefix:a", "prefix:b"), values, CommandFlags.None); + } + + [Fact] + public async Task ScriptEvaluateAsync_2() + { + RedisValue[] values = Array.Empty<RedisValue>(); + RedisKey[] keys = ["a", "b"]; + await prefixed.ScriptEvaluateAsync("script", keys, values, CommandFlags.None); + await mock.Received().ScriptEvaluateAsync(script: "script", keys: IsKeys("prefix:a", "prefix:b"), values: values, flags: CommandFlags.None); + } + + [Fact] + public async Task SetAddAsync_1() + { + await prefixed.SetAddAsync("key", "value", CommandFlags.None); + await mock.Received().SetAddAsync("prefix:key", "value", CommandFlags.None); + } + + [Fact] + public async Task SetAddAsync_2() + { + RedisValue[] values = Array.Empty<RedisValue>(); + await prefixed.SetAddAsync("key", values, CommandFlags.None); + await mock.Received().SetAddAsync("prefix:key", values, CommandFlags.None); + } + + [Fact] + public async Task SetCombineAndStoreAsync_1() + { + await prefixed.SetCombineAndStoreAsync(SetOperation.Intersect, "destination", "first", "second", CommandFlags.None); + await mock.Received().SetCombineAndStoreAsync(SetOperation.Intersect, "prefix:destination", "prefix:first", "prefix:second", 
CommandFlags.None); + } + + [Fact] + public async Task SetCombineAndStoreAsync_2() + { + RedisKey[] keys = ["a", "b"]; + await prefixed.SetCombineAndStoreAsync(SetOperation.Intersect, "destination", keys, CommandFlags.None); + await mock.Received().SetCombineAndStoreAsync(SetOperation.Intersect, "prefix:destination", IsKeys("prefix:a", "prefix:b"), CommandFlags.None); + } + + [Fact] + public async Task SetCombineAsync_1() + { + await prefixed.SetCombineAsync(SetOperation.Intersect, "first", "second", CommandFlags.None); + await mock.Received().SetCombineAsync(SetOperation.Intersect, "prefix:first", "prefix:second", CommandFlags.None); + } + + [Fact] + public async Task SetCombineAsync_2() + { + RedisKey[] keys = ["a", "b"]; + await prefixed.SetCombineAsync(SetOperation.Intersect, keys, CommandFlags.None); + await mock.Received().SetCombineAsync(SetOperation.Intersect, IsKeys("prefix:a", "prefix:b"), CommandFlags.None); + } + + [Fact] + public async Task SetContainsAsync() + { + await prefixed.SetContainsAsync("key", "value", CommandFlags.None); + await mock.Received().SetContainsAsync("prefix:key", "value", CommandFlags.None); + } + + [Fact] + public async Task SetContainsAsync_2() + { + RedisValue[] values = ["value1", "value2"]; + await prefixed.SetContainsAsync("key", values, CommandFlags.None); + await mock.Received().SetContainsAsync("prefix:key", values, CommandFlags.None); + } + + [Fact] + public async Task SetIntersectionLengthAsync() + { + await prefixed.SetIntersectionLengthAsync(["key1", "key2"]); + await mock.Received().SetIntersectionLengthAsync(IsKeys("prefix:key1", "prefix:key2"), 0, CommandFlags.None); + } + + [Fact] + public async Task SetLengthAsync() + { + await prefixed.SetLengthAsync("key", CommandFlags.None); + await mock.Received().SetLengthAsync("prefix:key", CommandFlags.None); + } + + [Fact] + public async Task SetMembersAsync() + { + await prefixed.SetMembersAsync("key", CommandFlags.None); + await 
mock.Received().SetMembersAsync("prefix:key", CommandFlags.None); + } + + [Fact] + public async Task SetMoveAsync() + { + await prefixed.SetMoveAsync("source", "destination", "value", CommandFlags.None); + await mock.Received().SetMoveAsync("prefix:source", "prefix:destination", "value", CommandFlags.None); + } + + [Fact] + public async Task SetPopAsync_1() + { + await prefixed.SetPopAsync("key", CommandFlags.None); + await mock.Received().SetPopAsync("prefix:key", CommandFlags.None); + + await prefixed.SetPopAsync("key", 5, CommandFlags.None); + await mock.Received().SetPopAsync("prefix:key", 5, CommandFlags.None); + } + + [Fact] + public async Task SetPopAsync_2() + { + await prefixed.SetPopAsync("key", 5, CommandFlags.None); + await mock.Received().SetPopAsync("prefix:key", 5, CommandFlags.None); + } + + [Fact] + public async Task SetRandomMemberAsync() + { + await prefixed.SetRandomMemberAsync("key", CommandFlags.None); + await mock.Received().SetRandomMemberAsync("prefix:key", CommandFlags.None); + } + + [Fact] + public async Task SetRandomMembersAsync() + { + await prefixed.SetRandomMembersAsync("key", 123, CommandFlags.None); + await mock.Received().SetRandomMembersAsync("prefix:key", 123, CommandFlags.None); + } + + [Fact] + public async Task SetRemoveAsync_1() + { + await prefixed.SetRemoveAsync("key", "value", CommandFlags.None); + await mock.Received().SetRemoveAsync("prefix:key", "value", CommandFlags.None); + } + + [Fact] + public async Task SetRemoveAsync_2() + { + RedisValue[] values = Array.Empty<RedisValue>(); + await prefixed.SetRemoveAsync("key", values, CommandFlags.None); + await mock.Received().SetRemoveAsync("prefix:key", values, CommandFlags.None); + } + + [Fact] + public async Task SortAndStoreAsync() + { + RedisValue[] get = ["a", "#"]; + + await prefixed.SortAndStoreAsync("destination", "key", 123, 456, Order.Descending, SortType.Alphabetic, "nosort", get, CommandFlags.None); + await prefixed.SortAndStoreAsync("destination", "key", 123, 456, 
Order.Descending, SortType.Alphabetic, "by", get, CommandFlags.None); + + await mock.Received().SortAndStoreAsync("prefix:destination", "prefix:key", 123, 456, Order.Descending, SortType.Alphabetic, "nosort", IsValues("prefix:a", "#"), CommandFlags.None); + await mock.Received().SortAndStoreAsync("prefix:destination", "prefix:key", 123, 456, Order.Descending, SortType.Alphabetic, "prefix:by", IsValues("prefix:a", "#"), CommandFlags.None); + } + + [Fact] + public async Task SortAsync() + { + RedisValue[] get = ["a", "#"]; + + await prefixed.SortAsync("key", 123, 456, Order.Descending, SortType.Alphabetic, "nosort", get, CommandFlags.None); + await prefixed.SortAsync("key", 123, 456, Order.Descending, SortType.Alphabetic, "by", get, CommandFlags.None); + + await mock.Received().SortAsync("prefix:key", 123, 456, Order.Descending, SortType.Alphabetic, "nosort", IsValues("prefix:a", "#"), CommandFlags.None); + await mock.Received().SortAsync("prefix:key", 123, 456, Order.Descending, SortType.Alphabetic, "prefix:by", IsValues("prefix:a", "#"), CommandFlags.None); + } + + [Fact] + public async Task SortedSetAddAsync_1() + { + await prefixed.SortedSetAddAsync("key", "member", 1.23, When.Exists, CommandFlags.None); + await mock.Received().SortedSetAddAsync("prefix:key", "member", 1.23, When.Exists, CommandFlags.None); + } + + [Fact] + public async Task SortedSetAddAsync_2() + { + SortedSetEntry[] values = Array.Empty<SortedSetEntry>(); + await prefixed.SortedSetAddAsync("key", values, When.Exists, CommandFlags.None); + await mock.Received().SortedSetAddAsync("prefix:key", values, When.Exists, CommandFlags.None); + } + + [Fact] + public async Task SortedSetAddAsync_3() + { + SortedSetEntry[] values = Array.Empty<SortedSetEntry>(); + await prefixed.SortedSetAddAsync("key", values, SortedSetWhen.GreaterThan, CommandFlags.None); + await mock.Received().SortedSetAddAsync("prefix:key", values, SortedSetWhen.GreaterThan, CommandFlags.None); + } + + [Fact] + public async Task SortedSetCombineAsync() + { + await 
prefixed.SortedSetCombineAsync(SetOperation.Intersect, ["a", "b"]); + await mock.Received().SortedSetCombineAsync(SetOperation.Intersect, IsKeys("prefix:a", "prefix:b"), null, Aggregate.Sum, CommandFlags.None); + } + + [Fact] + public async Task SortedSetCombineWithScoresAsync() + { + await prefixed.SortedSetCombineWithScoresAsync(SetOperation.Intersect, ["a", "b"]); + await mock.Received().SortedSetCombineWithScoresAsync(SetOperation.Intersect, IsKeys("prefix:a", "prefix:b"), null, Aggregate.Sum, CommandFlags.None); + } + + [Fact] + public async Task SortedSetCombineAndStoreAsync_1() + { + await prefixed.SortedSetCombineAndStoreAsync(SetOperation.Intersect, "destination", "first", "second", Aggregate.Max, CommandFlags.None); + await mock.Received().SortedSetCombineAndStoreAsync(SetOperation.Intersect, "prefix:destination", "prefix:first", "prefix:second", Aggregate.Max, CommandFlags.None); + } + + [Fact] + public async Task SortedSetCombineAndStoreAsync_2() + { + RedisKey[] keys = ["a", "b"]; + await prefixed.SetCombineAndStoreAsync(SetOperation.Intersect, "destination", keys, CommandFlags.None); + await mock.Received().SetCombineAndStoreAsync(SetOperation.Intersect, "prefix:destination", IsKeys("prefix:a", "prefix:b"), CommandFlags.None); + } + + [Fact] + public async Task SortedSetDecrementAsync() + { + await prefixed.SortedSetDecrementAsync("key", "member", 1.23, CommandFlags.None); + await mock.Received().SortedSetDecrementAsync("prefix:key", "member", 1.23, CommandFlags.None); + } + + [Fact] + public async Task SortedSetIncrementAsync() + { + await prefixed.SortedSetIncrementAsync("key", "member", 1.23, CommandFlags.None); + await mock.Received().SortedSetIncrementAsync("prefix:key", "member", 1.23, CommandFlags.None); + } + + [Fact] + public async Task SortedSetIntersectionLengthAsync() + { + await prefixed.SortedSetIntersectionLengthAsync(["a", "b"], 1, CommandFlags.None); + await mock.Received().SortedSetIntersectionLengthAsync(IsKeys("prefix:a", 
"prefix:b"), 1, CommandFlags.None); + } + + [Fact] + public async Task SortedSetLengthAsync() + { + await prefixed.SortedSetLengthAsync("key", 1.23, 1.23, Exclude.Start, CommandFlags.None); + await mock.Received().SortedSetLengthAsync("prefix:key", 1.23, 1.23, Exclude.Start, CommandFlags.None); + } + + [Fact] + public async Task SortedSetLengthByValueAsync() + { + await prefixed.SortedSetLengthByValueAsync("key", "min", "max", Exclude.Start, CommandFlags.None); + await mock.Received().SortedSetLengthByValueAsync("prefix:key", "min", "max", Exclude.Start, CommandFlags.None); + } + + [Fact] + public async Task SortedSetRandomMemberAsync() + { + await prefixed.SortedSetRandomMemberAsync("key", CommandFlags.None); + await mock.Received().SortedSetRandomMemberAsync("prefix:key", CommandFlags.None); + } + + [Fact] + public async Task SortedSetRandomMembersAsync() + { + await prefixed.SortedSetRandomMembersAsync("key", 2, CommandFlags.None); + await mock.Received().SortedSetRandomMembersAsync("prefix:key", 2, CommandFlags.None); + } + + [Fact] + public async Task SortedSetRandomMemberWithScoresAsync() + { + await prefixed.SortedSetRandomMembersWithScoresAsync("key", 2, CommandFlags.None); + await mock.Received().SortedSetRandomMembersWithScoresAsync("prefix:key", 2, CommandFlags.None); + } + + [Fact] + public async Task SortedSetRangeByRankAsync() + { + await prefixed.SortedSetRangeByRankAsync("key", 123, 456, Order.Descending, CommandFlags.None); + await mock.Received().SortedSetRangeByRankAsync("prefix:key", 123, 456, Order.Descending, CommandFlags.None); + } + + [Fact] + public async Task SortedSetRangeByRankWithScoresAsync() + { + await prefixed.SortedSetRangeByRankWithScoresAsync("key", 123, 456, Order.Descending, CommandFlags.None); + await mock.Received().SortedSetRangeByRankWithScoresAsync("prefix:key", 123, 456, Order.Descending, CommandFlags.None); + } + + [Fact] + public async Task SortedSetRangeByScoreAsync() + { + await 
prefixed.SortedSetRangeByScoreAsync("key", 1.23, 1.23, Exclude.Start, Order.Descending, 123, 456, CommandFlags.None); + await mock.Received().SortedSetRangeByScoreAsync("prefix:key", 1.23, 1.23, Exclude.Start, Order.Descending, 123, 456, CommandFlags.None); + } + + [Fact] + public async Task SortedSetRangeByScoreWithScoresAsync() + { + await prefixed.SortedSetRangeByScoreWithScoresAsync("key", 1.23, 1.23, Exclude.Start, Order.Descending, 123, 456, CommandFlags.None); + await mock.Received().SortedSetRangeByScoreWithScoresAsync("prefix:key", 1.23, 1.23, Exclude.Start, Order.Descending, 123, 456, CommandFlags.None); + } + + [Fact] + public async Task SortedSetRangeByValueAsync() + { + await prefixed.SortedSetRangeByValueAsync("key", "min", "max", Exclude.Start, 123, 456, CommandFlags.None); + await mock.Received().SortedSetRangeByValueAsync("prefix:key", "min", "max", Exclude.Start, Order.Ascending, 123, 456, CommandFlags.None); + } + + [Fact] + public async Task SortedSetRangeByValueDescAsync() + { + await prefixed.SortedSetRangeByValueAsync("key", "min", "max", Exclude.Start, Order.Descending, 123, 456, CommandFlags.None); + await mock.Received().SortedSetRangeByValueAsync("prefix:key", "min", "max", Exclude.Start, Order.Descending, 123, 456, CommandFlags.None); + } + + [Fact] + public async Task SortedSetRankAsync() + { + await prefixed.SortedSetRankAsync("key", "member", Order.Descending, CommandFlags.None); + await mock.Received().SortedSetRankAsync("prefix:key", "member", Order.Descending, CommandFlags.None); + } + + [Fact] + public async Task SortedSetRemoveAsync_1() + { + await prefixed.SortedSetRemoveAsync("key", "member", CommandFlags.None); + await mock.Received().SortedSetRemoveAsync("prefix:key", "member", CommandFlags.None); + } + + [Fact] + public async Task SortedSetRemoveAsync_2() + { + RedisValue[] members = Array.Empty<RedisValue>(); + await prefixed.SortedSetRemoveAsync("key", members, CommandFlags.None); + await 
mock.Received().SortedSetRemoveAsync("prefix:key", members, CommandFlags.None); + } + + [Fact] + public async Task SortedSetRemoveRangeByRankAsync() + { + await prefixed.SortedSetRemoveRangeByRankAsync("key", 123, 456, CommandFlags.None); + await mock.Received().SortedSetRemoveRangeByRankAsync("prefix:key", 123, 456, CommandFlags.None); + } + + [Fact] + public async Task SortedSetRemoveRangeByScoreAsync() + { + await prefixed.SortedSetRemoveRangeByScoreAsync("key", 1.23, 1.23, Exclude.Start, CommandFlags.None); + await mock.Received().SortedSetRemoveRangeByScoreAsync("prefix:key", 1.23, 1.23, Exclude.Start, CommandFlags.None); + } + + [Fact] + public async Task SortedSetRemoveRangeByValueAsync() + { + await prefixed.SortedSetRemoveRangeByValueAsync("key", "min", "max", Exclude.Start, CommandFlags.None); + await mock.Received().SortedSetRemoveRangeByValueAsync("prefix:key", "min", "max", Exclude.Start, CommandFlags.None); + } + + [Fact] + public async Task SortedSetScoreAsync() + { + await prefixed.SortedSetScoreAsync("key", "member", CommandFlags.None); + await mock.Received().SortedSetScoreAsync("prefix:key", "member", CommandFlags.None); + } + + [Fact] + public async Task SortedSetScoreAsync_Multiple() + { + var values = new RedisValue[] { "member1", "member2" }; + await prefixed.SortedSetScoresAsync("key", values, CommandFlags.None); + await mock.Received().SortedSetScoresAsync("prefix:key", values, CommandFlags.None); + } + + [Fact] + public async Task SortedSetUpdateAsync() + { + SortedSetEntry[] values = Array.Empty<SortedSetEntry>(); + await prefixed.SortedSetUpdateAsync("key", values, SortedSetWhen.GreaterThan, CommandFlags.None); + await mock.Received().SortedSetUpdateAsync("prefix:key", values, SortedSetWhen.GreaterThan, CommandFlags.None); + } + + [Fact] + public async Task StreamAcknowledgeAsync_1() + { + await prefixed.StreamAcknowledgeAsync("key", "group", "0-0", CommandFlags.None); + await mock.Received().StreamAcknowledgeAsync("prefix:key", "group", "0-0", 
CommandFlags.None); + } + + [Fact] + public async Task StreamAcknowledgeAsync_2() + { + var messageIds = new RedisValue[] { "0-0", "0-1", "0-2" }; + await prefixed.StreamAcknowledgeAsync("key", "group", messageIds, CommandFlags.None); + await mock.Received().StreamAcknowledgeAsync("prefix:key", "group", messageIds, CommandFlags.None); + } + + [Fact] + public async Task StreamAddAsync_1() + { + await prefixed.StreamAddAsync("key", "field1", "value1", "*", 1000, true, CommandFlags.None); + await mock.Received().StreamAddAsync("prefix:key", "field1", "value1", "*", 1000, true, CommandFlags.None); + } + + [Fact] + public async Task StreamAddAsync_2() + { + var fields = Array.Empty<NameValueEntry>(); + await prefixed.StreamAddAsync("key", fields, "*", 1000, true, CommandFlags.None); + await mock.Received().StreamAddAsync("prefix:key", fields, "*", 1000, true, CommandFlags.None); + } + + [Fact] + public async Task StreamAutoClaimAsync() + { + await prefixed.StreamAutoClaimAsync("key", "group", "consumer", 0, "0-0", 100, CommandFlags.None); + await mock.Received().StreamAutoClaimAsync("prefix:key", "group", "consumer", 0, "0-0", 100, CommandFlags.None); + } + + [Fact] + public async Task StreamAutoClaimIdsOnlyAsync() + { + await prefixed.StreamAutoClaimIdsOnlyAsync("key", "group", "consumer", 0, "0-0", 100, CommandFlags.None); + await mock.Received().StreamAutoClaimIdsOnlyAsync("prefix:key", "group", "consumer", 0, "0-0", 100, CommandFlags.None); + } + + [Fact] + public async Task StreamClaimMessagesAsync() + { + var messageIds = Array.Empty<RedisValue>(); + await prefixed.StreamClaimAsync("key", "group", "consumer", 1000, messageIds, CommandFlags.None); + await mock.Received().StreamClaimAsync("prefix:key", "group", "consumer", 1000, messageIds, CommandFlags.None); + } + + [Fact] + public async Task StreamClaimMessagesReturningIdsAsync() + { + var messageIds = Array.Empty<RedisValue>(); + await prefixed.StreamClaimIdsOnlyAsync("key", "group", "consumer", 1000, messageIds, CommandFlags.None); + await 
mock.Received().StreamClaimIdsOnlyAsync("prefix:key", "group", "consumer", 1000, messageIds, CommandFlags.None); + } + + [Fact] + public async Task StreamConsumerInfoGetAsync() + { + await prefixed.StreamConsumerInfoAsync("key", "group", CommandFlags.None); + await mock.Received().StreamConsumerInfoAsync("prefix:key", "group", CommandFlags.None); + } + + [Fact] + public async Task StreamConsumerGroupSetPositionAsync() + { + await prefixed.StreamConsumerGroupSetPositionAsync("key", "group", StreamPosition.Beginning, CommandFlags.None); + await mock.Received().StreamConsumerGroupSetPositionAsync("prefix:key", "group", StreamPosition.Beginning, CommandFlags.None); + } + + [Fact] + public async Task StreamCreateConsumerGroupAsync() + { + await prefixed.StreamCreateConsumerGroupAsync("key", "group", "0-0", false, CommandFlags.None); + await mock.Received().StreamCreateConsumerGroupAsync("prefix:key", "group", "0-0", false, CommandFlags.None); + } + + [Fact] + public async Task StreamGroupInfoGetAsync() + { + await prefixed.StreamGroupInfoAsync("key", CommandFlags.None); + await mock.Received().StreamGroupInfoAsync("prefix:key", CommandFlags.None); + } + + [Fact] + public async Task StreamInfoGetAsync() + { + await prefixed.StreamInfoAsync("key", CommandFlags.None); + await mock.Received().StreamInfoAsync("prefix:key", CommandFlags.None); + } + + [Fact] + public async Task StreamLengthAsync() + { + await prefixed.StreamLengthAsync("key", CommandFlags.None); + await mock.Received().StreamLengthAsync("prefix:key", CommandFlags.None); + } + + [Fact] + public async Task StreamMessagesDeleteAsync() + { + var messageIds = Array.Empty<RedisValue>(); + await prefixed.StreamDeleteAsync("key", messageIds, CommandFlags.None); + await mock.Received().StreamDeleteAsync("prefix:key", messageIds, CommandFlags.None); + } + + [Fact] + public async Task StreamDeleteConsumerAsync() + { + await prefixed.StreamDeleteConsumerAsync("key", "group", "consumer", CommandFlags.None); + await 
mock.Received().StreamDeleteConsumerAsync("prefix:key", "group", "consumer", CommandFlags.None); + } + + [Fact] + public async Task StreamDeleteConsumerGroupAsync() + { + await prefixed.StreamDeleteConsumerGroupAsync("key", "group", CommandFlags.None); + await mock.Received().StreamDeleteConsumerGroupAsync("prefix:key", "group", CommandFlags.None); + } + + [Fact] + public async Task StreamPendingInfoGetAsync() + { + await prefixed.StreamPendingAsync("key", "group", CommandFlags.None); + await mock.Received().StreamPendingAsync("prefix:key", "group", CommandFlags.None); + } + + [Fact] + public async Task StreamPendingMessageInfoGetAsync() + { + await prefixed.StreamPendingMessagesAsync("key", "group", 10, RedisValue.Null, "-", "+", 1000, CommandFlags.None); + await mock.Received().StreamPendingMessagesAsync("prefix:key", "group", 10, RedisValue.Null, "-", "+", 1000, CommandFlags.None); + } + + [Fact] + public async Task StreamRangeAsync() + { + await prefixed.StreamRangeAsync("key", "-", "+", null, Order.Ascending, CommandFlags.None); + await mock.Received().StreamRangeAsync("prefix:key", "-", "+", null, Order.Ascending, CommandFlags.None); + } + + [Fact] + public async Task StreamReadAsync_1() + { + var streamPositions = Array.Empty<StreamPosition>(); + await prefixed.StreamReadAsync(streamPositions, null, CommandFlags.None); + await mock.Received().StreamReadAsync(streamPositions, null, CommandFlags.None); + } + + [Fact] + public async Task StreamReadAsync_2() + { + await prefixed.StreamReadAsync("key", "0-0", null, CommandFlags.None); + await mock.Received().StreamReadAsync("prefix:key", "0-0", null, CommandFlags.None); + } + + [Fact] + public async Task StreamReadGroupAsync_1() + { + await prefixed.StreamReadGroupAsync("key", "group", "consumer", StreamPosition.Beginning, 10, false, CommandFlags.None); + await mock.Received().StreamReadGroupAsync("prefix:key", "group", "consumer", StreamPosition.Beginning, 10, false, CommandFlags.None); + } + + [Fact] + public async Task 
StreamStreamReadGroupAsync_2() + { + var streamPositions = Array.Empty<StreamPosition>(); + await prefixed.StreamReadGroupAsync(streamPositions, "group", "consumer", 10, false, CommandFlags.None); + await mock.Received().StreamReadGroupAsync(streamPositions, "group", "consumer", 10, false, CommandFlags.None); + } + + [Fact] + public async Task StreamTrimAsync() + { + await prefixed.StreamTrimAsync("key", 1000, true, CommandFlags.None); + await mock.Received().StreamTrimAsync("prefix:key", 1000, true, CommandFlags.None); + } + + [Fact] + public async Task StreamTrimByMinIdAsync() + { + await prefixed.StreamTrimByMinIdAsync("key", 1111111111); + await mock.Received().StreamTrimByMinIdAsync("prefix:key", 1111111111); + } + + [Fact] + public async Task StreamTrimByMinIdAsyncWithApproximate() + { + await prefixed.StreamTrimByMinIdAsync("key", 1111111111, useApproximateMaxLength: true); + await mock.Received().StreamTrimByMinIdAsync("prefix:key", 1111111111, useApproximateMaxLength: true); + } + + [Fact] + public async Task StreamTrimByMinIdAsyncWithApproximateAndLimit() + { + await prefixed.StreamTrimByMinIdAsync("key", 1111111111, useApproximateMaxLength: true, limit: 100); + await mock.Received().StreamTrimByMinIdAsync("prefix:key", 1111111111, useApproximateMaxLength: true, limit: 100); + } + + [Fact] + public async Task StringAppendAsync() + { + await prefixed.StringAppendAsync("key", "value", CommandFlags.None); + await mock.Received().StringAppendAsync("prefix:key", "value", CommandFlags.None); + } + + [Fact] + public async Task StringBitCountAsync() + { + await prefixed.StringBitCountAsync("key", 123, 456, CommandFlags.None); + await mock.Received().StringBitCountAsync("prefix:key", 123, 456, CommandFlags.None); + } + + [Fact] + public async Task StringBitCountAsync_2() + { + await prefixed.StringBitCountAsync("key", 123, 456, StringIndexType.Byte, CommandFlags.None); + await mock.Received().StringBitCountAsync("prefix:key", 123, 456, StringIndexType.Byte, CommandFlags.None); + 
} + + [Fact] + public async Task StringBitOperationAsync_1() + { + await prefixed.StringBitOperationAsync(Bitwise.Xor, "destination", "first", "second", CommandFlags.None); + await mock.Received().StringBitOperationAsync(Bitwise.Xor, "prefix:destination", "prefix:first", "prefix:second", CommandFlags.None); + } + + [Fact] + public async Task StringBitOperationAsync_2() + { + RedisKey[] keys = ["a", "b"]; + await prefixed.StringBitOperationAsync(Bitwise.Xor, "destination", keys, CommandFlags.None); + await mock.Received().StringBitOperationAsync(Bitwise.Xor, "prefix:destination", IsKeys("prefix:a", "prefix:b"), CommandFlags.None); + } + + [Fact] + public async Task StringBitOperationAsync_Diff() + { + RedisKey[] keys = ["x", "y1", "y2"]; + await prefixed.StringBitOperationAsync(Bitwise.Diff, "destination", keys, CommandFlags.None); + await mock.Received().StringBitOperationAsync(Bitwise.Diff, "prefix:destination", IsKeys("prefix:x", "prefix:y1", "prefix:y2"), CommandFlags.None); + } + + [Fact] + public async Task StringBitOperationAsync_Diff1() + { + RedisKey[] keys = ["x", "y1", "y2"]; + await prefixed.StringBitOperationAsync(Bitwise.Diff1, "destination", keys, CommandFlags.None); + await mock.Received().StringBitOperationAsync(Bitwise.Diff1, "prefix:destination", IsKeys("prefix:x", "prefix:y1", "prefix:y2"), CommandFlags.None); + } + + [Fact] + public async Task StringBitOperationAsync_AndOr() + { + RedisKey[] keys = ["x", "y1", "y2"]; + await prefixed.StringBitOperationAsync(Bitwise.AndOr, "destination", keys, CommandFlags.None); + await mock.Received().StringBitOperationAsync(Bitwise.AndOr, "prefix:destination", IsKeys("prefix:x", "prefix:y1", "prefix:y2"), CommandFlags.None); + } + + [Fact] + public async Task StringBitOperationAsync_One() + { + RedisKey[] keys = ["a", "b", "c"]; + await prefixed.StringBitOperationAsync(Bitwise.One, "destination", keys, CommandFlags.None); + await mock.Received().StringBitOperationAsync(Bitwise.One, "prefix:destination", 
IsKeys("prefix:a", "prefix:b", "prefix:c"), CommandFlags.None); + } + + [Fact] + public async Task StringBitPositionAsync() + { + await prefixed.StringBitPositionAsync("key", true, 123, 456, CommandFlags.None); + await mock.Received().StringBitPositionAsync("prefix:key", true, 123, 456, CommandFlags.None); + } + + [Fact] + public async Task StringBitPositionAsync_2() + { + await prefixed.StringBitPositionAsync("key", true, 123, 456, StringIndexType.Byte, CommandFlags.None); + await mock.Received().StringBitPositionAsync("prefix:key", true, 123, 456, StringIndexType.Byte, CommandFlags.None); + } + + [Fact] + public async Task StringDecrementAsync_1() + { + await prefixed.StringDecrementAsync("key", 123, CommandFlags.None); + await mock.Received().StringDecrementAsync("prefix:key", 123, CommandFlags.None); + } + + [Fact] + public async Task StringDecrementAsync_2() + { + await prefixed.StringDecrementAsync("key", 1.23, CommandFlags.None); + await mock.Received().StringDecrementAsync("prefix:key", 1.23, CommandFlags.None); + } + + [Fact] + public async Task StringGetAsync_1() + { + await prefixed.StringGetAsync("key", CommandFlags.None); + await mock.Received().StringGetAsync("prefix:key", CommandFlags.None); + } + + [Fact] + public async Task StringGetAsync_2() + { + RedisKey[] keys = ["a", "b"]; + await prefixed.StringGetAsync(keys, CommandFlags.None); + await mock.Received().StringGetAsync(IsKeys("prefix:a", "prefix:b"), CommandFlags.None); + } + + [Fact] + public async Task StringGetBitAsync() + { + await prefixed.StringGetBitAsync("key", 123, CommandFlags.None); + await mock.Received().StringGetBitAsync("prefix:key", 123, CommandFlags.None); + } + + [Fact] + public async Task StringGetRangeAsync() + { + await prefixed.StringGetRangeAsync("key", 123, 456, CommandFlags.None); + await mock.Received().StringGetRangeAsync("prefix:key", 123, 456, CommandFlags.None); + } + + [Fact] + public async Task StringGetSetAsync() + { + await prefixed.StringGetSetAsync("key", 
"value", CommandFlags.None); + await mock.Received().StringGetSetAsync("prefix:key", "value", CommandFlags.None); + } + + [Fact] + public async Task StringGetDeleteAsync() + { + await prefixed.StringGetDeleteAsync("key", CommandFlags.None); + await mock.Received().StringGetDeleteAsync("prefix:key", CommandFlags.None); + } + + [Fact] + public async Task StringGetWithExpiryAsync() + { + await prefixed.StringGetWithExpiryAsync("key", CommandFlags.None); + await mock.Received().StringGetWithExpiryAsync("prefix:key", CommandFlags.None); + } + + [Fact] + public async Task StringIncrementAsync_1() + { + await prefixed.StringIncrementAsync("key", 123, CommandFlags.None); + await mock.Received().StringIncrementAsync("prefix:key", 123, CommandFlags.None); + } + + [Fact] + public async Task StringIncrementAsync_2() + { + await prefixed.StringIncrementAsync("key", 1.23, CommandFlags.None); + await mock.Received().StringIncrementAsync("prefix:key", 1.23, CommandFlags.None); + } + + [Fact] + public async Task StringLengthAsync() + { + await prefixed.StringLengthAsync("key", CommandFlags.None); + await mock.Received().StringLengthAsync("prefix:key", CommandFlags.None); + } + + [Fact] + public async Task StringSetAsync_1() + { + TimeSpan expiry = TimeSpan.FromSeconds(123); + await prefixed.StringSetAsync("key", "value", expiry, When.Exists, CommandFlags.None); + await mock.Received().StringSetAsync("prefix:key", "value", expiry, When.Exists, CommandFlags.None); + } + + [Fact] + public async Task StringSetAsync_2() + { + TimeSpan? 
expiry = null; + await prefixed.StringSetAsync("key", "value", expiry, true, When.Exists, CommandFlags.None); + await mock.Received().StringSetAsync("prefix:key", "value", expiry, true, When.Exists, CommandFlags.None); + } + + [Fact] + public async Task StringSetAsync_3() + { + KeyValuePair[] values = [new KeyValuePair("a", "x"), new KeyValuePair("b", "y")]; + Expression[]>> valid = _ => _.Length == 2 && _[0].Key == "prefix:a" && _[0].Value == "x" && _[1].Key == "prefix:b" && _[1].Value == "y"; + await prefixed.StringSetAsync(values, When.Exists, CommandFlags.None); + await mock.Received().StringSetAsync(Arg.Is(valid), When.Exists, CommandFlags.None); + } + + [Fact] + public async Task StringSetAsync_Compat() + { + TimeSpan expiry = TimeSpan.FromSeconds(123); + await prefixed.StringSetAsync("key", "value", expiry, When.Exists); + await mock.Received().StringSetAsync("prefix:key", "value", expiry, When.Exists); + } + + [Fact] + public async Task StringSetBitAsync() + { + await prefixed.StringSetBitAsync("key", 123, true, CommandFlags.None); + await mock.Received().StringSetBitAsync("prefix:key", 123, true, CommandFlags.None); + } + + [Fact] + public async Task StringSetRangeAsync() + { + await prefixed.StringSetRangeAsync("key", 123, "value", CommandFlags.None); + await mock.Received().StringSetRangeAsync("prefix:key", 123, "value", CommandFlags.None); + } + + [Fact] + public async Task KeyTouchAsync_1() + { + await prefixed.KeyTouchAsync("key", CommandFlags.None); + await mock.Received().KeyTouchAsync("prefix:key", CommandFlags.None); + } + + [Fact] + public async Task KeyTouchAsync_2() + { + RedisKey[] keys = ["a", "b"]; + await prefixed.KeyTouchAsync(keys, CommandFlags.None); + await mock.Received().KeyTouchAsync(IsKeys("prefix:a", "prefix:b"), CommandFlags.None); + } + [Fact] + public async Task ExecuteAsync_1() + { + await prefixed.ExecuteAsync("CUSTOM", "arg1", (RedisKey)"arg2"); + await mock.Received().ExecuteAsync("CUSTOM", Arg.Is(args => args.Length == 2 && 
args[0].Equals("arg1") && args[1].Equals((RedisKey)"prefix:arg2")), CommandFlags.None); + } + + [Fact] + public async Task ExecuteAsync_2() + { + var args = new List { "arg1", (RedisKey)"arg2" }; + await prefixed.ExecuteAsync("CUSTOM", args, CommandFlags.None); + await mock.Received().ExecuteAsync("CUSTOM", Arg.Is?>(a => a != null && a.Count == 2 && a.ElementAt(0).Equals("arg1") && a.ElementAt(1).Equals((RedisKey)"prefix:arg2")), CommandFlags.None); + } + [Fact] + public async Task GeoAddAsync_1() + { + await prefixed.GeoAddAsync("key", 1.23, 4.56, "member", CommandFlags.None); + await mock.Received().GeoAddAsync("prefix:key", 1.23, 4.56, "member", CommandFlags.None); + } + + [Fact] + public async Task GeoAddAsync_2() + { + var geoEntry = new GeoEntry(1.23, 4.56, "member"); + await prefixed.GeoAddAsync("key", geoEntry, CommandFlags.None); + await mock.Received().GeoAddAsync("prefix:key", geoEntry, CommandFlags.None); + } + + [Fact] + public async Task GeoAddAsync_3() + { + var geoEntries = new GeoEntry[] { new GeoEntry(1.23, 4.56, "member1") }; + await prefixed.GeoAddAsync("key", geoEntries, CommandFlags.None); + await mock.Received().GeoAddAsync("prefix:key", geoEntries, CommandFlags.None); + } + + [Fact] + public async Task GeoRemoveAsync() + { + await prefixed.GeoRemoveAsync("key", "member", CommandFlags.None); + await mock.Received().GeoRemoveAsync("prefix:key", "member", CommandFlags.None); + } + + [Fact] + public async Task GeoDistanceAsync() + { + await prefixed.GeoDistanceAsync("key", "member1", "member2", GeoUnit.Meters, CommandFlags.None); + await mock.Received().GeoDistanceAsync("prefix:key", "member1", "member2", GeoUnit.Meters, CommandFlags.None); + } + + [Fact] + public async Task GeoHashAsync_1() + { + await prefixed.GeoHashAsync("key", "member", CommandFlags.None); + await mock.Received().GeoHashAsync("prefix:key", "member", CommandFlags.None); + } + + [Fact] + public async Task GeoHashAsync_2() + { + var members = new RedisValue[] { "member1", 
"member2" }; + await prefixed.GeoHashAsync("key", members, CommandFlags.None); + await mock.Received().GeoHashAsync("prefix:key", members, CommandFlags.None); + } + + [Fact] + public async Task GeoPositionAsync_1() + { + await prefixed.GeoPositionAsync("key", "member", CommandFlags.None); + await mock.Received().GeoPositionAsync("prefix:key", "member", CommandFlags.None); + } + + [Fact] + public async Task GeoPositionAsync_2() + { + var members = new RedisValue[] { "member1", "member2" }; + await prefixed.GeoPositionAsync("key", members, CommandFlags.None); + await mock.Received().GeoPositionAsync("prefix:key", members, CommandFlags.None); + } + + [Fact] + public async Task GeoRadiusAsync_1() + { + await prefixed.GeoRadiusAsync("key", "member", 100, GeoUnit.Meters, 10, Order.Ascending, GeoRadiusOptions.Default, CommandFlags.None); + await mock.Received().GeoRadiusAsync("prefix:key", "member", 100, GeoUnit.Meters, 10, Order.Ascending, GeoRadiusOptions.Default, CommandFlags.None); + } + + [Fact] + public async Task GeoRadiusAsync_2() + { + await prefixed.GeoRadiusAsync("key", 1.23, 4.56, 100, GeoUnit.Meters, 10, Order.Ascending, GeoRadiusOptions.Default, CommandFlags.None); + await mock.Received().GeoRadiusAsync("prefix:key", 1.23, 4.56, 100, GeoUnit.Meters, 10, Order.Ascending, GeoRadiusOptions.Default, CommandFlags.None); + } + + [Fact] + public async Task GeoSearchAsync_1() + { + var shape = new GeoSearchCircle(100, GeoUnit.Meters); + await prefixed.GeoSearchAsync("key", "member", shape, 10, true, Order.Ascending, GeoRadiusOptions.Default, CommandFlags.None); + await mock.Received().GeoSearchAsync("prefix:key", "member", shape, 10, true, Order.Ascending, GeoRadiusOptions.Default, CommandFlags.None); + } + + [Fact] + public async Task GeoSearchAsync_2() + { + var shape = new GeoSearchCircle(100, GeoUnit.Meters); + await prefixed.GeoSearchAsync("key", 1.23, 4.56, shape, 10, true, Order.Ascending, GeoRadiusOptions.Default, CommandFlags.None); + await 
mock.Received().GeoSearchAsync("prefix:key", 1.23, 4.56, shape, 10, true, Order.Ascending, GeoRadiusOptions.Default, CommandFlags.None); + } + + [Fact] + public async Task GeoSearchAndStoreAsync_1() + { + var shape = new GeoSearchCircle(100, GeoUnit.Meters); + await prefixed.GeoSearchAndStoreAsync("source", "destination", "member", shape, 10, true, Order.Ascending, false, CommandFlags.None); + await mock.Received().GeoSearchAndStoreAsync("prefix:source", "prefix:destination", "member", shape, 10, true, Order.Ascending, false, CommandFlags.None); + } + + [Fact] + public async Task GeoSearchAndStoreAsync_2() + { + var shape = new GeoSearchCircle(100, GeoUnit.Meters); + await prefixed.GeoSearchAndStoreAsync("source", "destination", 1.23, 4.56, shape, 10, true, Order.Ascending, false, CommandFlags.None); + await mock.Received().GeoSearchAndStoreAsync("prefix:source", "prefix:destination", 1.23, 4.56, shape, 10, true, Order.Ascending, false, CommandFlags.None); + } + [Fact] + public async Task HashFieldExpireAsync_1() + { + var hashFields = new RedisValue[] { "field1", "field2" }; + var expiry = TimeSpan.FromSeconds(60); + await prefixed.HashFieldExpireAsync("key", hashFields, expiry, ExpireWhen.Always, CommandFlags.None); + await mock.Received().HashFieldExpireAsync("prefix:key", hashFields, expiry, ExpireWhen.Always, CommandFlags.None); + } + + [Fact] + public async Task HashFieldExpireAsync_2() + { + var hashFields = new RedisValue[] { "field1", "field2" }; + var expiry = DateTime.Now.AddMinutes(1); + await prefixed.HashFieldExpireAsync("key", hashFields, expiry, ExpireWhen.Always, CommandFlags.None); + await mock.Received().HashFieldExpireAsync("prefix:key", hashFields, expiry, ExpireWhen.Always, CommandFlags.None); + } + + [Fact] + public async Task HashFieldGetExpireDateTimeAsync() + { + var hashFields = new RedisValue[] { "field1", "field2" }; + await prefixed.HashFieldGetExpireDateTimeAsync("key", hashFields, CommandFlags.None); + await 
mock.Received().HashFieldGetExpireDateTimeAsync("prefix:key", hashFields, CommandFlags.None); + } + + [Fact] + public async Task HashFieldPersistAsync() + { + var hashFields = new RedisValue[] { "field1", "field2" }; + await prefixed.HashFieldPersistAsync("key", hashFields, CommandFlags.None); + await mock.Received().HashFieldPersistAsync("prefix:key", hashFields, CommandFlags.None); + } + + [Fact] + public async Task HashFieldGetTimeToLiveAsync() + { + var hashFields = new RedisValue[] { "field1", "field2" }; + await prefixed.HashFieldGetTimeToLiveAsync("key", hashFields, CommandFlags.None); + await mock.Received().HashFieldGetTimeToLiveAsync("prefix:key", hashFields, CommandFlags.None); + } + [Fact] + public async Task HashGetLeaseAsync() + { + await prefixed.HashGetLeaseAsync("key", "field", CommandFlags.None); + await mock.Received().HashGetLeaseAsync("prefix:key", "field", CommandFlags.None); + } + + [Fact] + public async Task HashFieldGetAndDeleteAsync_1() + { + await prefixed.HashFieldGetAndDeleteAsync("key", "field", CommandFlags.None); + await mock.Received().HashFieldGetAndDeleteAsync("prefix:key", "field", CommandFlags.None); + } + + [Fact] + public async Task HashFieldGetAndDeleteAsync_2() + { + var hashFields = new RedisValue[] { "field1", "field2" }; + await prefixed.HashFieldGetAndDeleteAsync("key", hashFields, CommandFlags.None); + await mock.Received().HashFieldGetAndDeleteAsync("prefix:key", hashFields, CommandFlags.None); + } + + [Fact] + public async Task HashFieldGetLeaseAndDeleteAsync() + { + await prefixed.HashFieldGetLeaseAndDeleteAsync("key", "field", CommandFlags.None); + await mock.Received().HashFieldGetLeaseAndDeleteAsync("prefix:key", "field", CommandFlags.None); + } + + [Fact] + public async Task HashFieldGetAndSetExpiryAsync_1() + { + var expiry = TimeSpan.FromMinutes(5); + await prefixed.HashFieldGetAndSetExpiryAsync("key", "field", expiry, false, CommandFlags.None); + await 
mock.Received().HashFieldGetAndSetExpiryAsync("prefix:key", "field", expiry, false, CommandFlags.None); + } + + [Fact] + public async Task HashFieldGetAndSetExpiryAsync_2() + { + var expiry = DateTime.Now.AddMinutes(5); + await prefixed.HashFieldGetAndSetExpiryAsync("key", "field", expiry, CommandFlags.None); + await mock.Received().HashFieldGetAndSetExpiryAsync("prefix:key", "field", expiry, CommandFlags.None); + } + + [Fact] + public async Task HashFieldGetLeaseAndSetExpiryAsync_1() + { + var expiry = TimeSpan.FromMinutes(5); + await prefixed.HashFieldGetLeaseAndSetExpiryAsync("key", "field", expiry, false, CommandFlags.None); + await mock.Received().HashFieldGetLeaseAndSetExpiryAsync("prefix:key", "field", expiry, false, CommandFlags.None); + } + + [Fact] + public async Task HashFieldGetLeaseAndSetExpiryAsync_2() + { + var expiry = DateTime.Now.AddMinutes(5); + await prefixed.HashFieldGetLeaseAndSetExpiryAsync("key", "field", expiry, CommandFlags.None); + await mock.Received().HashFieldGetLeaseAndSetExpiryAsync("prefix:key", "field", expiry, CommandFlags.None); + } + [Fact] + public async Task StringGetLeaseAsync() + { + await prefixed.StringGetLeaseAsync("key", CommandFlags.None); + await mock.Received().StringGetLeaseAsync("prefix:key", CommandFlags.None); + } + + [Fact] + public async Task StringGetSetExpiryAsync_1() + { + var expiry = TimeSpan.FromMinutes(5); + await prefixed.StringGetSetExpiryAsync("key", expiry, CommandFlags.None); + await mock.Received().StringGetSetExpiryAsync("prefix:key", expiry, CommandFlags.None); + } + + [Fact] + public async Task StringGetSetExpiryAsync_2() + { + var expiry = DateTime.Now.AddMinutes(5); + await prefixed.StringGetSetExpiryAsync("key", expiry, CommandFlags.None); + await mock.Received().StringGetSetExpiryAsync("prefix:key", expiry, CommandFlags.None); + } + + [Fact] + public async Task StringSetAndGetAsync_1() + { + var expiry = TimeSpan.FromMinutes(5); + await prefixed.StringSetAndGetAsync("key", "value", expiry, 
When.Always, CommandFlags.None); + await mock.Received().StringSetAndGetAsync("prefix:key", "value", expiry, When.Always, CommandFlags.None); + } + + [Fact] + public async Task StringSetAndGetAsync_2() + { + var expiry = TimeSpan.FromMinutes(5); + await prefixed.StringSetAndGetAsync("key", "value", expiry, false, When.Always, CommandFlags.None); + await mock.Received().StringSetAndGetAsync("prefix:key", "value", expiry, false, When.Always, CommandFlags.None); + } + [Fact] + public async Task StringLongestCommonSubsequenceAsync() + { + await prefixed.StringLongestCommonSubsequenceAsync("key1", "key2", CommandFlags.None); + await mock.Received().StringLongestCommonSubsequenceAsync("prefix:key1", "prefix:key2", CommandFlags.None); + } + + [Fact] + public async Task StringLongestCommonSubsequenceLengthAsync() + { + await prefixed.StringLongestCommonSubsequenceLengthAsync("key1", "key2", CommandFlags.None); + await mock.Received().StringLongestCommonSubsequenceLengthAsync("prefix:key1", "prefix:key2", CommandFlags.None); + } + + [Fact] + public async Task StringLongestCommonSubsequenceWithMatchesAsync() + { + await prefixed.StringLongestCommonSubsequenceWithMatchesAsync("key1", "key2", 5, CommandFlags.None); + await mock.Received().StringLongestCommonSubsequenceWithMatchesAsync("prefix:key1", "prefix:key2", 5, CommandFlags.None); + } + [Fact] + public async Task KeyIdleTimeAsync() + { + await prefixed.KeyIdleTimeAsync("key", CommandFlags.None); + await mock.Received().KeyIdleTimeAsync("prefix:key", CommandFlags.None); + } + [Fact] + public async Task StreamAddAsync_WithTrimMode_1() + { + await prefixed.StreamAddAsync("key", "field", "value", "*", 1000, false, 100, StreamTrimMode.KeepReferences, CommandFlags.None); + await mock.Received().StreamAddAsync("prefix:key", "field", "value", "*", 1000, false, 100, StreamTrimMode.KeepReferences, CommandFlags.None); + } + + [Fact] + public async Task StreamAddAsync_WithTrimMode_2() + { + var fields = new NameValueEntry[] { new 
NameValueEntry("field", "value") }; + await prefixed.StreamAddAsync("key", fields, "*", 1000, false, 100, StreamTrimMode.KeepReferences, CommandFlags.None); + await mock.Received().StreamAddAsync("prefix:key", fields, "*", 1000, false, 100, StreamTrimMode.KeepReferences, CommandFlags.None); + } + + [Fact] + public async Task StreamTrimAsync_WithMode() + { + await prefixed.StreamTrimAsync("key", 1000, false, 100, StreamTrimMode.KeepReferences, CommandFlags.None); + await mock.Received().StreamTrimAsync("prefix:key", 1000, false, 100, StreamTrimMode.KeepReferences, CommandFlags.None); + } + + [Fact] + public async Task StreamTrimByMinIdAsync_WithMode() + { + await prefixed.StreamTrimByMinIdAsync("key", "1111111111", false, 100, StreamTrimMode.KeepReferences, CommandFlags.None); + await mock.Received().StreamTrimByMinIdAsync("prefix:key", "1111111111", false, 100, StreamTrimMode.KeepReferences, CommandFlags.None); + } + + [Fact] + public async Task StreamReadGroupAsync_WithNoAck_1() + { + await prefixed.StreamReadGroupAsync("key", "group", "consumer", "0-0", 10, true, CommandFlags.None); + await mock.Received().StreamReadGroupAsync("prefix:key", "group", "consumer", "0-0", 10, true, CommandFlags.None); + } + + [Fact] + public async Task StreamReadGroupAsync_WithNoAck_2() + { + var streamPositions = new StreamPosition[] { new StreamPosition("key", "0-0") }; + await prefixed.StreamReadGroupAsync(streamPositions, "group", "consumer", 10, true, CommandFlags.None); + await mock.Received().StreamReadGroupAsync(streamPositions, "group", "consumer", 10, true, CommandFlags.None); + } + + [Fact] + public async Task StreamTrimAsync_Simple() + { + await prefixed.StreamTrimAsync("key", 1000, true, CommandFlags.None); + await mock.Received().StreamTrimAsync("prefix:key", 1000, true, CommandFlags.None); + } + + [Fact] + public async Task StreamReadGroupAsync_Simple_1() + { + await prefixed.StreamReadGroupAsync("key", "group", "consumer", "0-0", 10, CommandFlags.None); + await 
mock.Received().StreamReadGroupAsync("prefix:key", "group", "consumer", "0-0", 10, CommandFlags.None); + } + + [Fact] + public async Task StreamReadGroupAsync_Simple_2() + { + var streamPositions = new StreamPosition[] { new StreamPosition("key", "0-0") }; + await prefixed.StreamReadGroupAsync(streamPositions, "group", "consumer", 10, CommandFlags.None); + await mock.Received().StreamReadGroupAsync(streamPositions, "group", "consumer", 10, CommandFlags.None); + } + + [Fact] + public void HashScanAsync() + { + var result = prefixed.HashScanAsync("key", "pattern*", 10, 1, 2, CommandFlags.None); + _ = mock.Received().HashScanAsync("prefix:key", "pattern*", 10, 1, 2, CommandFlags.None); + } + + [Fact] + public void HashScanNoValuesAsync() + { + var result = prefixed.HashScanNoValuesAsync("key", "pattern*", 10, 1, 2, CommandFlags.None); + _ = mock.Received().HashScanNoValuesAsync("prefix:key", "pattern*", 10, 1, 2, CommandFlags.None); + } + + [Fact] + public void SetScanAsync() + { + var result = prefixed.SetScanAsync("key", "pattern*", 10, 1, 2, CommandFlags.None); + _ = mock.Received().SetScanAsync("prefix:key", "pattern*", 10, 1, 2, CommandFlags.None); + } + + [Fact] + public void SortedSetScanAsync() + { + var result = prefixed.SortedSetScanAsync("key", "pattern*", 10, 1, 2, CommandFlags.None); + _ = mock.Received().SortedSetScanAsync("prefix:key", "pattern*", 10, 1, 2, CommandFlags.None); + } + } +} diff --git a/tests/StackExchange.Redis.Tests/KeyPrefixedTransactionTests.cs b/tests/StackExchange.Redis.Tests/KeyPrefixedTransactionTests.cs new file mode 100644 index 000000000..04a974808 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/KeyPrefixedTransactionTests.cs @@ -0,0 +1,132 @@ +using System.Text; +using System.Threading.Tasks; +using NSubstitute; +using StackExchange.Redis.KeyspaceIsolation; +using Xunit; + +namespace StackExchange.Redis.Tests; + +[Collection(nameof(SubstituteDependentCollection))] +public sealed class KeyPrefixedTransactionTests +{ + 
private readonly ITransaction mock; + private readonly KeyPrefixedTransaction prefixed; + + public KeyPrefixedTransactionTests() + { + mock = Substitute.For(); + prefixed = new KeyPrefixedTransaction(mock, Encoding.UTF8.GetBytes("prefix:")); + } + + [Fact] + public void AddCondition_HashEqual() + { + prefixed.AddCondition(Condition.HashEqual("key", "field", "value")); + mock.Received().AddCondition(Arg.Is(value => "prefix:key Hash > field == value" == value.ToString())); + } + + [Fact] + public void AddCondition_HashNotEqual() + { + prefixed.AddCondition(Condition.HashNotEqual("key", "field", "value")); + mock.Received().AddCondition(Arg.Is(value => "prefix:key Hash > field != value" == value.ToString())); + } + + [Fact] + public void AddCondition_HashExists() + { + prefixed.AddCondition(Condition.HashExists("key", "field")); + mock.Received().AddCondition(Arg.Is(value => "prefix:key Hash > field exists" == value.ToString())); + } + + [Fact] + public void AddCondition_HashNotExists() + { + prefixed.AddCondition(Condition.HashNotExists("key", "field")); + mock.Received().AddCondition(Arg.Is(value => "prefix:key Hash > field does not exists" == value.ToString())); + } + + [Fact] + public void AddCondition_KeyExists() + { + prefixed.AddCondition(Condition.KeyExists("key")); + mock.Received().AddCondition(Arg.Is(value => "prefix:key exists" == value.ToString())); + } + + [Fact] + public void AddCondition_KeyNotExists() + { + prefixed.AddCondition(Condition.KeyNotExists("key")); + mock.Received().AddCondition(Arg.Is(value => "prefix:key does not exists" == value.ToString())); + } + + [Fact] + public void AddCondition_StringEqual() + { + prefixed.AddCondition(Condition.StringEqual("key", "value")); + mock.Received().AddCondition(Arg.Is(value => "prefix:key == value" == value.ToString())); + } + + [Fact] + public void AddCondition_StringNotEqual() + { + prefixed.AddCondition(Condition.StringNotEqual("key", "value")); + mock.Received().AddCondition(Arg.Is(value => 
"prefix:key != value" == value.ToString())); + } + + [Fact] + public void AddCondition_SortedSetEqual() + { + prefixed.AddCondition(Condition.SortedSetEqual("key", "member", "score")); + mock.Received().AddCondition(Arg.Is(value => "prefix:key SortedSet > member == score" == value.ToString())); + } + + [Fact] + public void AddCondition_SortedSetNotEqual() + { + prefixed.AddCondition(Condition.SortedSetNotEqual("key", "member", "score")); + mock.Received().AddCondition(Arg.Is(value => "prefix:key SortedSet > member != score" == value.ToString())); + } + + [Fact] + public void AddCondition_SortedSetScoreExists() + { + prefixed.AddCondition(Condition.SortedSetScoreExists("key", "score")); + mock.Received().AddCondition(Arg.Is(value => "prefix:key not contains 0 members with score: score" == value.ToString())); + } + + [Fact] + public void AddCondition_SortedSetScoreNotExists() + { + prefixed.AddCondition(Condition.SortedSetScoreNotExists("key", "score")); + mock.Received().AddCondition(Arg.Is(value => "prefix:key contains 0 members with score: score" == value.ToString())); + } + + [Fact] + public void AddCondition_SortedSetScoreCountExists() + { + prefixed.AddCondition(Condition.SortedSetScoreExists("key", "score", "count")); + mock.Received().AddCondition(Arg.Is(value => "prefix:key contains count members with score: score" == value.ToString())); + } + + [Fact] + public void AddCondition_SortedSetScoreCountNotExists() + { + prefixed.AddCondition(Condition.SortedSetScoreNotExists("key", "score", "count")); + mock.Received().AddCondition(Arg.Is(value => "prefix:key not contains count members with score: score" == value.ToString())); + } + + [Fact] + public async Task ExecuteAsync() + { + await prefixed.ExecuteAsync(CommandFlags.None); + await mock.Received(1).ExecuteAsync(CommandFlags.None); + } + + [Fact] + public void Execute() + { + prefixed.Execute(CommandFlags.None); + mock.Received(1).Execute(CommandFlags.None); + } +} diff --git 
a/tests/StackExchange.Redis.Tests/KeyPrefixedVectorSetTests.cs b/tests/StackExchange.Redis.Tests/KeyPrefixedVectorSetTests.cs new file mode 100644 index 000000000..b4ff2091b --- /dev/null +++ b/tests/StackExchange.Redis.Tests/KeyPrefixedVectorSetTests.cs @@ -0,0 +1,214 @@ +using System; +using System.Text; +using NSubstitute; +using Xunit; + +namespace StackExchange.Redis.Tests +{ + [Collection(nameof(SubstituteDependentCollection))] + public sealed class KeyPrefixedVectorSetTests + { + private readonly IDatabase mock; + private readonly IDatabase prefixed; + + public KeyPrefixedVectorSetTests() + { + mock = Substitute.For(); + prefixed = new KeyspaceIsolation.KeyPrefixedDatabase(mock, Encoding.UTF8.GetBytes("prefix:")); + } + + [Fact] + public void VectorSetAdd_Fp32() + { + if (BitConverter.IsLittleEndian) + { + Assert.True(VectorSetAddMessage.UseFp32); +#if DEBUG // can be suppressed + VectorSetAddMessage.SuppressFp32(); + Assert.False(VectorSetAddMessage.UseFp32); + VectorSetAddMessage.RestoreFp32(); + Assert.True(VectorSetAddMessage.UseFp32); +#endif + } + else + { + Assert.False(VectorSetAddMessage.UseFp32); + } + } + + [Fact] + public void VectorSetAdd_BasicCall() + { + var vector = new[] { 1.0f, 2.0f, 3.0f }.AsMemory(); + + var request = VectorSetAddRequest.Member("element1", vector); + prefixed.VectorSetAdd("vectorset", request); + + mock.Received().VectorSetAdd( + "prefix:vectorset", + request); + } + + [Fact] + public void VectorSetAdd_WithAllParameters() + { + var vector = new[] { 1.0f, 2.0f, 3.0f }.AsMemory(); + var attributes = """{"category":"test"}"""; + + var request = VectorSetAddRequest.Member( + "element1", + vector, + attributes); + request.ReducedDimensions = 64; + request.Quantization = VectorSetQuantization.Binary; + request.BuildExplorationFactor = 300; + request.MaxConnections = 32; + request.UseCheckAndSet = true; + prefixed.VectorSetAdd( + "vectorset", + request, + flags: CommandFlags.FireAndForget); + + mock.Received().VectorSetAdd( + 
"prefix:vectorset", + request, + CommandFlags.FireAndForget); + } + + [Fact] + public void VectorSetLength() + { + prefixed.VectorSetLength("vectorset"); + mock.Received().VectorSetLength("prefix:vectorset"); + } + + [Fact] + public void VectorSetDimension() + { + prefixed.VectorSetDimension("vectorset"); + mock.Received().VectorSetDimension("prefix:vectorset"); + } + + [Fact] + public void VectorSetGetApproximateVector() + { + prefixed.VectorSetGetApproximateVector("vectorset", "member1"); + mock.Received().VectorSetGetApproximateVector("prefix:vectorset", "member1"); + } + + [Fact] + public void VectorSetGetAttributesJson() + { + prefixed.VectorSetGetAttributesJson("vectorset", "member1"); + mock.Received().VectorSetGetAttributesJson("prefix:vectorset", "member1"); + } + + [Fact] + public void VectorSetInfo() + { + prefixed.VectorSetInfo("vectorset"); + mock.Received().VectorSetInfo("prefix:vectorset"); + } + + [Fact] + public void VectorSetContains() + { + prefixed.VectorSetContains("vectorset", "member1"); + mock.Received().VectorSetContains("prefix:vectorset", "member1"); + } + + [Fact] + public void VectorSetGetLinks() + { + prefixed.VectorSetGetLinks("vectorset", "member1"); + mock.Received().VectorSetGetLinks("prefix:vectorset", "member1"); + } + + [Fact] + public void VectorSetGetLinksWithScores() + { + prefixed.VectorSetGetLinksWithScores("vectorset", "member1"); + mock.Received().VectorSetGetLinksWithScores("prefix:vectorset", "member1"); + } + + [Fact] + public void VectorSetRandomMember() + { + prefixed.VectorSetRandomMember("vectorset"); + mock.Received().VectorSetRandomMember("prefix:vectorset"); + } + + [Fact] + public void VectorSetRandomMembers() + { + prefixed.VectorSetRandomMembers("vectorset", 5); + mock.Received().VectorSetRandomMembers("prefix:vectorset", 5); + } + + [Fact] + public void VectorSetRemove() + { + prefixed.VectorSetRemove("vectorset", "member1"); + mock.Received().VectorSetRemove("prefix:vectorset", "member1"); + } + + [Fact] + 
public void VectorSetSetAttributesJson() + { + var attributes = """{"category":"test"}"""; + + prefixed.VectorSetSetAttributesJson("vectorset", "member1", attributes); + mock.Received().VectorSetSetAttributesJson("prefix:vectorset", "member1", attributes); + } + + [Fact] + public void VectorSetSimilaritySearchByVector() + { + var vector = new[] { 1.0f, 2.0f, 3.0f }.AsMemory(); + + var query = VectorSetSimilaritySearchRequest.ByVector(vector); + prefixed.VectorSetSimilaritySearch( + "vectorset", + query); + mock.Received().VectorSetSimilaritySearch( + "prefix:vectorset", + query); + } + + [Fact] + public void VectorSetSimilaritySearchByMember() + { + var query = VectorSetSimilaritySearchRequest.ByMember("member1"); + query.Count = 5; + query.WithScores = true; + query.WithAttributes = true; + query.Epsilon = 0.1; + query.SearchExplorationFactor = 400; + query.FilterExpression = "category='test'"; + query.MaxFilteringEffort = 1000; + query.UseExactSearch = true; + query.DisableThreading = true; + prefixed.VectorSetSimilaritySearch( + "vectorset", + query, + CommandFlags.FireAndForget); + mock.Received().VectorSetSimilaritySearch( + "prefix:vectorset", + query, + CommandFlags.FireAndForget); + } + + [Fact] + public void VectorSetSimilaritySearchByVector_DefaultParameters() + { + var vector = new[] { 1.0f, 2.0f }.AsMemory(); + + // Test that default parameters work correctly + var query = VectorSetSimilaritySearchRequest.ByVector(vector); + prefixed.VectorSetSimilaritySearch("vectorset", query); + mock.Received().VectorSetSimilaritySearch( + "prefix:vectorset", + query); + } + } +} diff --git a/tests/StackExchange.Redis.Tests/KeyTests.cs b/tests/StackExchange.Redis.Tests/KeyTests.cs new file mode 100644 index 000000000..e956af4ff --- /dev/null +++ b/tests/StackExchange.Redis.Tests/KeyTests.cs @@ -0,0 +1,457 @@ +using System; +using System.Buffers; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using System.Threading.Tasks; +using Xunit; + 
+namespace StackExchange.Redis.Tests; + +[RunPerProtocol] +public class KeyTests(ITestOutputHelper output, SharedConnectionFixture fixture) : TestBase(output, fixture) +{ + [Fact] + public async Task TestScan() + { + await using var conn = Create(allowAdmin: true); + + var dbId = TestConfig.GetDedicatedDB(conn); + var db = conn.GetDatabase(dbId); + var server = GetAnyPrimary(conn); + var prefix = Me(); + server.FlushDatabase(dbId, flags: CommandFlags.FireAndForget); + + const int Count = 1000; + for (int i = 0; i < Count; i++) + db.StringSet(prefix + "x" + i, "y" + i, flags: CommandFlags.FireAndForget); + + var count = server.Keys(dbId, prefix + "*").Count(); + Assert.Equal(Count, count); + } + + [Fact] + public async Task FlushFetchRandomKey() + { + await using var conn = Create(allowAdmin: true); + + var dbId = TestConfig.GetDedicatedDB(conn); + Skip.IfMissingDatabase(conn, dbId); + var db = conn.GetDatabase(dbId); + var prefix = Me(); + conn.GetServer(TestConfig.Current.PrimaryServerAndPort).FlushDatabase(dbId, CommandFlags.FireAndForget); + string? anyKey = db.KeyRandom(); + + Assert.Null(anyKey); + db.StringSet(prefix + "abc", "def"); + byte[]? keyBytes = db.KeyRandom(); + + Assert.NotNull(keyBytes); + Assert.Equal(prefix + "abc", Encoding.UTF8.GetString(keyBytes)); + } + + [Fact] + public async Task Zeros() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + var key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + db.StringSet(key, 123, flags: CommandFlags.FireAndForget); + int k = (int)db.StringGet(key); + Assert.Equal(123, k); + + db.KeyDelete(key, CommandFlags.FireAndForget); + int i = (int)db.StringGet(key); + Assert.Equal(0, i); + + Assert.True(db.StringGet(key).IsNull); + int? 
value = (int?)db.StringGet(key); + Assert.False(value.HasValue); + } + + [Fact] + public void PrependAppend() + { + { + // simple + RedisKey key = "world"; + var ret = key.Prepend("hello"); + Assert.Equal("helloworld", ret); + } + + { + RedisKey key1 = "world"; + RedisKey key2 = Encoding.UTF8.GetBytes("hello"); + var key3 = key1.Prepend(key2); + Assert.True(ReferenceEquals(key1.KeyValue, key3.KeyValue)); + Assert.True(ReferenceEquals(key2.KeyValue, key3.KeyPrefix)); + Assert.Equal("helloworld", key3); + } + + { + RedisKey key = "hello"; + var ret = key.Append("world"); + Assert.Equal("helloworld", ret); + } + + { + RedisKey key1 = Encoding.UTF8.GetBytes("hello"); + RedisKey key2 = "world"; + var key3 = key1.Append(key2); + Assert.True(ReferenceEquals(key2.KeyValue, key3.KeyValue)); + Assert.True(ReferenceEquals(key1.KeyValue, key3.KeyPrefix)); + Assert.Equal("helloworld", key3); + } + } + + [Fact] + public async Task Exists() + { + await using var conn = Create(); + + RedisKey key = Me(); + RedisKey key2 = Me() + "2"; + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + db.KeyDelete(key2, CommandFlags.FireAndForget); + + Assert.False(db.KeyExists(key)); + Assert.False(db.KeyExists(key2)); + Assert.Equal(0, db.KeyExists([key, key2])); + + db.StringSet(key, "new value", flags: CommandFlags.FireAndForget); + Assert.True(db.KeyExists(key)); + Assert.False(db.KeyExists(key2)); + Assert.Equal(1, db.KeyExists([key, key2])); + + db.StringSet(key2, "new value", flags: CommandFlags.FireAndForget); + Assert.True(db.KeyExists(key)); + Assert.True(db.KeyExists(key2)); + Assert.Equal(2, db.KeyExists([key, key2])); + } + + [Fact] + public async Task ExistsAsync() + { + await using var conn = Create(); + + RedisKey key = Me(); + RedisKey key2 = Me() + "2"; + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + db.KeyDelete(key2, CommandFlags.FireAndForget); + var a1 = db.KeyExistsAsync(key).ForAwait(); + var a2 = 
db.KeyExistsAsync(key2).ForAwait(); + var a3 = db.KeyExistsAsync([key, key2]).ForAwait(); + + db.StringSet(key, "new value", flags: CommandFlags.FireAndForget); + + var b1 = db.KeyExistsAsync(key).ForAwait(); + var b2 = db.KeyExistsAsync(key2).ForAwait(); + var b3 = db.KeyExistsAsync([key, key2]).ForAwait(); + + db.StringSet(key2, "new value", flags: CommandFlags.FireAndForget); + + var c1 = db.KeyExistsAsync(key).ForAwait(); + var c2 = db.KeyExistsAsync(key2).ForAwait(); + var c3 = db.KeyExistsAsync([key, key2]).ForAwait(); + + Assert.False(await a1); + Assert.False(await a2); + Assert.Equal(0, await a3); + + Assert.True(await b1); + Assert.False(await b2); + Assert.Equal(1, await b3); + + Assert.True(await c1); + Assert.True(await c2); + Assert.Equal(2, await c3); + } + + [Fact] + public async Task KeyEncoding() + { + await using var conn = Create(); + + var key = Me(); + var db = conn.GetDatabase(); + + db.KeyDelete(key, CommandFlags.FireAndForget); + db.StringSet(key, "new value", flags: CommandFlags.FireAndForget); + + Assert.True(db.KeyEncoding(key) is "embstr" or "raw"); // server-version dependent + Assert.True(await db.KeyEncodingAsync(key) is "embstr" or "raw"); + + db.KeyDelete(key, CommandFlags.FireAndForget); + db.ListLeftPush(key, "new value", flags: CommandFlags.FireAndForget); + + // Depending on server version, this is going to vary - we're sanity checking here. 
+ var listTypes = new[] { "ziplist", "quicklist", "listpack" }; + Assert.Contains(db.KeyEncoding(key), listTypes); + Assert.Contains(await db.KeyEncodingAsync(key), listTypes); + + var keyNotExists = key + "no-exist"; + Assert.Null(db.KeyEncoding(keyNotExists)); + Assert.Null(await db.KeyEncodingAsync(keyNotExists)); + } + + [Fact] + public async Task KeyRefCount() + { + await using var conn = Create(); + + var key = Me(); + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + db.StringSet(key, "new value", flags: CommandFlags.FireAndForget); + + Assert.Equal(1, db.KeyRefCount(key)); + Assert.Equal(1, await db.KeyRefCountAsync(key)); + + var keyNotExists = key + "no-exist"; + Assert.Null(db.KeyRefCount(keyNotExists)); + Assert.Null(await db.KeyRefCountAsync(keyNotExists)); + } + + [Fact] + public async Task KeyFrequency() + { + await using var conn = Create(allowAdmin: true, require: RedisFeatures.v4_0_0); + + var key = Me(); + var db = conn.GetDatabase(); + var server = GetServer(conn); + + var serverConfig = server.ConfigGet("maxmemory-policy"); + var maxMemoryPolicy = serverConfig.Length == 1 ? 
serverConfig[0].Value : ""; + Log($"maxmemory-policy detected as {maxMemoryPolicy}"); + var isLfu = maxMemoryPolicy.Contains("lfu"); + + db.KeyDelete(key, CommandFlags.FireAndForget); + db.StringSet(key, "new value", flags: CommandFlags.FireAndForget); + db.StringGet(key); + + if (isLfu) + { + var count = db.KeyFrequency(key); + Assert.True(count > 0); + + count = await db.KeyFrequencyAsync(key); + Assert.True(count > 0); + + // Key not exists + db.KeyDelete(key, CommandFlags.FireAndForget); + var res = db.KeyFrequency(key); + Assert.Null(res); + + res = await db.KeyFrequencyAsync(key); + Assert.Null(res); + } + else + { + var ex = Assert.Throws<RedisServerException>(() => db.KeyFrequency(key)); + Assert.Contains("An LFU maxmemory policy is not selected", ex.Message); + ex = await Assert.ThrowsAsync<RedisServerException>(() => db.KeyFrequencyAsync(key)); + Assert.Contains("An LFU maxmemory policy is not selected", ex.Message); + } + } + + private static void TestTotalLengthAndCopyTo(in RedisKey key, int expectedLength) + { + var length = key.TotalLength(); + Assert.Equal(expectedLength, length); + var arr = ArrayPool<byte>.Shared.Rent(length + 20); // deliberately over-sized + try + { + var written = key.CopyTo(arr); + Assert.Equal(length, written); + + var viaCast = (byte[]?)key; + ReadOnlySpan<byte> x = viaCast, y = new ReadOnlySpan<byte>(arr, 0, length); + Assert.True(x.SequenceEqual(y)); + Assert.True(key.IsNull == viaCast is null); + } + finally + { + ArrayPool<byte>.Shared.Return(arr); + } + } + + [Fact] + public void NullKeySlot() + { + RedisKey key = RedisKey.Null; + Assert.True(key.TryGetSimpleBuffer(out var buffer)); + Assert.Empty(buffer); + TestTotalLengthAndCopyTo(key, 0); + + Assert.Equal(-1, GetHashSlot(key)); + } + + private static readonly byte[] KeyPrefix = Encoding.UTF8.GetBytes("abcde"); + + private static int GetHashSlot(in RedisKey key) + { + var strategy = new ServerSelectionStrategy(null!) 
+ { + ServerType = ServerType.Cluster, + }; + return strategy.HashSlot(key); + } + + [Theory] + [InlineData(false, null, -1)] + [InlineData(false, "", 0)] + [InlineData(false, "f", 3168)] + [InlineData(false, "abcde", 16097)] + [InlineData(false, "abcdef", 15101)] + [InlineData(false, "abcdeffsdkjhsdfgkjh sdkjhsdkjf hsdkjfh skudrfy7 348iu yksef78 dssdhkfh ##$OIU", 5073)] + [InlineData(false, "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Cras lobortis quam ac molestie ultricies. Duis maximus, nunc a auctor faucibus, risus turpis porttitor nibh, sit amet consequat lacus nibh quis nisi. Aliquam ipsum quam, dapibus ut ex eu, efficitur vestibulum dui. Sed a nibh ut felis congue tempor vel vel lectus. Phasellus a neque placerat, blandit massa sed, imperdiet urna. Praesent scelerisque lorem ipsum, non facilisis libero hendrerit quis. Nullam sit amet malesuada velit, ac lacinia lacus. Donec mollis a massa sed egestas. Suspendisse vitae augue quis erat gravida consectetur. Aenean interdum neque id lacinia eleifend.", 4954)] + [InlineData(true, null, 16097)] + [InlineData(true, "", 16097)] // note same as false/abcde + [InlineData(true, "f", 15101)] // note same as false/abcdef + [InlineData(true, "abcde", 4089)] + [InlineData(true, "abcdef", 1167)] + [InlineData(true, "👻👩‍👩‍👦‍👦", 8494)] + [InlineData(true, "abcdeffsdkjhsdfgkjh sdkjhsdkjf hsdkjfh skudrfy7 348iu yksef78 dssdhkfh ##$OIU", 10923)] + [InlineData(true, "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Cras lobortis quam ac molestie ultricies. Duis maximus, nunc a auctor faucibus, risus turpis porttitor nibh, sit amet consequat lacus nibh quis nisi. Aliquam ipsum quam, dapibus ut ex eu, efficitur vestibulum dui. Sed a nibh ut felis congue tempor vel vel lectus. Phasellus a neque placerat, blandit massa sed, imperdiet urna. Praesent scelerisque lorem ipsum, non facilisis libero hendrerit quis. Nullam sit amet malesuada velit, ac lacinia lacus. Donec mollis a massa sed egestas. 
Suspendisse vitae augue quis erat gravida consectetur. Aenean interdum neque id lacinia eleifend.", 4452)] + public void TestStringKeySlot(bool prefixed, string? s, int slot) + { + RedisKey key = prefixed ? new RedisKey(KeyPrefix, s) : s; + if (s is null && !prefixed) + { + Assert.True(key.TryGetSimpleBuffer(out var buffer)); + Assert.Empty(buffer); + TestTotalLengthAndCopyTo(key, 0); + } + else + { + Assert.False(key.TryGetSimpleBuffer(out var _)); + } + TestTotalLengthAndCopyTo(key, Encoding.UTF8.GetByteCount(s ?? "") + (prefixed ? KeyPrefix.Length : 0)); + + Assert.Equal(slot, GetHashSlot(key)); + } + + [Theory] + [InlineData(false, -1, -1)] + [InlineData(false, 0, 0)] + [InlineData(false, 1, 10242)] + [InlineData(false, 6, 10015)] + [InlineData(false, 47, 849)] + [InlineData(false, 14123, 2356)] + [InlineData(true, -1, 16097)] + [InlineData(true, 0, 16097)] + [InlineData(true, 1, 7839)] + [InlineData(true, 6, 6509)] + [InlineData(true, 47, 2217)] + [InlineData(true, 14123, 6773)] + public void TestBlobKeySlot(bool prefixed, int count, int slot) + { + byte[]? blob = null; + if (count >= 0) + { + blob = new byte[count]; + new Random(count).NextBytes(blob); + for (int i = 0; i < blob.Length; i++) + { + if (blob[i] == (byte)'{') blob[i] = (byte)'!'; // avoid unexpected hash tags + } + } + RedisKey key = prefixed ? new RedisKey(KeyPrefix, blob) : blob; + if (prefixed) + { + Assert.False(key.TryGetSimpleBuffer(out _)); + } + else + { + Assert.True(key.TryGetSimpleBuffer(out var buffer)); + if (blob is null) + { + Assert.Empty(buffer); + } + else + { + Assert.Same(blob, buffer); + } + } + TestTotalLengthAndCopyTo(key, (blob?.Length ?? 0) + (prefixed ? 
KeyPrefix.Length : 0)); + + Assert.Equal(slot, GetHashSlot(key)); + } + + [Theory] + [MemberData(nameof(KeyEqualityData))] + public void KeyEquality(RedisKey x, RedisKey y, bool equal) + { + if (equal) + { + Assert.Equal(x, y); + Assert.True(x == y); + Assert.False(x != y); + Assert.True(x.Equals(y)); + Assert.True(x.Equals((object)y)); + Assert.Equal(x.GetHashCode(), y.GetHashCode()); + } + else + { + Assert.NotEqual(x, y); + Assert.False(x == y); + Assert.True(x != y); + Assert.False(x.Equals(y)); + Assert.False(x.Equals((object)y)); + // note that this last one is not strictly required, but: we pass, so: yay! + Assert.NotEqual(x.GetHashCode(), y.GetHashCode()); + } + } + + [System.Diagnostics.CodeAnalysis.SuppressMessage("Usage", "xUnit1046:Avoid using TheoryDataRow arguments that are not serializable", Justification = "No options at the moment.")] + public static IEnumerable<TheoryDataRow<RedisKey, RedisKey, bool>> KeyEqualityData() + { + RedisKey abcString = "abc", abcBytes = Encoding.UTF8.GetBytes("abc"); + RedisKey abcdefString = "abcdef", abcdefBytes = Encoding.UTF8.GetBytes("abcdef"); + + yield return new(RedisKey.Null, abcString, false); + yield return new(RedisKey.Null, abcBytes, false); + yield return new(abcString, RedisKey.Null, false); + yield return new(abcBytes, RedisKey.Null, false); + yield return new(RedisKey.Null, RedisKey.Null, true); + yield return new(new RedisKey((string?)null), RedisKey.Null, true); + yield return new(new RedisKey(null, (byte[]?)null), RedisKey.Null, true); + yield return new(new RedisKey(""), RedisKey.Null, false); + yield return new(new RedisKey(null, Array.Empty<byte>()), RedisKey.Null, false); + + yield return new(abcString, abcString, true); + yield return new(abcBytes, abcBytes, true); + yield return new(abcString, abcBytes, true); + yield return new(abcBytes, abcString, true); + + yield return new(abcdefString, abcdefString, true); + yield return new(abcdefBytes, abcdefBytes, true); + yield return new(abcdefString, abcdefBytes, true); + yield return 
new(abcdefBytes, abcdefString, true); + + yield return new(abcString, abcdefString, false); + yield return new(abcBytes, abcdefBytes, false); + yield return new(abcString, abcdefBytes, false); + yield return new(abcBytes, abcdefString, false); + + yield return new(abcdefString, abcString, false); + yield return new(abcdefBytes, abcBytes, false); + yield return new(abcdefString, abcBytes, false); + yield return new(abcdefBytes, abcString, false); + + var x = abcString.Append("def"); + yield return new(abcdefString, x, true); + yield return new(abcdefBytes, x, true); + yield return new(x, abcdefBytes, true); + yield return new(x, abcdefString, true); + yield return new(abcString, x, false); + yield return new(abcString, x, false); + yield return new(x, abcString, false); + yield return new(x, abcString, false); + } +} diff --git a/tests/StackExchange.Redis.Tests/Keys.cs b/tests/StackExchange.Redis.Tests/Keys.cs deleted file mode 100644 index 8dbe330ee..000000000 --- a/tests/StackExchange.Redis.Tests/Keys.cs +++ /dev/null @@ -1,267 +0,0 @@ -using System; -using System.Linq; -using System.Text; -using System.Threading.Tasks; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - [Collection(SharedConnectionFixture.Key)] - public class Keys : TestBase - { - public Keys(ITestOutputHelper output, SharedConnectionFixture fixture) : base (output, fixture) { } - - [Fact] - public void TestScan() - { - using (var muxer = Create(allowAdmin: true)) - { - var dbId = TestConfig.GetDedicatedDB(); - var db = muxer.GetDatabase(dbId); - var server = GetAnyMaster(muxer); - var prefix = Me(); - server.FlushDatabase(dbId, flags: CommandFlags.FireAndForget); - - const int Count = 1000; - for (int i = 0; i < Count; i++) - db.StringSet(prefix + "x" + i, "y" + i, flags: CommandFlags.FireAndForget); - - var count = server.Keys(dbId, prefix + "*").Count(); - Assert.Equal(Count, count); - } - } - - [Fact] - public void FlushFetchRandomKey() - { - using (var conn = 
Create(allowAdmin: true)) - { - var dbId = TestConfig.GetDedicatedDB(conn); - Skip.IfMissingDatabase(conn, dbId); - var db = conn.GetDatabase(dbId); - var prefix = Me(); - conn.GetServer(TestConfig.Current.MasterServerAndPort).FlushDatabase(dbId, CommandFlags.FireAndForget); - string anyKey = db.KeyRandom(); - - Assert.Null(anyKey); - db.StringSet(prefix + "abc", "def"); - byte[] keyBytes = db.KeyRandom(); - - Assert.Equal(prefix + "abc", Encoding.UTF8.GetString(keyBytes)); - } - } - - [Fact] - public void Zeros() - { - using (var conn = Create()) - { - var db = conn.GetDatabase(); - var key = Me(); - db.KeyDelete(key, CommandFlags.FireAndForget); - db.StringSet(key, 123, flags: CommandFlags.FireAndForget); - int k = (int)db.StringGet(key); - Assert.Equal(123, k); - - db.KeyDelete(key, CommandFlags.FireAndForget); - int i = (int)db.StringGet(key); - Assert.Equal(0, i); - - Assert.True(db.StringGet(key).IsNull); - int? value = (int?)db.StringGet(key); - Assert.False(value.HasValue); - } - } - - [Fact] - public void PrependAppend() - { - { - // simple - RedisKey key = "world"; - var ret = key.Prepend("hello"); - Assert.Equal("helloworld", ret); - } - - { - RedisKey key1 = "world"; - RedisKey key2 = Encoding.UTF8.GetBytes("hello"); - var key3 = key1.Prepend(key2); - Assert.True(ReferenceEquals(key1.KeyValue, key3.KeyValue)); - Assert.True(ReferenceEquals(key2.KeyValue, key3.KeyPrefix)); - Assert.Equal("helloworld", key3); - } - - { - RedisKey key = "hello"; - var ret = key.Append("world"); - Assert.Equal("helloworld", ret); - } - - { - RedisKey key1 = Encoding.UTF8.GetBytes("hello"); - RedisKey key2 = "world"; - var key3 = key1.Append(key2); - Assert.True(ReferenceEquals(key2.KeyValue, key3.KeyValue)); - Assert.True(ReferenceEquals(key1.KeyValue, key3.KeyPrefix)); - Assert.Equal("helloworld", key3); - } - } - - [Fact] - public void Exists() - { - using (var muxer = Create()) - { - RedisKey key = Me(); - RedisKey key2 = Me() + "2"; - var db = muxer.GetDatabase(); - 
db.KeyDelete(key, CommandFlags.FireAndForget); - db.KeyDelete(key2, CommandFlags.FireAndForget); - - Assert.False(db.KeyExists(key)); - Assert.False(db.KeyExists(key2)); - Assert.Equal(0, db.KeyExists(new[] { key, key2 })); - - db.StringSet(key, "new value", flags: CommandFlags.FireAndForget); - Assert.True(db.KeyExists(key)); - Assert.False(db.KeyExists(key2)); - Assert.Equal(1, db.KeyExists(new[] { key, key2 })); - - db.StringSet(key2, "new value", flags: CommandFlags.FireAndForget); - Assert.True(db.KeyExists(key)); - Assert.True(db.KeyExists(key2)); - Assert.Equal(2, db.KeyExists(new[] { key, key2 })); - } - } - - [Fact] - public async Task ExistsAsync() - { - using (var muxer = Create()) - { - RedisKey key = Me(); - RedisKey key2 = Me() + "2"; - var db = muxer.GetDatabase(); - db.KeyDelete(key, CommandFlags.FireAndForget); - db.KeyDelete(key2, CommandFlags.FireAndForget); - var a1 = db.KeyExistsAsync(key).ForAwait(); - var a2 = db.KeyExistsAsync(key2).ForAwait(); - var a3 = db.KeyExistsAsync(new[] { key, key2 }).ForAwait(); - - db.StringSet(key, "new value", flags: CommandFlags.FireAndForget); - - var b1 = db.KeyExistsAsync(key).ForAwait(); - var b2 = db.KeyExistsAsync(key2).ForAwait(); - var b3 = db.KeyExistsAsync(new[] { key, key2 }).ForAwait(); - - db.StringSet(key2, "new value", flags: CommandFlags.FireAndForget); - - var c1 = db.KeyExistsAsync(key).ForAwait(); - var c2 = db.KeyExistsAsync(key2).ForAwait(); - var c3 = db.KeyExistsAsync(new[] { key, key2 }).ForAwait(); - - Assert.False(await a1); - Assert.False(await a2); - Assert.Equal(0, await a3); - - Assert.True(await b1); - Assert.False(await b2); - Assert.Equal(1, await b3); - - Assert.True(await c1); - Assert.True(await c2); - Assert.Equal(2, await c3); - } - } - - [Fact] - public async Task IdleTime() - { - using (var muxer = Create()) - { - RedisKey key = Me(); - var db = muxer.GetDatabase(); - db.KeyDelete(key, CommandFlags.FireAndForget); - db.StringSet(key, "new value", flags: 
CommandFlags.FireAndForget); - await Task.Delay(2000).ForAwait(); - var idleTime = db.KeyIdleTime(key); - Assert.True(idleTime > TimeSpan.Zero); - - db.StringSet(key, "new value2", flags: CommandFlags.FireAndForget); - var idleTime2 = db.KeyIdleTime(key); - Assert.True(idleTime2 < idleTime); - - db.KeyDelete(key); - var idleTime3 = db.KeyIdleTime(key); - Assert.Null(idleTime3); - } - } - - [Fact] - public async Task TouchIdleTime() - { - using (var muxer = Create()) - { - Skip.IfMissingFeature(muxer, nameof(RedisFeatures.KeyTouch), r => r.KeyTouch); - - RedisKey key = Me(); - var db = muxer.GetDatabase(); - db.KeyDelete(key, CommandFlags.FireAndForget); - db.StringSet(key, "new value", flags: CommandFlags.FireAndForget); - await Task.Delay(2000).ForAwait(); - var idleTime = db.KeyIdleTime(key); - Assert.True(idleTime > TimeSpan.Zero); - - Assert.True(db.KeyTouch(key)); - var idleTime1 = db.KeyIdleTime(key); - Assert.True(idleTime1 < idleTime); - } - } - - [Fact] - public async Task IdleTimeAsync() - { - using (var muxer = Create()) - { - RedisKey key = Me(); - var db = muxer.GetDatabase(); - db.KeyDelete(key, CommandFlags.FireAndForget); - db.StringSet(key, "new value", flags: CommandFlags.FireAndForget); - await Task.Delay(2000).ForAwait(); - var idleTime = await db.KeyIdleTimeAsync(key).ForAwait(); - Assert.True(idleTime > TimeSpan.Zero); - - db.StringSet(key, "new value2", flags: CommandFlags.FireAndForget); - var idleTime2 = await db.KeyIdleTimeAsync(key).ForAwait(); - Assert.True(idleTime2 < idleTime); - - db.KeyDelete(key); - var idleTime3 = await db.KeyIdleTimeAsync(key).ForAwait(); - Assert.Null(idleTime3); - } - } - - [Fact] - public async Task TouchIdleTimeAsync() - { - using (var muxer = Create()) - { - Skip.IfMissingFeature(muxer, nameof(RedisFeatures.KeyTouch), r => r.KeyTouch); - - RedisKey key = Me(); - var db = muxer.GetDatabase(); - db.KeyDelete(key, CommandFlags.FireAndForget); - db.StringSet(key, "new value", flags: CommandFlags.FireAndForget); - 
await Task.Delay(2000).ForAwait(); - var idleTime = await db.KeyIdleTimeAsync(key).ForAwait(); - Assert.True(idleTime > TimeSpan.Zero); - - Assert.True(await db.KeyTouchAsync(key).ForAwait()); - var idleTime1 = await db.KeyIdleTimeAsync(key).ForAwait(); - Assert.True(idleTime1 < idleTime); - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/KeysAndValues.cs b/tests/StackExchange.Redis.Tests/KeysAndValues.cs deleted file mode 100644 index b96b2a90f..000000000 --- a/tests/StackExchange.Redis.Tests/KeysAndValues.cs +++ /dev/null @@ -1,177 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Globalization; -using Xunit; - -namespace StackExchange.Redis.Tests -{ - public class KeysAndValues - { - [Fact] - public void TestValues() - { - RedisValue @default = default(RedisValue); - CheckNull(@default); - - RedisValue nullString = (string)null; - CheckNull(nullString); - - RedisValue nullBlob = (byte[])null; - CheckNull(nullBlob); - - RedisValue emptyString = ""; - CheckNotNull(emptyString); - - RedisValue emptyBlob = new byte[0]; - CheckNotNull(emptyBlob); - - RedisValue a0 = new string('a', 1); - CheckNotNull(a0); - RedisValue a1 = new string('a', 1); - CheckNotNull(a1); - RedisValue b0 = new[] { (byte)'b' }; - CheckNotNull(b0); - RedisValue b1 = new[] { (byte)'b' }; - CheckNotNull(b1); - - RedisValue i4 = 1; - CheckNotNull(i4); - RedisValue i8 = 1L; - CheckNotNull(i8); - - RedisValue bool1 = true; - CheckNotNull(bool1); - RedisValue bool2 = false; - CheckNotNull(bool2); - RedisValue bool3 = true; - CheckNotNull(bool3); - - CheckSame(a0, a0); - CheckSame(a1, a1); - CheckSame(a0, a1); - - CheckSame(b0, b0); - CheckSame(b1, b1); - CheckSame(b0, b1); - - CheckSame(i4, i4); - CheckSame(i8, i8); - CheckSame(i4, i8); - - CheckSame(bool1, bool3); - CheckNotSame(bool1, bool2); - } - - internal static void CheckSame(RedisValue x, RedisValue y) - { - Assert.True(Equals(x, y), "Equals(x, y)"); - Assert.True(Equals(y, x), "Equals(y, x)"); - 
Assert.True(EqualityComparer<RedisValue>.Default.Equals(x, y), "EQ(x,y)"); - Assert.True(EqualityComparer<RedisValue>.Default.Equals(y, x), "EQ(y,x)"); - Assert.True(x == y, "x==y"); - Assert.True(y == x, "y==x"); - Assert.False(x != y, "x!=y"); - Assert.False(y != x, "y!=x"); - Assert.True(x.Equals(y),"x.EQ(y)"); - Assert.True(y.Equals(x), "y.EQ(x)"); - Assert.True(x.GetHashCode() == y.GetHashCode(), "GetHashCode"); - } - - private void CheckNotSame(RedisValue x, RedisValue y) - { - Assert.False(Equals(x, y)); - Assert.False(Equals(y, x)); - Assert.False(EqualityComparer<RedisValue>.Default.Equals(x, y)); - Assert.False(EqualityComparer<RedisValue>.Default.Equals(y, x)); - Assert.False(x == y); - Assert.False(y == x); - Assert.True(x != y); - Assert.True(y != x); - Assert.False(x.Equals(y)); - Assert.False(y.Equals(x)); - Assert.False(x.GetHashCode() == y.GetHashCode()); // well, very unlikely - } - - private void CheckNotNull(RedisValue value) - { - Assert.False(value.IsNull); - Assert.NotNull((byte[])value); - Assert.NotNull((string)value); - Assert.NotEqual(-1, value.GetHashCode()); - - Assert.NotNull((string)value); - Assert.NotNull((byte[])value); - - CheckSame(value, value); - CheckNotSame(value, default(RedisValue)); - CheckNotSame(value, (string)null); - CheckNotSame(value, (byte[])null); - } - - internal static void CheckNull(RedisValue value) - { - Assert.True(value.IsNull); - Assert.True(value.IsNullOrEmpty); - Assert.False(value.IsInteger); - Assert.Equal(-1, value.GetHashCode()); - - Assert.Null((string)value); - Assert.Null((byte[])value); - - Assert.Equal(0, (int)value); - Assert.Equal(0L, (long)value); - - CheckSame(value, value); - //CheckSame(value, default(RedisValue)); - //CheckSame(value, (string)null); - //CheckSame(value, (byte[])null); - } - - [Fact] - public void ValuesAreConvertible() - { - RedisValue val = 123; - object o = val; - byte[] blob = (byte[])Convert.ChangeType(o, typeof(byte[])); - - Assert.Equal(3, blob.Length); - Assert.Equal((byte)'1', blob[0]); - Assert.Equal((byte)'2', 
blob[1]); - Assert.Equal((byte)'3', blob[2]); - - Assert.Equal(123, Convert.ToDouble(o)); - - IConvertible c = (IConvertible)o; - // ReSharper disable RedundantCast - Assert.Equal((short)123, c.ToInt16(CultureInfo.InvariantCulture)); - Assert.Equal((int)123, c.ToInt32(CultureInfo.InvariantCulture)); - Assert.Equal((long)123, c.ToInt64(CultureInfo.InvariantCulture)); - Assert.Equal((float)123, c.ToSingle(CultureInfo.InvariantCulture)); - Assert.Equal("123", c.ToString(CultureInfo.InvariantCulture)); - Assert.Equal((double)123, c.ToDouble(CultureInfo.InvariantCulture)); - Assert.Equal((decimal)123, c.ToDecimal(CultureInfo.InvariantCulture)); - Assert.Equal((ushort)123, c.ToUInt16(CultureInfo.InvariantCulture)); - Assert.Equal((uint)123, c.ToUInt32(CultureInfo.InvariantCulture)); - Assert.Equal((ulong)123, c.ToUInt64(CultureInfo.InvariantCulture)); - - blob = (byte[])c.ToType(typeof(byte[]), CultureInfo.InvariantCulture); - Assert.Equal(3, blob.Length); - Assert.Equal((byte)'1', blob[0]); - Assert.Equal((byte)'2', blob[1]); - Assert.Equal((byte)'3', blob[2]); - } - - [Fact] - public void CanBeDynamic() - { - RedisValue val = "abc"; - object o = val; - dynamic d = o; - byte[] blob = (byte[])d; // could be in a try/catch - Assert.Equal(3, blob.Length); - Assert.Equal((byte)'a', blob[0]); - Assert.Equal((byte)'b', blob[1]); - Assert.Equal((byte)'c', blob[2]); - } - } -} diff --git a/tests/StackExchange.Redis.Tests/Latency.cs b/tests/StackExchange.Redis.Tests/Latency.cs deleted file mode 100644 index 89f377f07..000000000 --- a/tests/StackExchange.Redis.Tests/Latency.cs +++ /dev/null @@ -1,88 +0,0 @@ -using System.Threading.Tasks; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - [Collection(SharedConnectionFixture.Key)] - public class Latency : TestBase - { - - public Latency(ITestOutputHelper output, SharedConnectionFixture fixture) : base(output, fixture) { } - - [Fact] - public async Task CanCallDoctor() - { - using (var conn = 
Create()) - { - var server = conn.GetServer(conn.GetEndPoints()[0]); - string doctor = server.LatencyDoctor(); - Assert.NotNull(doctor); - Assert.NotEqual("", doctor); - - doctor = await server.LatencyDoctorAsync(); - Assert.NotNull(doctor); - Assert.NotEqual("", doctor); - } - } - - [Fact] - public async Task CanReset() - { - using (var conn = Create()) - { - var server = conn.GetServer(conn.GetEndPoints()[0]); - _ = server.LatencyReset(); - var count = await server.LatencyResetAsync(new [] { "command" }); - Assert.Equal(0, count); - - count = await server.LatencyResetAsync(new [] { "command", "fast-command" }); - Assert.Equal(0, count); - } - } - - [Fact] - public async Task GetLatest() - { - using (var conn = Create(allowAdmin: true)) - { - var server = conn.GetServer(conn.GetEndPoints()[0]); - server.ConfigSet("latency-monitor-threshold", 100); - server.LatencyReset(); - var arr = server.LatencyLatest(); - Assert.Empty(arr); - - var now = await server.TimeAsync(); - server.Execute("debug", "sleep", "0.5"); // cause something to be slow - - arr = await server.LatencyLatestAsync(); - var item = Assert.Single(arr); - Assert.Equal("command", item.EventName); - Assert.True(item.DurationMilliseconds >= 400 && item.DurationMilliseconds <= 600); - Assert.Equal(item.DurationMilliseconds, item.MaxDurationMilliseconds); - Assert.True(item.Timestamp >= now.AddSeconds(-2) && item.Timestamp <= now.AddSeconds(2)); - } - } - - [Fact] - public async Task GetHistory() - { - using (var conn = Create(allowAdmin: true)) - { - var server = conn.GetServer(conn.GetEndPoints()[0]); - server.ConfigSet("latency-monitor-threshold", 100); - server.LatencyReset(); - var arr = server.LatencyHistory("command"); - Assert.Empty(arr); - - var now = await server.TimeAsync(); - server.Execute("debug", "sleep", "0.5"); // cause something to be slow - - arr = await server.LatencyHistoryAsync("command"); - var item = Assert.Single(arr); - Assert.True(item.DurationMilliseconds >= 400 && 
item.DurationMilliseconds <= 600); - Assert.True(item.Timestamp >= now.AddSeconds(-2) && item.Timestamp <= now.AddSeconds(2)); - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/LatencyTests.cs b/tests/StackExchange.Redis.Tests/LatencyTests.cs new file mode 100644 index 000000000..42b4d7b05 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/LatencyTests.cs @@ -0,0 +1,81 @@ +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +[Collection(NonParallelCollection.Name)] +public class LatencyTests(ITestOutputHelper output, SharedConnectionFixture fixture) : TestBase(output, fixture) +{ + [Fact] + public async Task CanCallDoctor() + { + await using var conn = Create(); + + var server = conn.GetServer(conn.GetEndPoints()[0]); + string? doctor = server.LatencyDoctor(); + Assert.NotNull(doctor); + Assert.NotEqual("", doctor); + + doctor = await server.LatencyDoctorAsync(); + Assert.NotNull(doctor); + Assert.NotEqual("", doctor); + } + + [Fact] + public async Task CanReset() + { + await using var conn = Create(); + + var server = conn.GetServer(conn.GetEndPoints()[0]); + _ = server.LatencyReset(); + var count = await server.LatencyResetAsync(["command"]); + Assert.Equal(0, count); + + count = await server.LatencyResetAsync(["command", "fast-command"]); + Assert.Equal(0, count); + } + + [Fact] + public async Task GetLatest() + { + Skip.UnlessLongRunning(); + await using var conn = Create(allowAdmin: true); + + var server = conn.GetServer(conn.GetEndPoints()[0]); + server.ConfigSet("latency-monitor-threshold", 50); + server.LatencyReset(); + var arr = server.LatencyLatest(); + Assert.Empty(arr); + + var now = await server.TimeAsync(); + server.Execute("debug", "sleep", "0.5"); // cause something to be slow + + arr = await server.LatencyLatestAsync(); + var item = Assert.Single(arr); + Assert.Equal("command", item.EventName); + Assert.True(item.DurationMilliseconds >= 400 && item.DurationMilliseconds <= 600); + 
Assert.Equal(item.DurationMilliseconds, item.MaxDurationMilliseconds); + Assert.True(item.Timestamp >= now.AddSeconds(-2) && item.Timestamp <= now.AddSeconds(2)); + } + + [Fact] + public async Task GetHistory() + { + Skip.UnlessLongRunning(); + await using var conn = Create(allowAdmin: true); + + var server = conn.GetServer(conn.GetEndPoints()[0]); + server.ConfigSet("latency-monitor-threshold", 50); + server.LatencyReset(); + var arr = server.LatencyHistory("command"); + Assert.Empty(arr); + + var now = await server.TimeAsync(); + server.Execute("debug", "sleep", "0.5"); // cause something to be slow + + arr = await server.LatencyHistoryAsync("command"); + var item = Assert.Single(arr); + Assert.True(item.DurationMilliseconds >= 400 && item.DurationMilliseconds <= 600); + Assert.True(item.Timestamp >= now.AddSeconds(-2) && item.Timestamp <= now.AddSeconds(2)); + } +} diff --git a/tests/StackExchange.Redis.Tests/Lex.cs b/tests/StackExchange.Redis.Tests/Lex.cs deleted file mode 100644 index e11657be3..000000000 --- a/tests/StackExchange.Redis.Tests/Lex.cs +++ /dev/null @@ -1,108 +0,0 @@ -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - [Collection(SharedConnectionFixture.Key)] - public class Lex : TestBase - { - public Lex(ITestOutputHelper output, SharedConnectionFixture fixture) : base(output, fixture) { } - - [Fact] - public void QueryRangeAndLengthByLex() - { - using (var conn = Create()) - { - var db = conn.GetDatabase(); - RedisKey key = Me(); - db.KeyDelete(key, CommandFlags.FireAndForget); - - db.SortedSetAdd(key, - new [] - { - new SortedSetEntry("a", 0), - new SortedSetEntry("b", 0), - new SortedSetEntry("c", 0), - new SortedSetEntry("d", 0), - new SortedSetEntry("e", 0), - new SortedSetEntry("f", 0), - new SortedSetEntry("g", 0), - }, CommandFlags.FireAndForget); - - var set = db.SortedSetRangeByValue(key, default(RedisValue), "c"); - var count = db.SortedSetLengthByValue(key, default(RedisValue), "c"); - Equate(set, 
count, "a", "b", "c"); - - set = db.SortedSetRangeByValue(key, default(RedisValue), "c", Exclude.Stop); - count = db.SortedSetLengthByValue(key, default(RedisValue), "c", Exclude.Stop); - Equate(set, count, "a", "b"); - - set = db.SortedSetRangeByValue(key, "aaa", "g", Exclude.Stop); - count = db.SortedSetLengthByValue(key, "aaa", "g", Exclude.Stop); - Equate(set, count, "b", "c", "d", "e", "f"); - - set = db.SortedSetRangeByValue(key, "aaa", "g", Exclude.Stop, 1, 3); - Equate(set, set.Length, "c", "d", "e"); - - set = db.SortedSetRangeByValue(key, "aaa", "g", Exclude.Stop, Order.Descending, 1, 3); - Equate(set, set.Length, "e", "d", "c"); - - set = db.SortedSetRangeByValue(key, "g", "aaa", Exclude.Start, Order.Descending, 1, 3); - Equate(set, set.Length, "e", "d", "c"); - - set = db.SortedSetRangeByValue(key, "e", default(RedisValue)); - count = db.SortedSetLengthByValue(key, "e", default(RedisValue)); - Equate(set, count, "e", "f", "g"); - } - } - - [Fact] - public void RemoveRangeByLex() - { - using (var conn = Create()) - { - var db = conn.GetDatabase(); - RedisKey key = Me(); - db.KeyDelete(key, CommandFlags.FireAndForget); - - db.SortedSetAdd(key, - new [] - { - new SortedSetEntry("aaaa", 0), - new SortedSetEntry("b", 0), - new SortedSetEntry("c", 0), - new SortedSetEntry("d", 0), - new SortedSetEntry("e", 0), - }, CommandFlags.FireAndForget); - db.SortedSetAdd(key, - new [] - { - new SortedSetEntry("foo", 0), - new SortedSetEntry("zap", 0), - new SortedSetEntry("zip", 0), - new SortedSetEntry("ALPHA", 0), - new SortedSetEntry("alpha", 0), - }, CommandFlags.FireAndForget); - - var set = db.SortedSetRangeByRank(key); - Equate(set, set.Length, "ALPHA", "aaaa", "alpha", "b", "c", "d", "e", "foo", "zap", "zip"); - - long removed = db.SortedSetRemoveRangeByValue(key, "alpha", "omega"); - Assert.Equal(6, removed); - - set = db.SortedSetRangeByRank(key); - Equate(set, set.Length, "ALPHA", "aaaa", "zap", "zip"); - } - } - - private void Equate(RedisValue[] actual, 
long count, params string[] expected) - { - Assert.Equal(expected.Length, count); - Assert.Equal(expected.Length, actual.Length); - for (int i = 0; i < actual.Length; i++) - { - Assert.Equal(expected[i], actual[i]); - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/LexTests.cs b/tests/StackExchange.Redis.Tests/LexTests.cs new file mode 100644 index 000000000..b70fdda7e --- /dev/null +++ b/tests/StackExchange.Redis.Tests/LexTests.cs @@ -0,0 +1,108 @@ +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class LexTests(ITestOutputHelper output, SharedConnectionFixture fixture) : TestBase(output, fixture) +{ + [Fact] + public async Task QueryRangeAndLengthByLex() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + RedisKey key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + + db.SortedSetAdd( + key, + [ + new SortedSetEntry("a", 0), + new SortedSetEntry("b", 0), + new SortedSetEntry("c", 0), + new SortedSetEntry("d", 0), + new SortedSetEntry("e", 0), + new SortedSetEntry("f", 0), + new SortedSetEntry("g", 0), + ], + CommandFlags.FireAndForget); + + var set = db.SortedSetRangeByValue(key, default(RedisValue), "c"); + var count = db.SortedSetLengthByValue(key, default(RedisValue), "c"); + Equate(set, count, "a", "b", "c"); + + set = db.SortedSetRangeByValue(key, default(RedisValue), "c", Exclude.Stop); + count = db.SortedSetLengthByValue(key, default(RedisValue), "c", Exclude.Stop); + Equate(set, count, "a", "b"); + + set = db.SortedSetRangeByValue(key, "aaa", "g", Exclude.Stop); + count = db.SortedSetLengthByValue(key, "aaa", "g", Exclude.Stop); + Equate(set, count, "b", "c", "d", "e", "f"); + + set = db.SortedSetRangeByValue(key, "aaa", "g", Exclude.Stop, 1, 3); + Equate(set, set.Length, "c", "d", "e"); + + set = db.SortedSetRangeByValue(key, "aaa", "g", Exclude.Stop, Order.Descending, 1, 3); + Equate(set, set.Length, "e", "d", "c"); + + set = db.SortedSetRangeByValue(key, "g", "aaa", 
Exclude.Start, Order.Descending, 1, 3); + Equate(set, set.Length, "e", "d", "c"); + + set = db.SortedSetRangeByValue(key, "e", default(RedisValue)); + count = db.SortedSetLengthByValue(key, "e", default(RedisValue)); + Equate(set, count, "e", "f", "g"); + + set = db.SortedSetRangeByValue(key, RedisValue.Null, RedisValue.Null, Exclude.None, Order.Descending, 0, 3); // added to test Null-min- and max-param + Equate(set, set.Length, "g", "f", "e"); + } + + [Fact] + public async Task RemoveRangeByLex() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + RedisKey key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + + db.SortedSetAdd( + key, + [ + new SortedSetEntry("aaaa", 0), + new SortedSetEntry("b", 0), + new SortedSetEntry("c", 0), + new SortedSetEntry("d", 0), + new SortedSetEntry("e", 0), + ], + CommandFlags.FireAndForget); + db.SortedSetAdd( + key, + [ + new SortedSetEntry("foo", 0), + new SortedSetEntry("zap", 0), + new SortedSetEntry("zip", 0), + new SortedSetEntry("ALPHA", 0), + new SortedSetEntry("alpha", 0), + ], + CommandFlags.FireAndForget); + + var set = db.SortedSetRangeByRank(key); + Equate(set, set.Length, "ALPHA", "aaaa", "alpha", "b", "c", "d", "e", "foo", "zap", "zip"); + + long removed = db.SortedSetRemoveRangeByValue(key, "alpha", "omega"); + Assert.Equal(6, removed); + + set = db.SortedSetRangeByRank(key); + Equate(set, set.Length, "ALPHA", "aaaa", "zap", "zip"); + } + + private static void Equate(RedisValue[] actual, long count, params string[] expected) + { + Assert.Equal(expected.Length, count); + Assert.Equal(expected.Length, actual.Length); + for (int i = 0; i < actual.Length; i++) + { + Assert.Equal(expected[i], actual[i]); + } + } +} diff --git a/tests/StackExchange.Redis.Tests/ListTests.cs b/tests/StackExchange.Redis.Tests/ListTests.cs new file mode 100644 index 000000000..cd0f2e0a3 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/ListTests.cs @@ -0,0 +1,957 @@ +using System; +using System.Linq; +using 
System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +[RunPerProtocol] +public class ListTests(ITestOutputHelper output, SharedConnectionFixture fixture) : TestBase(output, fixture) +{ + [Fact] + public async Task Ranges() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + RedisKey key = Me(); + + db.KeyDelete(key, CommandFlags.FireAndForget); + db.ListRightPush(key, "abcdefghijklmnopqrstuvwxyz".Select(x => (RedisValue)x.ToString()).ToArray(), CommandFlags.FireAndForget); + + Assert.Equal(26, db.ListLength(key)); + Assert.Equal("abcdefghijklmnopqrstuvwxyz", string.Concat(db.ListRange(key))); + + var last10 = db.ListRange(key, -10, -1); + Assert.Equal("qrstuvwxyz", string.Concat(last10)); + db.ListTrim(key, 0, -11, CommandFlags.FireAndForget); + + Assert.Equal(16, db.ListLength(key)); + Assert.Equal("abcdefghijklmnop", string.Concat(db.ListRange(key))); + } + + [Fact] + public async Task ListLeftPushEmptyValues() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + RedisKey key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + var result = db.ListLeftPush(key, Array.Empty(), When.Always, CommandFlags.None); + Assert.Equal(0, result); + } + + [Fact] + public async Task ListLeftPushKeyDoesNotExists() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + RedisKey key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + var result = db.ListLeftPush(key, ["testvalue"], When.Exists, CommandFlags.None); + Assert.Equal(0, result); + } + + [Fact] + public async Task ListLeftPushToExisitingKey() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + RedisKey key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + + var pushResult = db.ListLeftPush(key, ["testvalue1"], CommandFlags.None); + Assert.Equal(1, pushResult); + var pushXResult = db.ListLeftPush(key, ["testvalue2"], When.Exists, CommandFlags.None); + Assert.Equal(2, pushXResult); + + var 
rangeResult = db.ListRange(key, 0, -1); + Assert.Equal(2, rangeResult.Length); + Assert.Equal("testvalue2", rangeResult[0]); + Assert.Equal("testvalue1", rangeResult[1]); + } + + [Fact] + public async Task ListLeftPushMultipleToExisitingKey() + { + await using var conn = Create(require: RedisFeatures.v4_0_0); + + var db = conn.GetDatabase(); + RedisKey key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + + var pushResult = db.ListLeftPush(key, ["testvalue1"], CommandFlags.None); + Assert.Equal(1, pushResult); + var pushXResult = db.ListLeftPush(key, ["testvalue2", "testvalue3"], When.Exists, CommandFlags.None); + Assert.Equal(3, pushXResult); + + var rangeResult = db.ListRange(key, 0, -1); + Assert.Equal(3, rangeResult.Length); + Assert.Equal("testvalue3", rangeResult[0]); + Assert.Equal("testvalue2", rangeResult[1]); + Assert.Equal("testvalue1", rangeResult[2]); + } + + [Fact] + public async Task ListLeftPushAsyncEmptyValues() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + RedisKey key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + var result = await db.ListLeftPushAsync(key, Array.Empty(), When.Always, CommandFlags.None); + Assert.Equal(0, result); + } + + [Fact] + public async Task ListLeftPushAsyncKeyDoesNotExists() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + RedisKey key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + var result = await db.ListLeftPushAsync(key, ["testvalue"], When.Exists, CommandFlags.None); + Assert.Equal(0, result); + } + + [Fact] + public async Task ListLeftPushAsyncToExisitingKey() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + RedisKey key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + + var pushResult = await db.ListLeftPushAsync(key, ["testvalue1"], CommandFlags.None); + Assert.Equal(1, pushResult); + var pushXResult = await db.ListLeftPushAsync(key, ["testvalue2"], When.Exists, CommandFlags.None); + 
Assert.Equal(2, pushXResult); + + var rangeResult = db.ListRange(key, 0, -1); + Assert.Equal(2, rangeResult.Length); + Assert.Equal("testvalue2", rangeResult[0]); + Assert.Equal("testvalue1", rangeResult[1]); + } + + [Fact] + public async Task ListLeftPushAsyncMultipleToExisitingKey() + { + await using var conn = Create(require: RedisFeatures.v4_0_0); + + var db = conn.GetDatabase(); + RedisKey key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + + var pushResult = await db.ListLeftPushAsync(key, ["testvalue1"], CommandFlags.None); + Assert.Equal(1, pushResult); + var pushXResult = await db.ListLeftPushAsync(key, ["testvalue2", "testvalue3"], When.Exists, CommandFlags.None); + Assert.Equal(3, pushXResult); + + var rangeResult = db.ListRange(key, 0, -1); + Assert.Equal(3, rangeResult.Length); + Assert.Equal("testvalue3", rangeResult[0]); + Assert.Equal("testvalue2", rangeResult[1]); + Assert.Equal("testvalue1", rangeResult[2]); + } + + [Fact] + public async Task ListRightPushEmptyValues() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + RedisKey key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + var result = db.ListRightPush(key, Array.Empty(), When.Always, CommandFlags.None); + Assert.Equal(0, result); + } + + [Fact] + public async Task ListRightPushKeyDoesNotExists() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + RedisKey key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + var result = db.ListRightPush(key, ["testvalue"], When.Exists, CommandFlags.None); + Assert.Equal(0, result); + } + + [Fact] + public async Task ListRightPushToExisitingKey() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + RedisKey key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + + var pushResult = db.ListRightPush(key, ["testvalue1"], CommandFlags.None); + Assert.Equal(1, pushResult); + var pushXResult = db.ListRightPush(key, ["testvalue2"], When.Exists, 
CommandFlags.None); + Assert.Equal(2, pushXResult); + + var rangeResult = db.ListRange(key, 0, -1); + Assert.Equal(2, rangeResult.Length); + Assert.Equal("testvalue1", rangeResult[0]); + Assert.Equal("testvalue2", rangeResult[1]); + } + + [Fact] + public async Task ListRightPushMultipleToExisitingKey() + { + await using var conn = Create(require: RedisFeatures.v4_0_0); + + var db = conn.GetDatabase(); + RedisKey key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + + var pushResult = db.ListRightPush(key, ["testvalue1"], CommandFlags.None); + Assert.Equal(1, pushResult); + var pushXResult = db.ListRightPush(key, ["testvalue2", "testvalue3"], When.Exists, CommandFlags.None); + Assert.Equal(3, pushXResult); + + var rangeResult = db.ListRange(key, 0, -1); + Assert.Equal(3, rangeResult.Length); + Assert.Equal("testvalue1", rangeResult[0]); + Assert.Equal("testvalue2", rangeResult[1]); + Assert.Equal("testvalue3", rangeResult[2]); + } + + [Fact] + public async Task ListRightPushAsyncEmptyValues() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + RedisKey key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + var result = await db.ListRightPushAsync(key, Array.Empty(), When.Always, CommandFlags.None); + Assert.Equal(0, result); + } + + [Fact] + public async Task ListRightPushAsyncKeyDoesNotExists() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + RedisKey key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + var result = await db.ListRightPushAsync(key, ["testvalue"], When.Exists, CommandFlags.None); + Assert.Equal(0, result); + } + + [Fact] + public async Task ListRightPushAsyncToExisitingKey() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + RedisKey key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + + var pushResult = await db.ListRightPushAsync(key, ["testvalue1"], CommandFlags.None); + Assert.Equal(1, pushResult); + var pushXResult = await 
db.ListRightPushAsync(key, ["testvalue2"], When.Exists, CommandFlags.None); + Assert.Equal(2, pushXResult); + + var rangeResult = db.ListRange(key, 0, -1); + Assert.Equal(2, rangeResult.Length); + Assert.Equal("testvalue1", rangeResult[0]); + Assert.Equal("testvalue2", rangeResult[1]); + } + + [Fact] + public async Task ListRightPushAsyncMultipleToExisitingKey() + { + await using var conn = Create(require: RedisFeatures.v4_0_0); + + var db = conn.GetDatabase(); + RedisKey key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + + var pushResult = await db.ListRightPushAsync(key, ["testvalue1"], CommandFlags.None); + Assert.Equal(1, pushResult); + var pushXResult = await db.ListRightPushAsync(key, ["testvalue2", "testvalue3"], When.Exists, CommandFlags.None); + Assert.Equal(3, pushXResult); + + var rangeResult = db.ListRange(key, 0, -1); + Assert.Equal(3, rangeResult.Length); + Assert.Equal("testvalue1", rangeResult[0]); + Assert.Equal("testvalue2", rangeResult[1]); + Assert.Equal("testvalue3", rangeResult[2]); + } + + [Fact] + public async Task ListMove() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + RedisKey src = Me(); + RedisKey dest = Me() + "dest"; + db.KeyDelete(src, CommandFlags.FireAndForget); + + var pushResult = await db.ListRightPushAsync(src, ["testvalue1", "testvalue2"]); + Assert.Equal(2, pushResult); + + var rangeResult1 = db.ListMove(src, dest, ListSide.Left, ListSide.Right); + var rangeResult2 = db.ListMove(src, dest, ListSide.Left, ListSide.Left); + var rangeResult3 = db.ListMove(dest, src, ListSide.Right, ListSide.Right); + var rangeResult4 = db.ListMove(dest, src, ListSide.Right, ListSide.Left); + Assert.Equal("testvalue1", rangeResult1); + Assert.Equal("testvalue2", rangeResult2); + Assert.Equal("testvalue1", rangeResult3); + Assert.Equal("testvalue2", rangeResult4); + } + + [Fact] + public async Task ListMoveKeyDoesNotExist() + { + await using var conn = Create(require: 
RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + RedisKey src = Me(); + RedisKey dest = Me() + "dest"; + db.KeyDelete(src, CommandFlags.FireAndForget); + + var rangeResult1 = db.ListMove(src, dest, ListSide.Left, ListSide.Right); + Assert.True(rangeResult1.IsNull); + } + + [Fact] + public async Task ListPositionHappyPath() + { + await using var conn = Create(require: RedisFeatures.v6_0_6); + + var db = conn.GetDatabase(); + var key = Me(); + const string val = "foo"; + db.KeyDelete(key); + + db.ListLeftPush(key, val); + var res = db.ListPosition(key, val); + + Assert.Equal(0, res); + } + + [Fact] + public async Task ListPositionEmpty() + { + await using var conn = Create(require: RedisFeatures.v6_0_6); + + var db = conn.GetDatabase(); + var key = Me(); + const string val = "foo"; + db.KeyDelete(key); + + var res = db.ListPosition(key, val); + + Assert.Equal(-1, res); + } + + [Fact] + public async Task ListPositionsHappyPath() + { + await using var conn = Create(require: RedisFeatures.v6_0_6); + + var db = conn.GetDatabase(); + var key = Me(); + const string foo = "foo", + bar = "bar", + baz = "baz"; + + db.KeyDelete(key); + + for (var i = 0; i < 10; i++) + { + db.ListLeftPush(key, foo); + db.ListLeftPush(key, bar); + db.ListLeftPush(key, baz); + } + + var res = db.ListPositions(key, foo, 5); + + foreach (var item in res) + { + Assert.Equal(2, item % 3); + } + + Assert.Equal(5, res.Length); + } + + [Fact] + public async Task ListPositionsTooFew() + { + await using var conn = Create(require: RedisFeatures.v6_0_6); + + var db = conn.GetDatabase(); + var key = Me(); + const string foo = "foo", + bar = "bar", + baz = "baz"; + + db.KeyDelete(key); + + for (var i = 0; i < 10; i++) + { + db.ListLeftPush(key, bar); + db.ListLeftPush(key, baz); + } + + db.ListLeftPush(key, foo); + + var res = db.ListPositions(key, foo, 5); + Assert.Single(res); + Assert.Equal(0, res.Single()); + } + + [Fact] + public async Task ListPositionsAll() + { + await using var conn = 
Create(require: RedisFeatures.v6_0_6); + + var db = conn.GetDatabase(); + var key = Me(); + const string foo = "foo", + bar = "bar", + baz = "baz"; + + db.KeyDelete(key); + + for (var i = 0; i < 10; i++) + { + db.ListLeftPush(key, foo); + db.ListLeftPush(key, bar); + db.ListLeftPush(key, baz); + } + + var res = db.ListPositions(key, foo, 0); + + foreach (var item in res) + { + Assert.Equal(2, item % 3); + } + + Assert.Equal(10, res.Length); + } + + [Fact] + public async Task ListPositionsAllLimitLength() + { + await using var conn = Create(require: RedisFeatures.v6_0_6); + + var db = conn.GetDatabase(); + var key = Me(); + const string foo = "foo", + bar = "bar", + baz = "baz"; + + db.KeyDelete(key); + + for (var i = 0; i < 10; i++) + { + db.ListLeftPush(key, foo); + db.ListLeftPush(key, bar); + db.ListLeftPush(key, baz); + } + + var res = db.ListPositions(key, foo, 0, maxLength: 15); + + foreach (var item in res) + { + Assert.Equal(2, item % 3); + } + + Assert.Equal(5, res.Length); + } + + [Fact] + public async Task ListPositionsEmpty() + { + await using var conn = Create(require: RedisFeatures.v6_0_6); + + var db = conn.GetDatabase(); + var key = Me(); + const string foo = "foo", + bar = "bar", + baz = "baz"; + + db.KeyDelete(key); + + for (var i = 0; i < 10; i++) + { + db.ListLeftPush(key, bar); + db.ListLeftPush(key, baz); + } + + var res = db.ListPositions(key, foo, 5); + + Assert.Empty(res); + } + + [Fact] + public async Task ListPositionByRank() + { + await using var conn = Create(require: RedisFeatures.v6_0_6); + + var db = conn.GetDatabase(); + var key = Me(); + const string foo = "foo", + bar = "bar", + baz = "baz"; + + db.KeyDelete(key); + + for (var i = 0; i < 10; i++) + { + db.ListLeftPush(key, foo); + db.ListLeftPush(key, bar); + db.ListLeftPush(key, baz); + } + + const int rank = 6; + + var res = db.ListPosition(key, foo, rank: rank); + + Assert.Equal((3 * rank) - 1, res); + } + + [Fact] + public async Task ListPositionLimitSoNull() + { + await using 
var conn = Create(require: RedisFeatures.v6_0_6); + + var db = conn.GetDatabase(); + var key = Me(); + const string foo = "foo", + bar = "bar", + baz = "baz"; + + db.KeyDelete(key); + + for (var i = 0; i < 10; i++) + { + db.ListLeftPush(key, bar); + db.ListLeftPush(key, baz); + } + + db.ListRightPush(key, foo); + + var res = db.ListPosition(key, foo, maxLength: 20); + + Assert.Equal(-1, res); + } + + [Fact] + public async Task ListPositionHappyPathAsync() + { + await using var conn = Create(require: RedisFeatures.v6_0_6); + + var db = conn.GetDatabase(); + var key = Me(); + const string val = "foo"; + await db.KeyDeleteAsync(key); + + await db.ListLeftPushAsync(key, val); + var res = await db.ListPositionAsync(key, val); + + Assert.Equal(0, res); + } + + [Fact] + public async Task ListPositionEmptyAsync() + { + await using var conn = Create(require: RedisFeatures.v6_0_6); + + var db = conn.GetDatabase(); + var key = Me(); + const string val = "foo"; + await db.KeyDeleteAsync(key); + + var res = await db.ListPositionAsync(key, val); + + Assert.Equal(-1, res); + } + + [Fact] + public async Task ListPositionsHappyPathAsync() + { + await using var conn = Create(require: RedisFeatures.v6_0_6); + + var db = conn.GetDatabase(); + var key = Me(); + const string foo = "foo", + bar = "bar", + baz = "baz"; + + await db.KeyDeleteAsync(key); + + for (var i = 0; i < 10; i++) + { + await db.ListLeftPushAsync(key, foo); + await db.ListLeftPushAsync(key, bar); + await db.ListLeftPushAsync(key, baz); + } + + var res = await db.ListPositionsAsync(key, foo, 5); + + foreach (var item in res) + { + Assert.Equal(2, item % 3); + } + + Assert.Equal(5, res.Length); + } + + [Fact] + public async Task ListPositionsTooFewAsync() + { + await using var conn = Create(require: RedisFeatures.v6_0_6); + + var db = conn.GetDatabase(); + var key = Me(); + const string foo = "foo", + bar = "bar", + baz = "baz"; + + await db.KeyDeleteAsync(key); + + for (var i = 0; i < 10; i++) + { + await 
db.ListLeftPushAsync(key, bar); + await db.ListLeftPushAsync(key, baz); + } + + db.ListLeftPush(key, foo); + + var res = await db.ListPositionsAsync(key, foo, 5); + Assert.Single(res); + Assert.Equal(0, res.Single()); + } + + [Fact] + public async Task ListPositionsAllAsync() + { + await using var conn = Create(require: RedisFeatures.v6_0_6); + + var db = conn.GetDatabase(); + var key = Me(); + const string foo = "foo", + bar = "bar", + baz = "baz"; + + await db.KeyDeleteAsync(key); + + for (var i = 0; i < 10; i++) + { + await db.ListLeftPushAsync(key, foo); + await db.ListLeftPushAsync(key, bar); + await db.ListLeftPushAsync(key, baz); + } + + var res = await db.ListPositionsAsync(key, foo, 0); + + foreach (var item in res) + { + Assert.Equal(2, item % 3); + } + + Assert.Equal(10, res.Length); + } + + [Fact] + public async Task ListPositionsAllLimitLengthAsync() + { + await using var conn = Create(require: RedisFeatures.v6_0_6); + + var db = conn.GetDatabase(); + var key = Me(); + const string foo = "foo", + bar = "bar", + baz = "baz"; + + await db.KeyDeleteAsync(key); + + for (var i = 0; i < 10; i++) + { + await db.ListLeftPushAsync(key, foo); + await db.ListLeftPushAsync(key, bar); + await db.ListLeftPushAsync(key, baz); + } + + var res = await db.ListPositionsAsync(key, foo, 0, maxLength: 15); + + foreach (var item in res) + { + Assert.Equal(2, item % 3); + } + + Assert.Equal(5, res.Length); + } + + [Fact] + public async Task ListPositionsEmptyAsync() + { + await using var conn = Create(require: RedisFeatures.v6_0_6); + + var db = conn.GetDatabase(); + var key = Me(); + const string foo = "foo", + bar = "bar", + baz = "baz"; + + await db.KeyDeleteAsync(key); + + for (var i = 0; i < 10; i++) + { + await db.ListLeftPushAsync(key, bar); + await db.ListLeftPushAsync(key, baz); + } + + var res = await db.ListPositionsAsync(key, foo, 5); + + Assert.Empty(res); + } + + [Fact] + public async Task ListPositionByRankAsync() + { + await using var conn = Create(require: 
RedisFeatures.v6_0_6); + + var db = conn.GetDatabase(); + var key = Me(); + const string foo = "foo", + bar = "bar", + baz = "baz"; + + await db.KeyDeleteAsync(key); + + for (var i = 0; i < 10; i++) + { + await db.ListLeftPushAsync(key, foo); + await db.ListLeftPushAsync(key, bar); + await db.ListLeftPushAsync(key, baz); + } + + const int rank = 6; + + var res = await db.ListPositionAsync(key, foo, rank: rank); + + Assert.Equal((3 * rank) - 1, res); + } + + [Fact] + public async Task ListPositionLimitSoNullAsync() + { + await using var conn = Create(require: RedisFeatures.v6_0_6); + + var db = conn.GetDatabase(); + var key = Me(); + const string foo = "foo", + bar = "bar", + baz = "baz"; + + await db.KeyDeleteAsync(key); + + for (var i = 0; i < 10; i++) + { + await db.ListLeftPushAsync(key, bar); + await db.ListLeftPushAsync(key, baz); + } + + await db.ListRightPushAsync(key, foo); + + var res = await db.ListPositionAsync(key, foo, maxLength: 20); + + Assert.Equal(-1, res); + } + + [Fact] + public async Task ListPositionFireAndForgetAsync() + { + await using var conn = Create(require: RedisFeatures.v6_0_6); + + var db = conn.GetDatabase(); + var key = Me(); + const string foo = "foo", + bar = "bar", + baz = "baz"; + + await db.KeyDeleteAsync(key); + + for (var i = 0; i < 10; i++) + { + await db.ListLeftPushAsync(key, foo); + await db.ListLeftPushAsync(key, bar); + await db.ListLeftPushAsync(key, baz); + } + + await db.ListRightPushAsync(key, foo); + + var res = await db.ListPositionAsync(key, foo, maxLength: 20, flags: CommandFlags.FireAndForget); + + Assert.Equal(-1, res); + } + + [Fact] + public async Task ListPositionFireAndForget() + { + await using var conn = Create(require: RedisFeatures.v6_0_6); + + var db = conn.GetDatabase(); + var key = Me(); + const string foo = "foo", + bar = "bar", + baz = "baz"; + + db.KeyDelete(key); + + for (var i = 0; i < 10; i++) + { + db.ListLeftPush(key, foo); + db.ListLeftPush(key, bar); + db.ListLeftPush(key, baz); + } + + 
db.ListRightPush(key, foo); + + var res = db.ListPosition(key, foo, maxLength: 20, flags: CommandFlags.FireAndForget); + + Assert.Equal(-1, res); + } + + [Fact] + public async Task ListMultiPopSingleKeyAsync() + { + await using var conn = Create(require: RedisFeatures.v7_0_0_rc1); + + var db = conn.GetDatabase(); + var key = Me(); + await db.KeyDeleteAsync(key); + + db.ListLeftPush(key, "yankees"); + db.ListLeftPush(key, "blue jays"); + db.ListLeftPush(key, "orioles"); + db.ListLeftPush(key, "red sox"); + db.ListLeftPush(key, "rays"); + + var res = await db.ListLeftPopAsync([key], 1); + + Assert.False(res.IsNull); + Assert.Single(res.Values); + Assert.Equal("rays", res.Values[0]); + + res = await db.ListRightPopAsync([key], 2); + + Assert.False(res.IsNull); + Assert.Equal(2, res.Values.Length); + Assert.Equal("yankees", res.Values[0]); + Assert.Equal("blue jays", res.Values[1]); + } + + [Fact] + public async Task ListMultiPopMultipleKeysAsync() + { + await using var conn = Create(require: RedisFeatures.v7_0_0_rc1); + + var db = conn.GetDatabase(); + var key = Me(); + await db.KeyDeleteAsync(key); + + db.ListLeftPush(key, "yankees"); + db.ListLeftPush(key, "blue jays"); + db.ListLeftPush(key, "orioles"); + db.ListLeftPush(key, "red sox"); + db.ListLeftPush(key, "rays"); + + var res = await db.ListLeftPopAsync(["empty-key", key, "also-empty"], 2); + + Assert.False(res.IsNull); + Assert.Equal(2, res.Values.Length); + Assert.Equal("rays", res.Values[0]); + Assert.Equal("red sox", res.Values[1]); + + res = await db.ListRightPopAsync(["empty-key", key, "also-empty"], 1); + + Assert.False(res.IsNull); + Assert.Single(res.Values); + Assert.Equal("yankees", res.Values[0]); + } + + [Fact] + public async Task ListMultiPopSingleKey() + { + await using var conn = Create(require: RedisFeatures.v7_0_0_rc1); + + var db = conn.GetDatabase(); + var key = Me(); + db.KeyDelete(key); + + db.ListLeftPush(key, "yankees"); + db.ListLeftPush(key, "blue jays"); + db.ListLeftPush(key, 
"orioles"); + db.ListLeftPush(key, "red sox"); + db.ListLeftPush(key, "rays"); + + var res = db.ListLeftPop([key], 1); + + Assert.False(res.IsNull); + Assert.Single(res.Values); + Assert.Equal("rays", res.Values[0]); + + res = db.ListRightPop([key], 2); + + Assert.False(res.IsNull); + Assert.Equal(2, res.Values.Length); + Assert.Equal("yankees", res.Values[0]); + Assert.Equal("blue jays", res.Values[1]); + } + + [Fact] + public async Task ListMultiPopZeroCount() + { + await using var conn = Create(require: RedisFeatures.v7_0_0_rc1); + + var db = conn.GetDatabase(); + var key = Me(); + db.KeyDelete(key); + + var exception = await Assert.ThrowsAsync(() => db.ListLeftPopAsync([key], 0)); + Assert.Contains("ERR count should be greater than 0", exception.Message); + } + + [Fact] + public async Task ListMultiPopEmpty() + { + await using var conn = Create(require: RedisFeatures.v7_0_0_rc1); + + var db = conn.GetDatabase(); + var key = Me(); + db.KeyDelete(key); + + var res = await db.ListLeftPopAsync([key], 1); + Assert.True(res.IsNull); + } + + [Fact] + public async Task ListMultiPopEmptyKeys() + { + await using var conn = Create(require: RedisFeatures.v7_0_0_rc1); + + var db = conn.GetDatabase(); + var exception = Assert.Throws(() => db.ListRightPop(Array.Empty(), 5)); + Assert.Contains("keys must have a size of at least 1", exception.Message); + + exception = Assert.Throws(() => db.ListLeftPop(Array.Empty(), 5)); + Assert.Contains("keys must have a size of at least 1", exception.Message); + } +} diff --git a/tests/StackExchange.Redis.Tests/Lists.cs b/tests/StackExchange.Redis.Tests/Lists.cs deleted file mode 100644 index 0c1fa332a..000000000 --- a/tests/StackExchange.Redis.Tests/Lists.cs +++ /dev/null @@ -1,316 +0,0 @@ -using System.Linq; -using System.Threading.Tasks; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - [Collection(SharedConnectionFixture.Key)] - public class Lists : TestBase - { - public Lists(ITestOutputHelper output, 
SharedConnectionFixture fixture) : base(output, fixture) { } - - [Fact] - public void Ranges() - { - using (var conn = Create()) - { - var db = conn.GetDatabase(); - RedisKey key = Me(); - - db.KeyDelete(key, CommandFlags.FireAndForget); - db.ListRightPush(key, "abcdefghijklmnopqrstuvwxyz".Select(x => (RedisValue)x.ToString()).ToArray(), CommandFlags.FireAndForget); - - Assert.Equal(26, db.ListLength(key)); - Assert.Equal("abcdefghijklmnopqrstuvwxyz", string.Concat(db.ListRange(key))); - - var last10 = db.ListRange(key, -10, -1); - Assert.Equal("qrstuvwxyz", string.Concat(last10)); - db.ListTrim(key, 0, -11, CommandFlags.FireAndForget); - - Assert.Equal(16, db.ListLength(key)); - Assert.Equal("abcdefghijklmnop", string.Concat(db.ListRange(key))); - } - } - - [Fact] - public void ListLeftPushEmptyValues() - { - using (var conn = Create()) - { - var db = conn.GetDatabase(); - RedisKey key = "testlist"; - db.KeyDelete(key, CommandFlags.FireAndForget); - var result = db.ListLeftPush(key, new RedisValue[0], When.Always, CommandFlags.None); - Assert.Equal(0, result); - } - } - - [Fact] - public void ListLeftPushKeyDoesNotExists() - { - using (var conn = Create()) - { - var db = conn.GetDatabase(); - RedisKey key = "testlist"; - db.KeyDelete(key, CommandFlags.FireAndForget); - var result = db.ListLeftPush(key, new RedisValue[] { "testvalue" }, When.Exists, CommandFlags.None); - Assert.Equal(0, result); - } - } - - [Fact] - public void ListLeftPushToExisitingKey() - { - using (var conn = Create()) - { - var db = conn.GetDatabase(); - RedisKey key = "testlist"; - db.KeyDelete(key, CommandFlags.FireAndForget); - - var pushResult = db.ListLeftPush(key, new RedisValue[] { "testvalue1" }, CommandFlags.None); - Assert.Equal(1, pushResult); - var pushXResult = db.ListLeftPush(key, new RedisValue[] { "testvalue2" }, When.Exists, CommandFlags.None); - Assert.Equal(2, pushXResult); - - var rangeResult = db.ListRange(key, 0, -1); - Assert.Equal(2, rangeResult.Length); - 
Assert.Equal("testvalue2", rangeResult[0]); - Assert.Equal("testvalue1", rangeResult[1]); - } - } - - [Fact] - public void ListLeftPushMultipleToExisitingKey() - { - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.PushMultiple), f => f.PushMultiple); - var db = conn.GetDatabase(); - RedisKey key = "testlist"; - db.KeyDelete(key, CommandFlags.FireAndForget); - - var pushResult = db.ListLeftPush(key, new RedisValue[] { "testvalue1" }, CommandFlags.None); - Assert.Equal(1, pushResult); - var pushXResult = db.ListLeftPush(key, new RedisValue[] { "testvalue2", "testvalue3" }, When.Exists, CommandFlags.None); - Assert.Equal(3, pushXResult); - - var rangeResult = db.ListRange(key, 0, -1); - Assert.Equal(3, rangeResult.Length); - Assert.Equal("testvalue3", rangeResult[0]); - Assert.Equal("testvalue2", rangeResult[1]); - Assert.Equal("testvalue1", rangeResult[2]); - } - } - - [Fact] - public async Task ListLeftPushAsyncEmptyValues() - { - using (var conn = Create()) - { - var db = conn.GetDatabase(); - RedisKey key = "testlist"; - db.KeyDelete(key, CommandFlags.FireAndForget); - var result = await db.ListLeftPushAsync(key, new RedisValue[0], When.Always, CommandFlags.None); - Assert.Equal(0, result); - } - } - - [Fact] - public async Task ListLeftPushAsyncKeyDoesNotExists() - { - using (var conn = Create()) - { - var db = conn.GetDatabase(); - RedisKey key = "testlist"; - db.KeyDelete(key, CommandFlags.FireAndForget); - var result = await db.ListLeftPushAsync(key, new RedisValue[] { "testvalue" }, When.Exists, CommandFlags.None); - Assert.Equal(0, result); - } - } - - [Fact] - public async Task ListLeftPushAsyncToExisitingKey() - { - using (var conn = Create()) - { - var db = conn.GetDatabase(); - RedisKey key = "testlist"; - db.KeyDelete(key, CommandFlags.FireAndForget); - - var pushResult = await db.ListLeftPushAsync(key, new RedisValue[] { "testvalue1" }, CommandFlags.None); - Assert.Equal(1, pushResult); - var pushXResult = await 
db.ListLeftPushAsync(key, new RedisValue[] { "testvalue2" }, When.Exists, CommandFlags.None); - Assert.Equal(2, pushXResult); - - var rangeResult = db.ListRange(key, 0, -1); - Assert.Equal(2, rangeResult.Length); - Assert.Equal("testvalue2", rangeResult[0]); - Assert.Equal("testvalue1", rangeResult[1]); - } - } - - [Fact] - public async Task ListLeftPushAsyncMultipleToExisitingKey() - { - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.PushMultiple), f => f.PushMultiple); - var db = conn.GetDatabase(); - RedisKey key = "testlist"; - db.KeyDelete(key, CommandFlags.FireAndForget); - - var pushResult = await db.ListLeftPushAsync(key, new RedisValue[] { "testvalue1" }, CommandFlags.None); - Assert.Equal(1, pushResult); - var pushXResult = await db.ListLeftPushAsync(key, new RedisValue[] { "testvalue2", "testvalue3" }, When.Exists, CommandFlags.None); - Assert.Equal(3, pushXResult); - - var rangeResult = db.ListRange(key, 0, -1); - Assert.Equal(3, rangeResult.Length); - Assert.Equal("testvalue3", rangeResult[0]); - Assert.Equal("testvalue2", rangeResult[1]); - Assert.Equal("testvalue1", rangeResult[2]); - } - } - - [Fact] - public void ListRightPushEmptyValues() - { - using (var conn = Create()) - { - var db = conn.GetDatabase(); - RedisKey key = "testlist"; - db.KeyDelete(key, CommandFlags.FireAndForget); - var result = db.ListRightPush(key, new RedisValue[0], When.Always, CommandFlags.None); - Assert.Equal(0, result); - } - } - - [Fact] - public void ListRightPushKeyDoesNotExists() - { - using (var conn = Create()) - { - var db = conn.GetDatabase(); - RedisKey key = "testlist"; - db.KeyDelete(key, CommandFlags.FireAndForget); - var result = db.ListRightPush(key, new RedisValue[] { "testvalue" }, When.Exists, CommandFlags.None); - Assert.Equal(0, result); - } - } - - [Fact] - public void ListRightPushToExisitingKey() - { - using (var conn = Create()) - { - var db = conn.GetDatabase(); - RedisKey key = "testlist"; - db.KeyDelete(key, 
CommandFlags.FireAndForget); - - var pushResult = db.ListRightPush(key, new RedisValue[] { "testvalue1" }, CommandFlags.None); - Assert.Equal(1, pushResult); - var pushXResult = db.ListRightPush(key, new RedisValue[] { "testvalue2" }, When.Exists, CommandFlags.None); - Assert.Equal(2, pushXResult); - - var rangeResult = db.ListRange(key, 0, -1); - Assert.Equal(2, rangeResult.Length); - Assert.Equal("testvalue1", rangeResult[0]); - Assert.Equal("testvalue2", rangeResult[1]); - } - } - - [Fact] - public void ListRightPushMultipleToExisitingKey() - { - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.PushMultiple), f => f.PushMultiple); - var db = conn.GetDatabase(); - RedisKey key = "testlist"; - db.KeyDelete(key, CommandFlags.FireAndForget); - - var pushResult = db.ListRightPush(key, new RedisValue[] { "testvalue1" }, CommandFlags.None); - Assert.Equal(1, pushResult); - var pushXResult = db.ListRightPush(key, new RedisValue[] { "testvalue2", "testvalue3" }, When.Exists, CommandFlags.None); - Assert.Equal(3, pushXResult); - - var rangeResult = db.ListRange(key, 0, -1); - Assert.Equal(3, rangeResult.Length); - Assert.Equal("testvalue1", rangeResult[0]); - Assert.Equal("testvalue2", rangeResult[1]); - Assert.Equal("testvalue3", rangeResult[2]); - } - } - - [Fact] - public async Task ListRightPushAsyncEmptyValues() - { - using (var conn = Create()) - { - var db = conn.GetDatabase(); - RedisKey key = "testlist"; - db.KeyDelete(key, CommandFlags.FireAndForget); - var result = await db.ListRightPushAsync(key, new RedisValue[0], When.Always, CommandFlags.None); - Assert.Equal(0, result); - } - } - - [Fact] - public async Task ListRightPushAsyncKeyDoesNotExists() - { - using (var conn = Create()) - { - var db = conn.GetDatabase(); - RedisKey key = "testlist"; - db.KeyDelete(key, CommandFlags.FireAndForget); - var result = await db.ListRightPushAsync(key, new RedisValue[] { "testvalue" }, When.Exists, CommandFlags.None); - Assert.Equal(0, 
result); - } - } - - [Fact] - public async Task ListRightPushAsyncToExisitingKey() - { - using (var conn = Create()) - { - var db = conn.GetDatabase(); - RedisKey key = "testlist"; - db.KeyDelete(key, CommandFlags.FireAndForget); - - var pushResult = await db.ListRightPushAsync(key, new RedisValue[] { "testvalue1" }, CommandFlags.None); - Assert.Equal(1, pushResult); - var pushXResult = await db.ListRightPushAsync(key, new RedisValue[] { "testvalue2" }, When.Exists, CommandFlags.None); - Assert.Equal(2, pushXResult); - - var rangeResult = db.ListRange(key, 0, -1); - Assert.Equal(2, rangeResult.Length); - Assert.Equal("testvalue1", rangeResult[0]); - Assert.Equal("testvalue2", rangeResult[1]); - } - } - - [Fact] - public async Task ListRightPushAsyncMultipleToExisitingKey() - { - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.PushMultiple), f => f.PushMultiple); - var db = conn.GetDatabase(); - RedisKey key = "testlist"; - db.KeyDelete(key, CommandFlags.FireAndForget); - - var pushResult = await db.ListRightPushAsync(key, new RedisValue[] { "testvalue1" }, CommandFlags.None); - Assert.Equal(1, pushResult); - var pushXResult = await db.ListRightPushAsync(key, new RedisValue[] { "testvalue2", "testvalue3" }, When.Exists, CommandFlags.None); - Assert.Equal(3, pushXResult); - - var rangeResult = db.ListRange(key, 0, -1); - Assert.Equal(3, rangeResult.Length); - Assert.Equal("testvalue1", rangeResult[0]); - Assert.Equal("testvalue2", rangeResult[1]); - Assert.Equal("testvalue3", rangeResult[2]); - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/Locking.cs b/tests/StackExchange.Redis.Tests/Locking.cs deleted file mode 100644 index 65b412737..000000000 --- a/tests/StackExchange.Redis.Tests/Locking.cs +++ /dev/null @@ -1,252 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Threading; -using System.Threading.Tasks; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - 
[Collection(NonParallelCollection.Name)] - public class Locking : TestBase - { - protected override string GetConfiguration() => TestConfig.Current.MasterServerAndPort; - public Locking(ITestOutputHelper output) : base (output) { } - - public enum TestMode - { - MultiExec, - NoMultiExec, - Twemproxy - } - - public static IEnumerable TestModes() - { - yield return new object[] { TestMode.MultiExec }; - yield return new object[] { TestMode.NoMultiExec }; - yield return new object[] { TestMode.Twemproxy }; - } - - [Theory, MemberData(nameof(TestModes))] - public void AggressiveParallel(TestMode testMode) - { - int count = 2; - int errorCount = 0; - int bgErrorCount = 0; - var evt = new ManualResetEvent(false); - var key = Me(); - using (var c1 = Create(testMode)) - using (var c2 = Create(testMode)) - { - void cb(object obj) - { - try - { - var conn = (IDatabase)obj; - conn.Multiplexer.ErrorMessage += delegate { Interlocked.Increment(ref errorCount); }; - - for (int i = 0; i < 1000; i++) - { - conn.LockTakeAsync(key, "def", TimeSpan.FromSeconds(5)); - } - conn.Ping(); - if (Interlocked.Decrement(ref count) == 0) evt.Set(); - } - catch - { - Interlocked.Increment(ref bgErrorCount); - } - } - int db = testMode == TestMode.Twemproxy ? 
0 : 2; - ThreadPool.QueueUserWorkItem(cb, c1.GetDatabase(db)); - ThreadPool.QueueUserWorkItem(cb, c2.GetDatabase(db)); - evt.WaitOne(8000); - } - Assert.Equal(0, Interlocked.CompareExchange(ref errorCount, 0, 0)); - Assert.Equal(0, bgErrorCount); - } - - [Fact] - public void TestOpCountByVersionLocal_UpLevel() - { - using (var conn = Create()) - { - TestLockOpCountByVersion(conn, 1, false); - TestLockOpCountByVersion(conn, 1, true); - } - } - - private void TestLockOpCountByVersion(IConnectionMultiplexer conn, int expectedOps, bool existFirst) - { - const int LockDuration = 30; - RedisKey Key = Me(); - - var db = conn.GetDatabase(); - db.KeyDelete(Key, CommandFlags.FireAndForget); - RedisValue newVal = "us:" + Guid.NewGuid().ToString(); - RedisValue expectedVal = newVal; - if (existFirst) - { - expectedVal = "other:" + Guid.NewGuid().ToString(); - db.StringSet(Key, expectedVal, TimeSpan.FromSeconds(LockDuration), flags: CommandFlags.FireAndForget); - } - long countBefore = GetServer(conn).GetCounters().Interactive.OperationCount; - - var taken = db.LockTake(Key, newVal, TimeSpan.FromSeconds(LockDuration)); - - long countAfter = GetServer(conn).GetCounters().Interactive.OperationCount; - var valAfter = db.StringGet(Key); - - Assert.Equal(!existFirst, taken); - Assert.Equal(expectedVal, valAfter); - Assert.Equal(expectedOps, countAfter - countBefore); - // note we get a ping from GetCounters - } - - private IConnectionMultiplexer Create(TestMode mode) - { - switch (mode) - { - case TestMode.MultiExec: - return Create(); - case TestMode.NoMultiExec: - return Create(disabledCommands: new[] { "multi", "exec" }); - case TestMode.Twemproxy: - return Create(proxy: Proxy.Twemproxy); - default: - throw new NotSupportedException(mode.ToString()); - } - } - - [Theory, MemberData(nameof(TestModes))] - public async Task TakeLockAndExtend(TestMode mode) - { - bool withTran = mode == TestMode.MultiExec; - using (var conn = Create(mode)) - { - RedisValue right = 
Guid.NewGuid().ToString(), - wrong = Guid.NewGuid().ToString(); - - int DB = mode == TestMode.Twemproxy ? 0 : 7; - RedisKey Key = Me(); - - var db = conn.GetDatabase(DB); - - db.KeyDelete(Key, CommandFlags.FireAndForget); - - var t1 = db.LockTakeAsync(Key, right, TimeSpan.FromSeconds(20)); - var t1b = db.LockTakeAsync(Key, wrong, TimeSpan.FromSeconds(10)); - var t2 = db.LockQueryAsync(Key); - var t3 = withTran ? db.LockReleaseAsync(Key, wrong) : null; - var t4 = db.LockQueryAsync(Key); - var t5 = withTran ? db.LockExtendAsync(Key, wrong, TimeSpan.FromSeconds(60)) : null; - var t6 = db.LockQueryAsync(Key); - var t7 = db.KeyTimeToLiveAsync(Key); - var t8 = db.LockExtendAsync(Key, right, TimeSpan.FromSeconds(60)); - var t9 = db.LockQueryAsync(Key); - var t10 = db.KeyTimeToLiveAsync(Key); - var t11 = db.LockReleaseAsync(Key, right); - var t12 = db.LockQueryAsync(Key); - var t13 = db.LockTakeAsync(Key, wrong, TimeSpan.FromSeconds(10)); - - Assert.NotEqual(default(RedisValue), right); - Assert.NotEqual(default(RedisValue), wrong); - Assert.NotEqual(right, wrong); - Assert.True(await t1, "1"); - Assert.False(await t1b, "1b"); - Assert.Equal(right, await t2); - if (withTran) Assert.False(await t3, "3"); - Assert.Equal(right, await t4); - if (withTran) Assert.False(await t5, "5"); - Assert.Equal(right, await t6); - var ttl = (await t7).Value.TotalSeconds; - Assert.True(ttl > 0 && ttl <= 20, "7"); - Assert.True(await t8, "8"); - Assert.Equal(right, await t9); - ttl = (await t10).Value.TotalSeconds; - Assert.True(ttl > 50 && ttl <= 60, "10"); - Assert.True(await t11, "11"); - Assert.Null((string)await t12); - Assert.True(await t13, "13"); - } - } - - //public void TestManualLockOpCountByVersion(RedisConnection conn, int expected, bool existFirst) - //{ - // const int DB = 0, LockDuration = 30; - // const string Key = "TestManualLockOpCountByVersion"; - // conn.Wait(conn.Open()); - // conn.Keys.Remove(DB, Key); - // var newVal = "us:" + CreateUniqueName(); - // string 
expectedVal = newVal; - // if (existFirst) - // { - // expectedVal = "other:" + CreateUniqueName(); - // conn.Strings.Set(DB, Key, expectedVal, LockDuration); - // } - // int countBefore = conn.GetCounters().MessagesSent; - - // var tran = conn.CreateTransaction(); - // tran.AddCondition(Condition.KeyNotExists(DB, Key)); - // tran.Strings.Set(DB, Key, newVal, LockDuration); - // var taken = conn.Wait(tran.Execute()); - - // int countAfter = conn.GetCounters().MessagesSent; - // var valAfter = conn.Wait(conn.Strings.GetString(DB, Key)); - // Assert.Equal(!existFirst, taken, "lock taken (manual)"); - // Assert.Equal(expectedVal, valAfter, "taker (manual)"); - // Assert.Equal(expected, (countAfter - countBefore) - 1, "expected ops (including ping) (manual)"); - // // note we get a ping from GetCounters - //} - - [Theory, MemberData(nameof(TestModes))] - public async Task TestBasicLockNotTaken(TestMode testMode) - { - using (var conn = Create(testMode)) - { - int errorCount = 0; - conn.ErrorMessage += delegate { Interlocked.Increment(ref errorCount); }; - Task taken = null; - Task newValue = null; - Task ttl = null; - - const int LOOP = 50; - var db = conn.GetDatabase(); - var key = Me(); - for (int i = 0; i < LOOP; i++) - { - _ = db.KeyDeleteAsync(key); - taken = db.LockTakeAsync(key, "new-value", TimeSpan.FromSeconds(10)); - newValue = db.StringGetAsync(key); - ttl = db.KeyTimeToLiveAsync(key); - } - Assert.True(await taken, "taken"); - Assert.Equal("new-value", await newValue); - var ttlValue = (await ttl).Value.TotalSeconds; - Assert.True(ttlValue >= 8 && ttlValue <= 10, "ttl"); - - Assert.Equal(0, errorCount); - } - } - - [Theory, MemberData(nameof(TestModes))] - public async Task TestBasicLockTaken(TestMode testMode) - { - using (var conn = Create(testMode)) - { - var db = conn.GetDatabase(); - var key = Me(); - db.KeyDelete(key, CommandFlags.FireAndForget); - db.StringSet(key, "old-value", TimeSpan.FromSeconds(20), flags: CommandFlags.FireAndForget); - var taken 
= db.LockTakeAsync(key, "new-value", TimeSpan.FromSeconds(10)); - var newValue = db.StringGetAsync(key); - var ttl = db.KeyTimeToLiveAsync(key); - - Assert.False(await taken, "taken"); - Assert.Equal("old-value", await newValue); - var ttlValue = (await ttl).Value.TotalSeconds; - Assert.True(ttlValue >= 18 && ttlValue <= 20, "ttl"); - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/LockingTests.cs b/tests/StackExchange.Redis.Tests/LockingTests.cs new file mode 100644 index 000000000..52d03bb83 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/LockingTests.cs @@ -0,0 +1,208 @@ +using System; +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +[Collection(NonParallelCollection.Name)] +public class LockingTests(ITestOutputHelper output) : TestBase(output) +{ + public enum TestMode + { + MultiExec, + NoMultiExec, + Twemproxy, + } + + public static IEnumerable> TestModes() + { + yield return new(TestMode.MultiExec); + yield return new(TestMode.NoMultiExec); + yield return new(TestMode.Twemproxy); + } + + [Theory, MemberData(nameof(TestModes))] + public void AggressiveParallel(TestMode testMode) + { + int count = 2; + int errorCount = 0; + int bgErrorCount = 0; + var evt = new ManualResetEvent(false); + var key = Me() + testMode; + using (var conn1 = Create(testMode)) + using (var conn2 = Create(testMode)) + { + void Inner(object? obj) + { + try + { + var conn = (IDatabase?)obj!; + conn.Multiplexer.ErrorMessage += (sender, e) => Interlocked.Increment(ref errorCount); + + for (int i = 0; i < 1000; i++) + { + conn.LockTakeAsync(key, "def", TimeSpan.FromSeconds(5)); + } + conn.Ping(); + if (Interlocked.Decrement(ref count) == 0) evt.Set(); + } + catch + { + Interlocked.Increment(ref bgErrorCount); + } + } + int db = testMode == TestMode.Twemproxy ? 
0 : 2; + ThreadPool.QueueUserWorkItem(Inner, conn1.GetDatabase(db)); + ThreadPool.QueueUserWorkItem(Inner, conn2.GetDatabase(db)); + evt.WaitOne(8000); + } + Assert.Equal(0, Interlocked.CompareExchange(ref errorCount, 0, 0)); + Assert.Equal(0, bgErrorCount); + } + + [Fact] + public async Task TestOpCountByVersionLocal_UpLevel() + { + await using var conn = Create(shared: false); + + TestLockOpCountByVersion(conn, 1, false); + TestLockOpCountByVersion(conn, 1, true); + } + + private void TestLockOpCountByVersion(IConnectionMultiplexer conn, int expectedOps, bool existFirst) + { + const int LockDuration = 30; + RedisKey key = Me(); + + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + RedisValue newVal = "us:" + Guid.NewGuid().ToString(); + RedisValue expectedVal = newVal; + if (existFirst) + { + expectedVal = "other:" + Guid.NewGuid().ToString(); + db.StringSet(key, expectedVal, TimeSpan.FromSeconds(LockDuration), flags: CommandFlags.FireAndForget); + } + long countBefore = GetServer(conn).GetCounters().Interactive.OperationCount; + + var taken = db.LockTake(key, newVal, TimeSpan.FromSeconds(LockDuration)); + + long countAfter = GetServer(conn).GetCounters().Interactive.OperationCount; + var valAfter = db.StringGet(key); + + Assert.Equal(!existFirst, taken); + Assert.Equal(expectedVal, valAfter); + // note we get a ping from GetCounters + Assert.True(countAfter - countBefore >= expectedOps, $"({countAfter} - {countBefore}) >= {expectedOps}"); + } + + private IConnectionMultiplexer Create(TestMode mode) => mode switch + { + TestMode.MultiExec => Create(), + TestMode.NoMultiExec => Create(disabledCommands: ["multi", "exec"]), + TestMode.Twemproxy => Create(proxy: Proxy.Twemproxy), + _ => throw new NotSupportedException(mode.ToString()), + }; + + [Theory, MemberData(nameof(TestModes))] + public async Task TakeLockAndExtend(TestMode testMode) + { + await using var conn = Create(testMode); + + RedisValue right = Guid.NewGuid().ToString(), + 
wrong = Guid.NewGuid().ToString(); + + int dbId = testMode == TestMode.Twemproxy ? 0 : 7; + RedisKey key = Me() + testMode; + + var db = conn.GetDatabase(dbId); + + db.KeyDelete(key, CommandFlags.FireAndForget); + + bool withTran = testMode == TestMode.MultiExec; + var t1 = db.LockTakeAsync(key, right, TimeSpan.FromSeconds(20)); + var t1b = db.LockTakeAsync(key, wrong, TimeSpan.FromSeconds(10)); + var t2 = db.LockQueryAsync(key); + var t3 = withTran ? db.LockReleaseAsync(key, wrong) : null; + var t4 = db.LockQueryAsync(key); + var t5 = withTran ? db.LockExtendAsync(key, wrong, TimeSpan.FromSeconds(60)) : null; + var t6 = db.LockQueryAsync(key); + var t7 = db.KeyTimeToLiveAsync(key); + var t8 = db.LockExtendAsync(key, right, TimeSpan.FromSeconds(60)); + var t9 = db.LockQueryAsync(key); + var t10 = db.KeyTimeToLiveAsync(key); + var t11 = db.LockReleaseAsync(key, right); + var t12 = db.LockQueryAsync(key); + var t13 = db.LockTakeAsync(key, wrong, TimeSpan.FromSeconds(10)); + + Assert.NotEqual(default(RedisValue), right); + Assert.NotEqual(default(RedisValue), wrong); + Assert.NotEqual(right, wrong); + Assert.True(await t1, "1"); + Assert.False(await t1b, "1b"); + Assert.Equal(right, await t2); + if (withTran) Assert.False(await t3!, "3"); + Assert.Equal(right, await t4); + if (withTran) Assert.False(await t5!, "5"); + Assert.Equal(right, await t6); + var ttl = (await t7)!.Value.TotalSeconds; + Assert.True(ttl > 0 && ttl <= 20, "7"); + Assert.True(await t8, "8"); + Assert.Equal(right, await t9); + ttl = (await t10)!.Value.TotalSeconds; + Assert.True(ttl > 50 && ttl <= 60, "10"); + Assert.True(await t11, "11"); + Assert.Null((string?)await t12); + Assert.True(await t13, "13"); + } + + [Theory, MemberData(nameof(TestModes))] + public async Task TestBasicLockNotTaken(TestMode testMode) + { + await using var conn = Create(testMode); + + int errorCount = 0; + conn.ErrorMessage += (sender, e) => Interlocked.Increment(ref errorCount); + Task? taken = null; + Task? 
newValue = null; + Task? ttl = null; + + const int LOOP = 50; + var db = conn.GetDatabase(); + var key = Me() + testMode; + for (int i = 0; i < LOOP; i++) + { + _ = db.KeyDeleteAsync(key); + taken = db.LockTakeAsync(key, "new-value", TimeSpan.FromSeconds(10)); + newValue = db.StringGetAsync(key); + ttl = db.KeyTimeToLiveAsync(key); + } + Assert.True(await taken!, "taken"); + Assert.Equal("new-value", await newValue!); + var ttlValue = (await ttl!)!.Value.TotalSeconds; + Assert.True(ttlValue >= 8 && ttlValue <= 10, "ttl"); + + Assert.Equal(0, errorCount); + } + + [Theory, MemberData(nameof(TestModes))] + public async Task TestBasicLockTaken(TestMode testMode) + { + await using var conn = Create(testMode); + + var db = conn.GetDatabase(); + var key = Me() + testMode; + db.KeyDelete(key, CommandFlags.FireAndForget); + db.StringSet(key, "old-value", TimeSpan.FromSeconds(20), flags: CommandFlags.FireAndForget); + var taken = db.LockTakeAsync(key, "new-value", TimeSpan.FromSeconds(10)); + var newValue = db.StringGetAsync(key); + var ttl = db.KeyTimeToLiveAsync(key); + + Assert.False(await taken, "taken"); + Assert.Equal("old-value", await newValue); + var ttlValue = (await ttl)!.Value.TotalSeconds; + Assert.True(ttlValue >= 18 && ttlValue <= 20, "ttl"); + } +} diff --git a/tests/StackExchange.Redis.Tests/LoggerTests.cs b/tests/StackExchange.Redis.Tests/LoggerTests.cs new file mode 100644 index 000000000..682856baa --- /dev/null +++ b/tests/StackExchange.Redis.Tests/LoggerTests.cs @@ -0,0 +1,129 @@ +using System; +using System.IO; +using System.Text; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Logging.Abstractions; +using Xunit; + +namespace StackExchange.Redis.Tests; + +[Collection(NonParallelCollection.Name)] +public class LoggerTests(ITestOutputHelper output) : TestBase(output) +{ + [Fact] + public async Task BasicLoggerConfig() + { + var traceLogger = new TestLogger(LogLevel.Trace, Writer); + 
var debugLogger = new TestLogger(LogLevel.Debug, Writer); + var infoLogger = new TestLogger(LogLevel.Information, Writer); + var warningLogger = new TestLogger(LogLevel.Warning, Writer); + var errorLogger = new TestLogger(LogLevel.Error, Writer); + var criticalLogger = new TestLogger(LogLevel.Critical, Writer); + + var options = ConfigurationOptions.Parse(GetConfiguration()); + options.LoggerFactory = new TestWrapperLoggerFactory(new TestMultiLogger(traceLogger, debugLogger, infoLogger, warningLogger, errorLogger, criticalLogger)); + + await using var conn = await ConnectionMultiplexer.ConnectAsync(options); + // We expect more at the trace level: GET, ECHO, PING on commands + Assert.True(traceLogger.CallCount > debugLogger.CallCount); + // Many calls for all log lines - don't set exact here since every addition would break the test + Assert.True(debugLogger.CallCount > 30); + Assert.True(infoLogger.CallCount > 30); + // No debug calls at this time + // We expect no error/critical level calls to have happened here + Assert.Equal(0, errorLogger.CallCount); + Assert.Equal(0, criticalLogger.CallCount); + } + + [Fact] + public async Task WrappedLogger() + { + var options = ConfigurationOptions.Parse(GetConfiguration()); + var wrapped = new TestWrapperLoggerFactory(NullLogger.Instance); + options.LoggerFactory = wrapped; + + await using var conn = await ConnectionMultiplexer.ConnectAsync(options); + Assert.True(wrapped.Logger.LogCount > 0); + } + + public class TestWrapperLoggerFactory(ILogger logger) : ILoggerFactory + { + public TestWrapperLogger Logger { get; } = new TestWrapperLogger(logger); + + public void AddProvider(ILoggerProvider provider) { } + public ILogger CreateLogger(string categoryName) => Logger; + public void Dispose() { } + } + + public class TestWrapperLogger(ILogger toWrap) : ILogger + { + public int LogCount = 0; + private ILogger Inner { get; } = toWrap; + +#if NET8_0_OR_GREATER + public IDisposable? 
BeginScope(TState state) where TState : notnull => Inner.BeginScope(state); +#else + public IDisposable BeginScope(TState state) => Inner.BeginScope(state); +#endif + public bool IsEnabled(LogLevel logLevel) => Inner.IsEnabled(logLevel); + public void Log(LogLevel logLevel, EventId eventId, TState state, Exception? exception, Func formatter) + { + Interlocked.Increment(ref LogCount); + Inner.Log(logLevel, eventId, state, exception, formatter); + } + } + + /// + /// To save on test time, no reason to spin up n connections just to test n logging implementations... + /// + private sealed class TestMultiLogger(params ILogger[] loggers) : ILogger + { +#if NET8_0_OR_GREATER + public IDisposable? BeginScope(TState state) where TState : notnull => null; +#else + public IDisposable BeginScope(TState state) => null!; +#endif + public bool IsEnabled(LogLevel logLevel) => true; + public void Log(LogLevel logLevel, EventId eventId, TState state, Exception? exception, Func formatter) + { + foreach (var logger in loggers) + { + logger.Log(logLevel, eventId, state, exception, formatter); + } + } + } + + private sealed class TestLogger : ILogger + { + private readonly StringBuilder sb = new StringBuilder(); + private long _callCount; + private readonly LogLevel _logLevel; + private readonly TextWriter _output; + public TestLogger(LogLevel logLevel, TextWriter output) => + (_logLevel, _output) = (logLevel, output); + +#if NET8_0_OR_GREATER + public IDisposable? BeginScope(TState state) where TState : notnull => null; +#else + public IDisposable BeginScope(TState state) => null!; +#endif + public bool IsEnabled(LogLevel logLevel) => logLevel >= _logLevel; + public void Log(LogLevel logLevel, EventId eventId, TState state, Exception? 
exception, Func formatter) + { + if (!IsEnabled(logLevel)) + { + return; + } + + Interlocked.Increment(ref _callCount); + var logLine = $"{_logLevel}> [LogLevel: {logLevel}, EventId: {eventId}]: {formatter?.Invoke(state, exception)}"; + sb.AppendLine(logLine); + _output.WriteLine(logLine); + } + + public long CallCount => Interlocked.Read(ref _callCount); + public override string ToString() => sb.ToString(); + } +} diff --git a/tests/StackExchange.Redis.Tests/MSetTests.cs b/tests/StackExchange.Redis.Tests/MSetTests.cs new file mode 100644 index 000000000..8657c9d5a --- /dev/null +++ b/tests/StackExchange.Redis.Tests/MSetTests.cs @@ -0,0 +1,166 @@ +using System; +using System.Collections.Generic; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class MSetTests(ITestOutputHelper output, SharedConnectionFixture fixture) : TestBase(output, fixture) +{ + [Theory] + [InlineData(0, When.Always)] + [InlineData(1, When.Always)] + [InlineData(2, When.Always)] + [InlineData(10, When.Always)] + [InlineData(0, When.NotExists)] + [InlineData(1, When.NotExists)] + [InlineData(2, When.NotExists)] + [InlineData(10, When.NotExists)] + [InlineData(0, When.NotExists, true)] + [InlineData(1, When.NotExists, true)] + [InlineData(2, When.NotExists, true)] + [InlineData(10, When.NotExists, true)] + [InlineData(0, When.Exists)] + [InlineData(1, When.Exists)] + [InlineData(2, When.Exists)] + [InlineData(10, When.Exists)] + [InlineData(0, When.Exists, true)] + [InlineData(1, When.Exists, true)] + [InlineData(2, When.Exists, true)] + [InlineData(10, When.Exists, true)] + public async Task AddWithoutExpiration(int count, When when, bool precreate = false) + { + await using var conn = Create(require: (when == When.Exists && count > 1) ? 
RedisFeatures.v8_4_0_rc1 : null); + var pairs = new KeyValuePair[count]; + var key = Me(); + for (int i = 0; i < count; i++) + { + // note the unusual braces; this is to force (on cluster) a hash-slot based on key + pairs[i] = new KeyValuePair($"{{{key}}}_{i}", $"value {i}"); + } + + var keys = Array.ConvertAll(pairs, pair => pair.Key); + var db = conn.GetDatabase(); + // set initial state + await db.KeyDeleteAsync(keys, flags: CommandFlags.FireAndForget); + if (precreate) + { + foreach (var pair in pairs) + { + await db.StringSetAsync(pair.Key, "dummy value", flags: CommandFlags.FireAndForget); + } + } + + bool expected = count != 0 & when switch + { + When.Always => true, + When.Exists => precreate, + When.NotExists => !precreate, + _ => throw new ArgumentOutOfRangeException(nameof(when)), + }; + + // issue the test command + var actualPending = db.StringSetAsync(pairs, when); + var values = await db.StringGetAsync(keys); // pipelined + var actual = await actualPending; + + // check the state *after* the command + Assert.Equal(expected, actual); + Assert.Equal(count, values.Length); + for (int i = 0; i < count; i++) + { + if (expected) + { + Assert.Equal(pairs[i].Value, values[i]); + } + else + { + Assert.NotEqual(pairs[i].Value, values[i]); + } + } + } + + [Theory] + [InlineData(0, When.Always)] + [InlineData(1, When.Always)] + [InlineData(2, When.Always)] + [InlineData(10, When.Always)] + [InlineData(0, When.NotExists)] + [InlineData(1, When.NotExists)] + [InlineData(2, When.NotExists)] + [InlineData(10, When.NotExists)] + [InlineData(0, When.NotExists, true)] + [InlineData(1, When.NotExists, true)] + [InlineData(2, When.NotExists, true)] + [InlineData(10, When.NotExists, true)] + [InlineData(0, When.Exists)] + [InlineData(1, When.Exists)] + [InlineData(2, When.Exists)] + [InlineData(10, When.Exists)] + [InlineData(0, When.Exists, true)] + [InlineData(1, When.Exists, true)] + [InlineData(2, When.Exists, true)] + [InlineData(10, When.Exists, true)] + public 
async Task AddWithRelativeExpiration(int count, When when, bool precreate = false) + { + await using var conn = Create(require: count > 1 ? RedisFeatures.v8_4_0_rc1 : null); + var pairs = new KeyValuePair[count]; + var key = Me(); + for (int i = 0; i < count; i++) + { + // note the unusual braces; this is to force (on cluster) a hash-slot based on key + pairs[i] = new KeyValuePair($"{{{key}}}_{i}", $"value {i}"); + } + var expiry = TimeSpan.FromMinutes(10); + + var keys = Array.ConvertAll(pairs, pair => pair.Key); + var db = conn.GetDatabase(); + // set initial state + await db.KeyDeleteAsync(keys, flags: CommandFlags.FireAndForget); + if (precreate) + { + foreach (var pair in pairs) + { + await db.StringSetAsync(pair.Key, "dummy value", flags: CommandFlags.FireAndForget); + } + } + + bool expected = count != 0 & when switch + { + When.Always => true, + When.Exists => precreate, + When.NotExists => !precreate, + _ => throw new ArgumentOutOfRangeException(nameof(when)), + }; + + // issue the test command + var actualPending = db.StringSetAsync(pairs, when, expiry); + Task[] ttls = new Task[count]; + for (int i = 0; i < count; i++) + { + ttls[i] = db.KeyTimeToLiveAsync(keys[i]); + } + await Task.WhenAll(ttls); + var values = await db.StringGetAsync(keys); // pipelined + var actual = await actualPending; + + // check the state *after* the command + Assert.Equal(expected, actual); + Assert.Equal(count, values.Length); + for (int i = 0; i < count; i++) + { + var ttl = await ttls[i]; + if (expected) + { + Assert.Equal(pairs[i].Value, values[i]); + Assert.NotNull(ttl); + Assert.True(ttl > TimeSpan.Zero && ttl <= expiry); + } + else + { + Assert.NotEqual(pairs[i].Value, values[i]); + Assert.Null(ttl); + } + } + } +} diff --git a/tests/StackExchange.Redis.Tests/MassiveOps.cs b/tests/StackExchange.Redis.Tests/MassiveOps.cs deleted file mode 100644 index a0d6a4fa0..000000000 --- a/tests/StackExchange.Redis.Tests/MassiveOps.cs +++ /dev/null @@ -1,119 +0,0 @@ -using 
System.Diagnostics; -using System.Threading; -using System.Threading.Tasks; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - [Collection(NonParallelCollection.Name)] - public class MassiveOps : TestBase - { - public MassiveOps(ITestOutputHelper output) : base(output) { } - - [FactLongRunning] - public async Task LongRunning() - { - var key = Me(); - using (var conn = Create()) - { - var db = conn.GetDatabase(); - db.KeyDelete(key, CommandFlags.FireAndForget); - db.StringSet(key, "test value", flags: CommandFlags.FireAndForget); - for (var i = 0; i < 200; i++) - { - var val = await db.StringGetAsync(key).ForAwait(); - Assert.Equal("test value", val); - await Task.Delay(50).ForAwait(); - } - } - } - - [Theory] - [InlineData(true)] - [InlineData(false)] - public async Task MassiveBulkOpsAsync(bool withContinuation) - { - using (var muxer = Create()) - { - RedisKey key = Me(); - var conn = muxer.GetDatabase(); - await conn.PingAsync().ForAwait(); - void nonTrivial(Task _) - { - Thread.SpinWait(5); - } - var watch = Stopwatch.StartNew(); - for (int i = 0; i <= AsyncOpsQty; i++) - { - var t = conn.StringSetAsync(key, i); -#pragma warning disable CS4014 // Because this call is not awaited, execution of the current method continues before the call is completed - if (withContinuation) t.ContinueWith(nonTrivial); -#pragma warning restore CS4014 // Because this call is not awaited, execution of the current method continues before the call is completed - } - Assert.Equal(AsyncOpsQty, await conn.StringGetAsync(key).ForAwait()); - watch.Stop(); - Log("{2}: Time for {0} ops: {1}ms ({3}, any order); ops/s: {4}", AsyncOpsQty, watch.ElapsedMilliseconds, Me(), - withContinuation ? 
"with continuation" : "no continuation", AsyncOpsQty / watch.Elapsed.TotalSeconds); - } - } - - [TheoryLongRunning] - [InlineData(1)] - [InlineData(5)] - [InlineData(10)] - [InlineData(50)] - public void MassiveBulkOpsSync(int threads) - { - int workPerThread = SyncOpsQty / threads; - using (var muxer = Create(syncTimeout: 30000)) - { - RedisKey key = Me(); - var conn = muxer.GetDatabase(); - conn.KeyDelete(key, CommandFlags.FireAndForget); - var timeTaken = RunConcurrent(delegate - { - for (int i = 0; i < workPerThread; i++) - { - conn.StringIncrement(key, flags: CommandFlags.FireAndForget); - } - }, threads); - - int val = (int)conn.StringGet(key); - Assert.Equal(workPerThread * threads, val); - Log("{2}: Time for {0} ops on {3} threads: {1}ms (any order); ops/s: {4}", - threads * workPerThread, timeTaken.TotalMilliseconds, Me(), threads, (workPerThread * threads) / timeTaken.TotalSeconds); - } - } - - [Theory] - [InlineData(1)] - [InlineData(5)] - public void MassiveBulkOpsFireAndForget(int threads) - { - using (var muxer = Create(syncTimeout: 30000)) - { - RedisKey key = Me(); - var conn = muxer.GetDatabase(); - conn.Ping(); - - conn.KeyDelete(key, CommandFlags.FireAndForget); - int perThread = AsyncOpsQty / threads; - var elapsed = RunConcurrent(delegate - { - for (int i = 0; i < perThread; i++) - { - conn.StringIncrement(key, flags: CommandFlags.FireAndForget); - } - conn.Ping(); - }, threads); - var val = (long)conn.StringGet(key); - Assert.Equal(perThread * threads, val); - - Log("{2}: Time for {0} ops over {4} threads: {1:###,###}ms (any order); ops/s: {3:###,###,##0}", - val, elapsed.TotalMilliseconds, Me(), - val / elapsed.TotalSeconds, threads); - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/MassiveOpsTests.cs b/tests/StackExchange.Redis.Tests/MassiveOpsTests.cs new file mode 100644 index 000000000..0140c7c97 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/MassiveOpsTests.cs @@ -0,0 +1,115 @@ +using System.Diagnostics; +using 
System.Threading; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +[Collection(NonParallelCollection.Name)] +public class MassiveOpsTests(ITestOutputHelper output) : TestBase(output) +{ + [Fact] + public async Task LongRunning() + { + Skip.UnlessLongRunning(); + await using var conn = Create(); + + var key = Me(); + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + db.StringSet(key, "test value", flags: CommandFlags.FireAndForget); + for (var i = 0; i < 200; i++) + { + var val = await db.StringGetAsync(key).ForAwait(); + Assert.Equal("test value", val); + await Task.Delay(50).ForAwait(); + } + } + + [Theory] + [InlineData(true)] + [InlineData(false)] + public async Task MassiveBulkOpsAsync(bool withContinuation) + { + await using var conn = Create(); + + RedisKey key = Me(); + var db = conn.GetDatabase(); + await db.PingAsync().ForAwait(); + static void NonTrivial(Task unused) + { + Thread.SpinWait(5); + } + var watch = Stopwatch.StartNew(); + for (int i = 0; i <= AsyncOpsQty; i++) + { + var t = db.StringSetAsync(key, i); + if (withContinuation) + { + // Intentionally unawaited + _ = t.ContinueWith(NonTrivial); + } + } + Assert.Equal(AsyncOpsQty, await db.StringGetAsync(key).ForAwait()); + watch.Stop(); + Log($"{Me()}: Time for {AsyncOpsQty} ops: {watch.ElapsedMilliseconds}ms ({(withContinuation ? 
"with continuation" : "no continuation")}, any order); ops/s: {AsyncOpsQty / watch.Elapsed.TotalSeconds}"); + } + + [Theory] + [InlineData(1)] + [InlineData(5)] + [InlineData(10)] + [InlineData(50)] + public async Task MassiveBulkOpsSync(int threads) + { + Skip.UnlessLongRunning(); + await using var conn = Create(syncTimeout: 30000); + + RedisKey key = Me(); + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + int workPerThread = SyncOpsQty / threads; + var timeTaken = RunConcurrent( + () => + { + for (int i = 0; i < workPerThread; i++) + { + db.StringIncrement(key, flags: CommandFlags.FireAndForget); + } + }, + threads); + + int val = (int)db.StringGet(key); + Assert.Equal(workPerThread * threads, val); + Log($"{Me()}: Time for {threads * workPerThread} ops on {threads} threads: {timeTaken.TotalMilliseconds}ms (any order); ops/s: {(workPerThread * threads) / timeTaken.TotalSeconds}"); + } + + [Theory] + [InlineData(1)] + [InlineData(5)] + public async Task MassiveBulkOpsFireAndForget(int threads) + { + await using var conn = Create(syncTimeout: 30000); + + RedisKey key = Me(); + var db = conn.GetDatabase(); + await db.PingAsync(); + + db.KeyDelete(key, CommandFlags.FireAndForget); + int perThread = AsyncOpsQty / threads; + var elapsed = RunConcurrent( + () => + { + for (int i = 0; i < perThread; i++) + { + db.StringIncrement(key, flags: CommandFlags.FireAndForget); + } + db.Ping(); + }, + threads); + var val = (long)db.StringGet(key); + Assert.Equal(perThread * threads, val); + + Log($"{Me()}: Time for {val} ops over {threads} threads: {elapsed.TotalMilliseconds:###,###}ms (any order); ops/s: {val / elapsed.TotalSeconds:###,###,##0}"); + } +} diff --git a/tests/StackExchange.Redis.Tests/Memory.cs b/tests/StackExchange.Redis.Tests/Memory.cs deleted file mode 100644 index ba0d37674..000000000 --- a/tests/StackExchange.Redis.Tests/Memory.cs +++ /dev/null @@ -1,84 +0,0 @@ -using System.Threading.Tasks; -using Xunit; -using 
Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - [Collection(SharedConnectionFixture.Key)] - public class Memory : TestBase - { - public Memory(ITestOutputHelper output, SharedConnectionFixture fixture) : base(output, fixture) { } - - [Fact] - public async Task CanCallDoctor() - { - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Memory), r => r.Streams); - var server = conn.GetServer(conn.GetEndPoints()[0]); - string doctor = server.MemoryDoctor(); - Assert.NotNull(doctor); - Assert.NotEqual("", doctor); - - doctor = await server.MemoryDoctorAsync(); - Assert.NotNull(doctor); - Assert.NotEqual("", doctor); - } - } - - [Fact] - public async Task CanPurge() - { - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Memory), r => r.Streams); - var server = conn.GetServer(conn.GetEndPoints()[0]); - server.MemoryPurge(); - await server.MemoryPurgeAsync(); - - await server.MemoryPurgeAsync(); - } - } - - [Fact] - public async Task GetAllocatorStats() - { - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Memory), r => r.Streams); - var server = conn.GetServer(conn.GetEndPoints()[0]); - - var stats = server.MemoryAllocatorStats(); - Assert.False(string.IsNullOrWhiteSpace(stats)); - - stats = await server.MemoryAllocatorStatsAsync(); - Assert.False(string.IsNullOrWhiteSpace(stats)); - } - } - - [Fact] - public async Task GetStats() - { - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Memory), r => r.Streams); - var server = conn.GetServer(conn.GetEndPoints()[0]); - var stats = server.MemoryStats(); - Assert.Equal(ResultType.MultiBulk, stats.Type); - - var parsed = stats.ToDictionary(); - - var alloc = parsed["total.allocated"]; - Assert.Equal(ResultType.Integer, alloc.Type); - Assert.True(alloc.AsInt64() > 0); - - stats = await server.MemoryStatsAsync(); - Assert.Equal(ResultType.MultiBulk, stats.Type); - - alloc = 
parsed["total.allocated"]; - Assert.Equal(ResultType.Integer, alloc.Type); - Assert.True(alloc.AsInt64() > 0); - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/MemoryTests.cs b/tests/StackExchange.Redis.Tests/MemoryTests.cs new file mode 100644 index 000000000..48d3ea705 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/MemoryTests.cs @@ -0,0 +1,73 @@ +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class MemoryTests(ITestOutputHelper output, SharedConnectionFixture fixture) : TestBase(output, fixture) +{ + [Fact] + public async Task CanCallDoctor() + { + await using var conn = Create(require: RedisFeatures.v4_0_0); + + var server = conn.GetServer(conn.GetEndPoints()[0]); + string? doctor = server.MemoryDoctor(); + Assert.NotNull(doctor); + Assert.NotEqual("", doctor); + + doctor = await server.MemoryDoctorAsync(); + Assert.NotNull(doctor); + Assert.NotEqual("", doctor); + } + + [Fact] + public async Task CanPurge() + { + await using var conn = Create(require: RedisFeatures.v4_0_0); + + var server = conn.GetServer(conn.GetEndPoints()[0]); + server.MemoryPurge(); + await server.MemoryPurgeAsync(); + + await server.MemoryPurgeAsync(); + } + + [Fact] + public async Task GetAllocatorStats() + { + await using var conn = Create(require: RedisFeatures.v4_0_0); + + var server = conn.GetServer(conn.GetEndPoints()[0]); + + var stats = server.MemoryAllocatorStats(); + Assert.False(string.IsNullOrWhiteSpace(stats)); + + stats = await server.MemoryAllocatorStatsAsync(); + Assert.False(string.IsNullOrWhiteSpace(stats)); + } + + [Fact] + public async Task GetStats() + { + await using var conn = Create(require: RedisFeatures.v4_0_0); + + var server = conn.GetServer(conn.GetEndPoints()[0]); + var stats = server.MemoryStats(); + Assert.NotNull(stats); + Assert.Equal(ResultType.Array, stats.Resp2Type); + + var parsed = stats.ToDictionary(); + + var alloc = parsed["total.allocated"]; + Assert.Equal(ResultType.Integer, 
alloc.Resp2Type); + Assert.True(alloc.AsInt64() > 0); + + stats = await server.MemoryStatsAsync(); + Assert.NotNull(stats); + Assert.Equal(ResultType.Array, stats.Resp2Type); + + alloc = parsed["total.allocated"]; + Assert.Equal(ResultType.Integer, alloc.Resp2Type); + Assert.True(alloc.AsInt64() > 0); + } +} diff --git a/tests/StackExchange.Redis.Tests/Migrate.cs b/tests/StackExchange.Redis.Tests/Migrate.cs deleted file mode 100644 index 3aa797fc8..000000000 --- a/tests/StackExchange.Redis.Tests/Migrate.cs +++ /dev/null @@ -1,60 +0,0 @@ -#pragma warning disable RCS1090 // Call 'ConfigureAwait(false)'. - -using System; -using System.Linq; -using System.Threading.Tasks; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - public class Migrate : TestBase - { - public Migrate(ITestOutputHelper output) : base (output) { } - - [Fact] - public async Task Basic() - { - var fromConfig = new ConfigurationOptions { EndPoints = { { TestConfig.Current.SecureServer, TestConfig.Current.SecurePort } }, Password = TestConfig.Current.SecurePassword, AllowAdmin = true }; - var toConfig = new ConfigurationOptions { EndPoints = { { TestConfig.Current.MasterServer, TestConfig.Current.MasterPort } }, AllowAdmin = true }; - using (var from = ConnectionMultiplexer.Connect(fromConfig, Writer)) - using (var to = ConnectionMultiplexer.Connect(toConfig, Writer)) - { - if (await IsWindows(from) || await IsWindows(to)) - Skip.Inconclusive("'migrate' is unreliable on redis-64"); - - RedisKey key = Me(); - var fromDb = from.GetDatabase(); - var toDb = to.GetDatabase(); - fromDb.KeyDelete(key, CommandFlags.FireAndForget); - toDb.KeyDelete(key, CommandFlags.FireAndForget); - fromDb.StringSet(key, "foo", flags: CommandFlags.FireAndForget); - var dest = to.GetEndPoints(true).Single(); - Log("Migrating key..."); - fromDb.KeyMigrate(key, dest, migrateOptions: MigrateOptions.Replace); - Log("Migration command complete"); - - // this is *meant* to be synchronous at the 
redis level, but - // we keep seeing it fail on the CI server where the key has *left* the origin, but - // has *not* yet arrived at the destination; adding a pause while we investigate with - // the redis folks - await UntilCondition(TimeSpan.FromSeconds(15), () => !fromDb.KeyExists(key) && toDb.KeyExists(key)); - - Assert.False(fromDb.KeyExists(key), "Exists at source"); - Assert.True(toDb.KeyExists(key), "Exists at destination"); - string s = toDb.StringGet(key); - Assert.Equal("foo", s); - } - } - - private async Task IsWindows(ConnectionMultiplexer conn) - { - var server = conn.GetServer(conn.GetEndPoints().First()); - var section = (await server.InfoAsync("server")).Single(); - var os = section.FirstOrDefault( - x => string.Equals("os", x.Key, StringComparison.OrdinalIgnoreCase)); - // note: WSL returns things like "os:Linux 4.4.0-17134-Microsoft x86_64" - return string.Equals("windows", os.Value, StringComparison.OrdinalIgnoreCase); - } - } -} diff --git a/tests/StackExchange.Redis.Tests/MigrateTests.cs b/tests/StackExchange.Redis.Tests/MigrateTests.cs new file mode 100644 index 000000000..9939e0632 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/MigrateTests.cs @@ -0,0 +1,55 @@ +using System; +using System.Linq; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class MigrateTests(ITestOutputHelper output) : TestBase(output) +{ + [Fact] + public async Task Basic() + { + Skip.UnlessLongRunning(); + var fromConfig = new ConfigurationOptions { EndPoints = { { TestConfig.Current.SecureServer, TestConfig.Current.SecurePort } }, Password = TestConfig.Current.SecurePassword, AllowAdmin = true }; + var toConfig = new ConfigurationOptions { EndPoints = { { TestConfig.Current.PrimaryServer, TestConfig.Current.PrimaryPort } }, AllowAdmin = true }; + + await using var fromConn = ConnectionMultiplexer.Connect(fromConfig, Writer); + await using var toConn = ConnectionMultiplexer.Connect(toConfig, Writer); + + if (await 
IsWindows(fromConn) || await IsWindows(toConn)) + Assert.Skip("'migrate' is unreliable on redis-64"); + + RedisKey key = Me(); + var fromDb = fromConn.GetDatabase(); + var toDb = toConn.GetDatabase(); + fromDb.KeyDelete(key, CommandFlags.FireAndForget); + toDb.KeyDelete(key, CommandFlags.FireAndForget); + fromDb.StringSet(key, "foo", flags: CommandFlags.FireAndForget); + var dest = toConn.GetEndPoints(true).Single(); + Log("Migrating key..."); + fromDb.KeyMigrate(key, dest, migrateOptions: MigrateOptions.Replace); + Log("Migration command complete"); + + // this is *meant* to be synchronous at the redis level, but + // we keep seeing it fail on the CI server where the key has *left* the origin, but + // has *not* yet arrived at the destination; adding a pause while we investigate with + // the redis folks + await UntilConditionAsync(TimeSpan.FromSeconds(15), () => !fromDb.KeyExists(key) && toDb.KeyExists(key)); + + Assert.False(fromDb.KeyExists(key), "Exists at source"); + Assert.True(toDb.KeyExists(key), "Exists at destination"); + string? 
s = toDb.StringGet(key); + Assert.Equal("foo", s); + } + + private static async Task IsWindows(ConnectionMultiplexer conn) + { + var server = conn.GetServer(conn.GetEndPoints().First()); + var section = (await server.InfoAsync("server")).Single(); + var os = section.FirstOrDefault( + x => string.Equals("os", x.Key, StringComparison.OrdinalIgnoreCase)); + // note: WSL returns things like "os:Linux 4.4.0-17134-Microsoft x86_64" + return string.Equals("windows", os.Value, StringComparison.OrdinalIgnoreCase); + } +} diff --git a/tests/StackExchange.Redis.Tests/MovedTestServer.cs b/tests/StackExchange.Redis.Tests/MovedTestServer.cs new file mode 100644 index 000000000..89a8567d0 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/MovedTestServer.cs @@ -0,0 +1,119 @@ +using System; +using System.Collections.Concurrent; +using System.Collections.Generic; +using System.Net; +using System.Net.Sockets; +using System.Text; +using System.Threading; +using System.Threading.Tasks; +using StackExchange.Redis.Server; +using Xunit; + +namespace StackExchange.Redis.Tests; + +/// +/// Test Redis/Valkey server that simulates MOVED errors pointing to the same endpoint. +/// Used to verify client reconnection behavior when the server is behind DNS/load balancers/proxies. +/// When a MOVED error points to the same endpoint, it signals the client to reconnect before retrying the command, +/// allowing the DNS record/proxy/load balancer to route the connection to a different underlying server host. +/// +public class MovedTestServer : InProcessTestServer +{ + /// + /// Represents the simulated server host state behind a proxy/load balancer. + /// + private enum SimulatedHost + { + /// + /// Old server that returns MOVED errors for the trigger key (pre-migration state). + /// + OldServer, + + /// + /// New server that handles requests normally (post-migration state). 
+ /// + NewServer, + } + + private int _setCmdCount = 0; + private int _movedResponseCount = 0; + + private SimulatedHost _currentServerHost = SimulatedHost.OldServer; + + private readonly RedisKey _triggerKey; + + public MovedTestServer(in RedisKey triggerKey, ITestOutputHelper? log = null) : base(log) + { + _triggerKey = triggerKey; + } + + private sealed class MovedTestClient(MovedTestServer server, Node node, SimulatedHost assignedHost) : RedisClient(node) + { + public SimulatedHost AssignedHost => assignedHost; + + public override void OnKey(in RedisKey key, KeyFlags flags) + { + if (AssignedHost == SimulatedHost.OldServer && key == server._triggerKey) + { + server.OnTrigger(Id, key, assignedHost); + } + base.OnKey(in key, flags); + } + } + + /// + /// Called when a new client connection is established. + /// Assigns the client to the current server host state (simulating proxy/load balancer routing). + /// + public override RedisClient CreateClient(Node node) => new MovedTestClient(this, node, _currentServerHost); + + public override void OnClientConnected(RedisClient client, object state) + { + if (client is MovedTestClient movedClient) + { + Log($"[{client}] connected (assigned to {movedClient.AssignedHost}), total connections: {TotalClientCount}"); + } + base.OnClientConnected(client, state); + } + + /// + /// Handles SET commands. Returns MOVED error for the trigger key when requested by clients + /// connected to the old server, simulating a server migration behind a proxy/load balancer. 
+ /// + protected override TypedRedisValue Set(RedisClient client, in RedisRequest request) + { + Interlocked.Increment(ref _setCmdCount); + return base.Set(client, request); + } + + private void OnTrigger(int clientId, in RedisKey key, SimulatedHost assignedHost) + { + // Transition server to new host (so future connections know they're on the new server) + _currentServerHost = SimulatedHost.NewServer; + + Interlocked.Increment(ref _movedResponseCount); + + Log($"Triggering MOVED on Client {clientId} ({assignedHost}) with key: {key}"); + KeyMovedException.Throw(key); + } + + /// + /// Gets the number of SET commands executed. + /// + public int SetCmdCount => _setCmdCount; + + /// + /// Gets the number of times MOVED response was returned. + /// + public int MovedResponseCount => _movedResponseCount; + + /// + /// Resets all counters for test reusability. + /// + public override void ResetCounters() + { + Interlocked.Exchange(ref _setCmdCount, 0); + Interlocked.Exchange(ref _movedResponseCount, 0); + base.ResetCounters(); + } +} diff --git a/tests/StackExchange.Redis.Tests/MovedUnitTests.cs b/tests/StackExchange.Redis.Tests/MovedUnitTests.cs new file mode 100644 index 000000000..5618adf27 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/MovedUnitTests.cs @@ -0,0 +1,157 @@ +using System.Collections.Generic; +using System.Runtime.CompilerServices; +using System.Threading.Tasks; +using StackExchange.Redis.Configuration; +using Xunit; + +namespace StackExchange.Redis.Tests; + +/// +/// Integration tests for MOVED-to-same-endpoint error handling. +/// When a MOVED error points to the same endpoint, the client should reconnect before retrying, +/// allowing the DNS record/proxy/load balancer to route to a different underlying server host. 
+/// +[RunPerProtocol] +public class MovedUnitTests(ITestOutputHelper log) +{ + private RedisKey Me([CallerMemberName] string callerName = "") => callerName; + + [Theory] + [InlineData(ServerType.Cluster)] + [InlineData(ServerType.Standalone)] + public async Task CrossSlotDisallowed(ServerType serverType) + { + // intentionally sending as strings (not keys) via execute to prevent the + // client library from getting in our way + string keyA = "abc", keyB = "def"; // known to be on different slots + + using var server = new InProcessTestServer(log) { ServerType = serverType }; + await using var muxer = await server.ConnectAsync(); + + var db = muxer.GetDatabase(); + await db.StringSetAsync(keyA, "value", flags: CommandFlags.FireAndForget); + + var pending = db.ExecuteAsync("rename", keyA, keyB); + if (serverType == ServerType.Cluster) + { + var ex = await Assert.ThrowsAsync(() => pending); + Assert.Contains("CROSSSLOT", ex.Message); + + Assert.Equal("value", await db.StringGetAsync(keyA)); + Assert.False(await db.KeyExistsAsync(keyB)); + } + else + { + await pending; + Assert.False(await db.KeyExistsAsync(keyA)); + Assert.Equal("value", await db.StringGetAsync(keyB)); + } + } + + [Theory] + [InlineData(true, false)] + [InlineData(false, false)] + [InlineData(true, true)] + [InlineData(false, true)] + public async Task KeyMigrationFollowed(bool allowFollowRedirects, bool toNewUnknownNode) + { + RedisKey key = Me(); + using var server = new InProcessTestServer(log) { ServerType = ServerType.Cluster }; + // depending on the test, we might not want the client to know about the second node yet + var secondNode = toNewUnknownNode ? 
null : server.AddEmptyNode(); + + await using var muxer = await server.ConnectAsync(); + var db = muxer.GetDatabase(); + + await db.StringSetAsync(key, "value"); + var value = await db.StringGetAsync(key); + Assert.Equal("value", (string?)value); + + if (toNewUnknownNode) // if deferred, the client doesn't know about this yet + { + secondNode = server.AddEmptyNode(); + } + + server.Migrate(key, secondNode); + + if (allowFollowRedirects) + { + value = await db.StringGetAsync(key, flags: CommandFlags.None); + Assert.Equal("value", (string?)value); + } + else + { + var ex = await Assert.ThrowsAsync(() => db.StringGetAsync(key, flags: CommandFlags.NoRedirect)); + Assert.Contains("MOVED", ex.Message); + } + } + + /// + /// Integration test: Verifies that when a MOVED error points to the same endpoint, + /// the client reconnects and successfully retries the operation. + /// + /// Test scenario: + /// 1. Client connects to test server + /// 2. Client sends SET command for trigger key + /// 3. Server returns MOVED error pointing to same endpoint + /// 4. Client detects MOVED-to-same-endpoint and triggers reconnection + /// 5. Client retries SET command after reconnection + /// 6. Server processes SET normally on retry + /// + /// Expected behavior: + /// - SET command count should increase by 2 (initial attempt + retry) + /// - MOVED response count should increase by 1 (only on first attempt) + /// - Connection count should increase by 1 (reconnection after MOVED) + /// - Final SET operation should succeed with value stored. 
+ /// + [Theory] + [InlineData(ServerType.Cluster)] + [InlineData(ServerType.Standalone)] + public async Task MovedToSameEndpoint_TriggersReconnectAndRetry_CommandSucceeds(ServerType serverType) + { + RedisKey key = Me(); + + using var testServer = new MovedTestServer( + triggerKey: key, + log: log) { ServerType = serverType, }; + + // Act: Connect to the test server + await using var conn = await testServer.ConnectAsync(); + // Ping the server to ensure it's responsive + var server = conn.GetServer(testServer.DefaultEndPoint); + + var id = await server.ExecuteAsync("client", "id"); + log?.WriteLine($"Client id before: {id}"); + + await server.PingAsync(); // init everything + // Verify server is detected as per test config + Assert.Equal(serverType, server.ServerType); + var db = conn.GetDatabase(); + + // Record baseline counters after initial connection + Assert.Equal(0, testServer.SetCmdCount); + Assert.Equal(0, testServer.MovedResponseCount); + var initialConnectionCount = testServer.TotalClientCount; + + // Execute SET command: This should receive MOVED → reconnect → retry → succeed + var setResult = await db.StringSetAsync(key, "testvalue"); + + // Assert: Verify SET command succeeded + Assert.True(setResult, "SET command should return true (OK)"); + + // Verify the value was actually stored (proving retry succeeded) + var retrievedValue = await db.StringGetAsync(key); + Assert.Equal("testvalue", (string?)retrievedValue); + + // Verify SET command was executed twice: once with MOVED response, once successfully + Assert.Equal(2, testServer.SetCmdCount); + + // Verify MOVED response was returned exactly once + Assert.Equal(1, testServer.MovedResponseCount); + + // Verify reconnection occurred: connection count should have increased by 1 + Assert.Equal(initialConnectionCount + 1, testServer.TotalClientCount); + id = await server.ExecuteAsync("client", "id"); + log?.WriteLine($"Client id after: {id}"); + } +} diff --git 
a/tests/StackExchange.Redis.Tests/MultiAdd.cs b/tests/StackExchange.Redis.Tests/MultiAdd.cs deleted file mode 100644 index 7bf48ec51..000000000 --- a/tests/StackExchange.Redis.Tests/MultiAdd.cs +++ /dev/null @@ -1,118 +0,0 @@ -using System.Linq; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - [Collection(SharedConnectionFixture.Key)] - public class MultiAdd : TestBase - { - public MultiAdd(ITestOutputHelper output, SharedConnectionFixture fixture) : base(output, fixture) { } - - [Fact] - public void AddSortedSetEveryWay() - { - using (var conn = Create()) - { - var db = conn.GetDatabase(); - - RedisKey key = Me(); - db.KeyDelete(key, CommandFlags.FireAndForget); - db.SortedSetAdd(key, "a", 1, CommandFlags.FireAndForget); - db.SortedSetAdd(key, new[] { - new SortedSetEntry("b", 2) }, CommandFlags.FireAndForget); - db.SortedSetAdd(key, new[] { - new SortedSetEntry("c", 3), - new SortedSetEntry("d", 4)}, CommandFlags.FireAndForget); - db.SortedSetAdd(key, new[] { - new SortedSetEntry("e", 5), - new SortedSetEntry("f", 6), - new SortedSetEntry("g", 7)}, CommandFlags.FireAndForget); - db.SortedSetAdd(key, new[] { - new SortedSetEntry("h", 8), - new SortedSetEntry("i", 9), - new SortedSetEntry("j", 10), - new SortedSetEntry("k", 11)}, CommandFlags.FireAndForget); - var vals = db.SortedSetRangeByScoreWithScores(key); - string s = string.Join(",", vals.OrderByDescending(x => x.Score).Select(x => x.Element)); - Assert.Equal("k,j,i,h,g,f,e,d,c,b,a", s); - s = string.Join(",", vals.OrderBy(x => x.Score).Select(x => x.Score)); - Assert.Equal("1,2,3,4,5,6,7,8,9,10,11", s); - } - } - - [Fact] - public void AddHashEveryWay() - { - using (var conn = Create()) - { - var db = conn.GetDatabase(); - - RedisKey key = Me(); - db.KeyDelete(key, CommandFlags.FireAndForget); - db.HashSet(key, "a", 1, flags: CommandFlags.FireAndForget); - db.HashSet(key, new[] { - new HashEntry("b", 2) }, CommandFlags.FireAndForget); - db.HashSet(key, new[] { - new 
HashEntry("c", 3), - new HashEntry("d", 4)}, CommandFlags.FireAndForget); - db.HashSet(key, new[] { - new HashEntry("e", 5), - new HashEntry("f", 6), - new HashEntry("g", 7)}, CommandFlags.FireAndForget); - db.HashSet(key, new[] { - new HashEntry("h", 8), - new HashEntry("i", 9), - new HashEntry("j", 10), - new HashEntry("k", 11)}, CommandFlags.FireAndForget); - var vals = db.HashGetAll(key); - string s = string.Join(",", vals.OrderByDescending(x => (double)x.Value).Select(x => x.Name)); - Assert.Equal("k,j,i,h,g,f,e,d,c,b,a", s); - s = string.Join(",", vals.OrderBy(x => (double)x.Value).Select(x => x.Value)); - Assert.Equal("1,2,3,4,5,6,7,8,9,10,11", s); - } - } - - [Fact] - public void AddSetEveryWay() - { - using (var conn = Create()) - { - var db = conn.GetDatabase(); - - RedisKey key = Me(); - db.KeyDelete(key, CommandFlags.FireAndForget); - db.SetAdd(key, "a", CommandFlags.FireAndForget); - db.SetAdd(key, new RedisValue[] { "b" }, CommandFlags.FireAndForget); - db.SetAdd(key, new RedisValue[] { "c", "d" }, CommandFlags.FireAndForget); - db.SetAdd(key, new RedisValue[] { "e", "f", "g" }, CommandFlags.FireAndForget); - db.SetAdd(key, new RedisValue[] { "h", "i", "j", "k" }, CommandFlags.FireAndForget); - - var vals = db.SetMembers(key); - string s = string.Join(",", vals.OrderByDescending(x => x)); - Assert.Equal("k,j,i,h,g,f,e,d,c,b,a", s); - } - } - - [Fact] - public void AddSetEveryWayNumbers() - { - using (var conn = Create()) - { - var db = conn.GetDatabase(); - - RedisKey key = Me(); - db.KeyDelete(key, CommandFlags.FireAndForget); - db.SetAdd(key, "a", CommandFlags.FireAndForget); - db.SetAdd(key, new RedisValue[] { "1" }, CommandFlags.FireAndForget); - db.SetAdd(key, new RedisValue[] { "11", "2" }, CommandFlags.FireAndForget); - db.SetAdd(key, new RedisValue[] { "10", "3", "1.5" }, CommandFlags.FireAndForget); - db.SetAdd(key, new RedisValue[] { "2.2", "-1", "s", "t" }, CommandFlags.FireAndForget); - - var vals = db.SetMembers(key); - string s = 
string.Join(",", vals.OrderByDescending(x => x)); - Assert.Equal("t,s,a,11,10,3,2.2,2,1.5,1,-1", s); - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/MultiAddTests.cs b/tests/StackExchange.Redis.Tests/MultiAddTests.cs new file mode 100644 index 000000000..f5fb66335 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/MultiAddTests.cs @@ -0,0 +1,141 @@ +using System.Linq; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class MultiAddTests(ITestOutputHelper output, SharedConnectionFixture fixture) : TestBase(output, fixture) +{ + [Fact] + public async Task AddSortedSetEveryWay() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + RedisKey key = Me(); + + db.KeyDelete(key, CommandFlags.FireAndForget); + db.SortedSetAdd(key, "a", 1, CommandFlags.FireAndForget); + db.SortedSetAdd( + key, + [ + new SortedSetEntry("b", 2), + ], + CommandFlags.FireAndForget); + db.SortedSetAdd( + key, + [ + new SortedSetEntry("c", 3), + new SortedSetEntry("d", 4), + ], + CommandFlags.FireAndForget); + db.SortedSetAdd( + key, + [ + new SortedSetEntry("e", 5), + new SortedSetEntry("f", 6), + new SortedSetEntry("g", 7), + ], + CommandFlags.FireAndForget); + db.SortedSetAdd( + key, + [ + new SortedSetEntry("h", 8), + new SortedSetEntry("i", 9), + new SortedSetEntry("j", 10), + new SortedSetEntry("k", 11), + ], + CommandFlags.FireAndForget); + var vals = db.SortedSetRangeByScoreWithScores(key); + string s = string.Join(",", vals.OrderByDescending(x => x.Score).Select(x => x.Element)); + Assert.Equal("k,j,i,h,g,f,e,d,c,b,a", s); + s = string.Join(",", vals.OrderBy(x => x.Score).Select(x => x.Score)); + Assert.Equal("1,2,3,4,5,6,7,8,9,10,11", s); + } + + [Fact] + public async Task AddHashEveryWay() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + RedisKey key = Me(); + + db.KeyDelete(key, CommandFlags.FireAndForget); + db.HashSet(key, "a", 1, flags: CommandFlags.FireAndForget); + db.HashSet( + 
key, + [ + new HashEntry("b", 2), + ], + CommandFlags.FireAndForget); + db.HashSet( + key, + [ + new HashEntry("c", 3), + new HashEntry("d", 4), + ], + CommandFlags.FireAndForget); + db.HashSet( + key, + [ + new HashEntry("e", 5), + new HashEntry("f", 6), + new HashEntry("g", 7), + ], + CommandFlags.FireAndForget); + db.HashSet( + key, + [ + new HashEntry("h", 8), + new HashEntry("i", 9), + new HashEntry("j", 10), + new HashEntry("k", 11), + ], + CommandFlags.FireAndForget); + var vals = db.HashGetAll(key); + string s = string.Join(",", vals.OrderByDescending(x => (double)x.Value).Select(x => x.Name)); + Assert.Equal("k,j,i,h,g,f,e,d,c,b,a", s); + s = string.Join(",", vals.OrderBy(x => (double)x.Value).Select(x => x.Value)); + Assert.Equal("1,2,3,4,5,6,7,8,9,10,11", s); + } + + [Fact] + public async Task AddSetEveryWay() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + RedisKey key = Me(); + + db.KeyDelete(key, CommandFlags.FireAndForget); + db.SetAdd(key, "a", CommandFlags.FireAndForget); + db.SetAdd(key, ["b"], CommandFlags.FireAndForget); + db.SetAdd(key, ["c", "d"], CommandFlags.FireAndForget); + db.SetAdd(key, ["e", "f", "g"], CommandFlags.FireAndForget); + db.SetAdd(key, ["h", "i", "j", "k"], CommandFlags.FireAndForget); + + var vals = db.SetMembers(key); + string s = string.Join(",", vals.OrderByDescending(x => x)); + Assert.Equal("k,j,i,h,g,f,e,d,c,b,a", s); + } + + [Fact] + public async Task AddSetEveryWayNumbers() + { + await using var conn = Create(); + var db = conn.GetDatabase(); + RedisKey key = Me(); + + db.KeyDelete(key, CommandFlags.FireAndForget); + db.SetAdd(key, "a", CommandFlags.FireAndForget); + db.SetAdd(key, ["1"], CommandFlags.FireAndForget); + db.SetAdd(key, ["11", "2"], CommandFlags.FireAndForget); + db.SetAdd(key, ["10", "3", "1.5"], CommandFlags.FireAndForget); + db.SetAdd(key, ["2.2", "-1", "s", "t"], CommandFlags.FireAndForget); + + var vals = db.SetMembers(key); + string s = string.Join(",", 
vals.OrderByDescending(x => x)); + Assert.Equal("t,s,a,11,10,3,2.2,2,1.5,1,-1", s); + } +} diff --git a/tests/StackExchange.Redis.Tests/MultiMaster.cs b/tests/StackExchange.Redis.Tests/MultiMaster.cs deleted file mode 100644 index 12823f054..000000000 --- a/tests/StackExchange.Redis.Tests/MultiMaster.cs +++ /dev/null @@ -1,97 +0,0 @@ -using System.Collections.Generic; -using System.IO; -using System.Linq; -using System.Text; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - public class MultiMaster : TestBase - { - protected override string GetConfiguration() => - TestConfig.Current.MasterServerAndPort + "," + TestConfig.Current.SecureServerAndPort + ",password=" + TestConfig.Current.SecurePassword; - public MultiMaster(ITestOutputHelper output) : base (output) { } - - [Fact] - public void CannotFlushReplica() - { - var ex = Assert.Throws(() => - { - using (var conn = ConnectionMultiplexer.Connect(TestConfig.Current.ReplicaServerAndPort + ",allowAdmin=true")) - { - var servers = conn.GetEndPoints().Select(e => conn.GetServer(e)); - var replica = servers.FirstOrDefault(x => x.IsReplica); - Assert.NotNull(replica); // replica not found, ruh roh - replica.FlushDatabase(); - } - }); - Assert.Equal("Command cannot be issued to a replica: FLUSHDB", ex.Message); - } - - [Fact] - public void TestMultiNoTieBreak() - { - using (var log = new StringWriter()) - using (Create(log: log, tieBreaker: "")) - { - Log(log.ToString()); - Assert.Contains("Choosing master arbitrarily", log.ToString()); - } - } - - public static IEnumerable GetConnections() - { - yield return new object[] { TestConfig.Current.MasterServerAndPort, TestConfig.Current.MasterServerAndPort, TestConfig.Current.MasterServerAndPort }; - yield return new object[] { TestConfig.Current.SecureServerAndPort, TestConfig.Current.SecureServerAndPort, TestConfig.Current.SecureServerAndPort }; - yield return new object[] { TestConfig.Current.SecureServerAndPort, 
TestConfig.Current.MasterServerAndPort, null }; - yield return new object[] { TestConfig.Current.MasterServerAndPort, TestConfig.Current.SecureServerAndPort, null }; - - yield return new object[] { null, TestConfig.Current.MasterServerAndPort, TestConfig.Current.MasterServerAndPort }; - yield return new object[] { TestConfig.Current.MasterServerAndPort, null, TestConfig.Current.MasterServerAndPort }; - yield return new object[] { null, TestConfig.Current.SecureServerAndPort, TestConfig.Current.SecureServerAndPort }; - yield return new object[] { TestConfig.Current.SecureServerAndPort, null, TestConfig.Current.SecureServerAndPort }; - yield return new object[] { null, null, null }; - } - - [Theory, MemberData(nameof(GetConnections))] - public void TestMultiWithTiebreak(string a, string b, string elected) - { - const string TieBreak = "__tie__"; - // set the tie-breakers to the expected state - using (var aConn = ConnectionMultiplexer.Connect(TestConfig.Current.MasterServerAndPort)) - { - aConn.GetDatabase().StringSet(TieBreak, a); - } - using (var aConn = ConnectionMultiplexer.Connect(TestConfig.Current.SecureServerAndPort + ",password=" + TestConfig.Current.SecurePassword)) - { - aConn.GetDatabase().StringSet(TieBreak, b); - } - - // see what happens - var log = new StringBuilder(); - Writer.EchoTo(log); - - using (Create(log: Writer, tieBreaker: TieBreak)) - { - string text = log.ToString(); - Assert.False(text.Contains("failed to nominate"), "failed to nominate"); - if (elected != null) - { - Assert.True(text.Contains("Elected: " + elected), "elected"); - } - int nullCount = (a == null ? 1 : 0) + (b == null ? 
1 : 0); - if ((a == b && nullCount == 0) || nullCount == 1) - { - Assert.True(text.Contains("Election: Tie-breaker unanimous"), "unanimous"); - Assert.False(text.Contains("Election: Choosing master arbitrarily"), "arbitrarily"); - } - else - { - Assert.False(text.Contains("Election: Tie-breaker unanimous"), "unanimous"); - Assert.True(text.Contains("Election: Choosing master arbitrarily"), "arbitrarily"); - } - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/MultiPrimaryTests.cs b/tests/StackExchange.Redis.Tests/MultiPrimaryTests.cs new file mode 100644 index 000000000..3d88e097c --- /dev/null +++ b/tests/StackExchange.Redis.Tests/MultiPrimaryTests.cs @@ -0,0 +1,93 @@ +using System.Collections.Generic; +using System.Linq; +using System.Text; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class MultiPrimaryTests(ITestOutputHelper output) : TestBase(output) +{ + protected override string GetConfiguration() => + TestConfig.Current.PrimaryServerAndPort + "," + TestConfig.Current.SecureServerAndPort + ",password=" + TestConfig.Current.SecurePassword; + + [Fact] + public async Task CannotFlushReplica() + { + var ex = await Assert.ThrowsAsync(async () => + { + await using var conn = await ConnectionMultiplexer.ConnectAsync(TestConfig.Current.ReplicaServerAndPort + ",allowAdmin=true"); + + var servers = conn.GetEndPoints().Select(e => conn.GetServer(e)); + var replica = servers.FirstOrDefault(x => x.IsReplica); + Assert.NotNull(replica); // replica not found, ruh roh + replica.FlushDatabase(); + }); + Assert.Equal("Command cannot be issued to a replica: FLUSHDB", ex.Message); + } + + [Fact] + public void TestMultiNoTieBreak() + { + var log = new StringBuilder(); + Writer.EchoTo(log); + using (Create(log: Writer, tieBreaker: "")) + { + Assert.Contains("Choosing primary arbitrarily", log.ToString()); + } + } + + public static IEnumerable GetConnections() + { + yield return new object[] { 
TestConfig.Current.PrimaryServerAndPort, TestConfig.Current.PrimaryServerAndPort, TestConfig.Current.PrimaryServerAndPort }; + yield return new object[] { TestConfig.Current.SecureServerAndPort, TestConfig.Current.SecureServerAndPort, TestConfig.Current.SecureServerAndPort }; + yield return new object?[] { TestConfig.Current.SecureServerAndPort, TestConfig.Current.PrimaryServerAndPort, null }; + yield return new object?[] { TestConfig.Current.PrimaryServerAndPort, TestConfig.Current.SecureServerAndPort, null }; + + yield return new object?[] { null, TestConfig.Current.PrimaryServerAndPort, null }; + yield return new object?[] { TestConfig.Current.PrimaryServerAndPort, null, TestConfig.Current.PrimaryServerAndPort }; + yield return new object?[] { null, TestConfig.Current.SecureServerAndPort, TestConfig.Current.SecureServerAndPort }; + yield return new object?[] { TestConfig.Current.SecureServerAndPort, null, TestConfig.Current.SecureServerAndPort }; + yield return new object?[] { null, null, null }; + } + + [Theory, MemberData(nameof(GetConnections))] + public void TestMultiWithTiebreak(string a, string b, string elected) + { + const string TieBreak = "__tie__"; + // set the tie-breakers to the expected state + using (var aConn = ConnectionMultiplexer.Connect(TestConfig.Current.PrimaryServerAndPort)) + { + aConn.GetDatabase().StringSet(TieBreak, a); + } + using (var aConn = ConnectionMultiplexer.Connect(TestConfig.Current.SecureServerAndPort + ",password=" + TestConfig.Current.SecurePassword)) + { + aConn.GetDatabase().StringSet(TieBreak, b); + } + + // see what happens + var log = new StringBuilder(); + Writer.EchoTo(log); + + using (Create(log: Writer, tieBreaker: TieBreak)) + { + string text = log.ToString(); + Assert.False(text.Contains("failed to nominate"), "failed to nominate"); + if (elected != null) + { + Assert.True(text.Contains("Elected: " + elected), "elected"); + } + int nullCount = (a == null ? 1 : 0) + (b == null ? 
1 : 0); + if ((a == b && nullCount == 0) || nullCount == 1) + { + Assert.True(text.Contains("Election: Tie-breaker unanimous"), "unanimous"); + Assert.False(text.Contains("Election: Choosing primary arbitrarily"), "arbitrarily"); + } + else + { + Assert.False(text.Contains("Election: Tie-breaker unanimous"), "unanimous"); + Assert.True(text.Contains("Election: Choosing primary arbitrarily"), "arbitrarily"); + } + } + } +} diff --git a/tests/StackExchange.Redis.Tests/Naming.cs b/tests/StackExchange.Redis.Tests/Naming.cs deleted file mode 100644 index ca9071f09..000000000 --- a/tests/StackExchange.Redis.Tests/Naming.cs +++ /dev/null @@ -1,246 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Linq; -using System.Reflection; -using System.Threading.Tasks; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - public class Naming : TestBase - { - public Naming(ITestOutputHelper output) : base(output) { } - - [Theory] - [InlineData(typeof(IDatabase), false)] - [InlineData(typeof(IDatabaseAsync), true)] - [InlineData(typeof(Condition), false)] - public void CheckSignatures(Type type, bool isAsync) - { - // check that all methods and interfaces look appropriate for their sync/async nature - CheckName(type, isAsync); - var members = type.GetMethods(BindingFlags.Public | BindingFlags.Instance | BindingFlags.Static | BindingFlags.DeclaredOnly); - foreach (var member in members) - { - if (member.Name.StartsWith("get_") || member.Name.StartsWith("set_") || member.Name.StartsWith("add_") || member.Name.StartsWith("remove_")) continue; - CheckMethod(member, isAsync); - } - } - - [Fact] - public void ShowReadOnlyOperations() - { - var msg = typeof(ConnectionMultiplexer).Assembly.GetType("StackExchange.Redis.Message"); - Assert.NotNull(msg); - var cmd = typeof(ConnectionMultiplexer).Assembly.GetType("StackExchange.Redis.RedisCommand"); - Assert.NotNull(cmd); - var masterOnlyMethod = msg.GetMethod(nameof(Message.IsMasterOnly), 
BindingFlags.Static | BindingFlags.NonPublic | BindingFlags.Public); - Assert.NotNull(masterOnlyMethod); - object[] args = new object[1]; - - List masterReplica = new List(); - List masterOnly = new List(); - foreach (var val in Enum.GetValues(cmd)) - { - args[0] = val; - bool isMasterOnly = (bool)masterOnlyMethod.Invoke(null, args); - (isMasterOnly ? masterOnly : masterReplica).Add(val); - - if (!isMasterOnly) - { - Log(val?.ToString()); - } - } - Log("master-only: {0}, vs master/replica: {1}", masterOnly.Count, masterReplica.Count); - Log(""); - Log("master-only:"); - foreach (var val in masterOnly) - { - Log(val?.ToString()); - } - Log(""); - Log("master/replica:"); - foreach (var val in masterReplica) - { - Log(val?.ToString()); - } - } - - [Theory] - [InlineData(typeof(IDatabase))] - [InlineData(typeof(IDatabaseAsync))] - public void CheckDatabaseMethodsUseKeys(Type type) - { - foreach (var method in type.GetMethods(BindingFlags.Public | BindingFlags.Instance | BindingFlags.DeclaredOnly)) - { - if (IgnoreMethodConventions(method)) continue; - - switch (method.Name) - { - case nameof(IDatabase.KeyRandom): - case nameof(IDatabaseAsync.KeyRandomAsync): - case nameof(IDatabase.Publish): - case nameof(IDatabaseAsync.PublishAsync): - case nameof(IDatabase.Execute): - case nameof(IDatabaseAsync.ExecuteAsync): - case nameof(IDatabase.ScriptEvaluate): - case nameof(IDatabaseAsync.ScriptEvaluateAsync): - case nameof(IDatabase.StreamRead): - case nameof(IDatabase.StreamReadAsync): - case nameof(IDatabase.StreamReadGroup): - case nameof(IDatabase.StreamReadGroupAsync): - continue; // they're fine, but don't want to widen check to return type - } - - bool usesKey = method.GetParameters().Any(p => UsesKey(p.ParameterType)); - Assert.True(usesKey, type.Name + ":" + method.Name); - } - } - - private static bool UsesKey(Type type) - { - if (type == typeof(RedisKey)) return true; - - if (type.IsArray) - { - if (UsesKey(type.GetElementType())) return true; - } - if 
(type.IsGenericType) // KVP, etc - { - var args = type.GetGenericArguments(); - if (args.Any(UsesKey)) return true; - } - return false; - } - - private static bool IgnoreMethodConventions(MethodInfo method) - { - string name = method.Name; - if (name.StartsWith("get_") || name.StartsWith("set_") || name.StartsWith("add_") || name.StartsWith("remove_")) return true; - switch (name) - { - case nameof(IDatabase.CreateBatch): - case nameof(IDatabase.CreateTransaction): - case nameof(IDatabase.Execute): - case nameof(IDatabaseAsync.ExecuteAsync): - case nameof(IDatabase.IsConnected): - case nameof(IDatabase.SetScan): - case nameof(IDatabase.SortedSetScan): - case nameof(IDatabase.HashScan): - case nameof(ISubscriber.SubscribedEndpoint): - return true; - } - return false; - } - - [Theory] - [InlineData(typeof(IDatabase), typeof(IDatabaseAsync))] - [InlineData(typeof(IDatabaseAsync), typeof(IDatabase))] - public void CheckSyncAsyncMethodsMatch(Type from, Type to) - { - const BindingFlags flags = BindingFlags.Public | BindingFlags.Instance | BindingFlags.DeclaredOnly; - int count = 0; - foreach (var method in from.GetMethods(flags)) - { - if (IgnoreMethodConventions(method)) continue; - - string name = method.Name, huntName; - - if (name.EndsWith("Async")) huntName = name.Substring(0, name.Length - 5); - else huntName = name + "Async"; - var pFrom = method.GetParameters(); - Type[] args = pFrom.Select(x => x.ParameterType).ToArray(); - Log("Checking: {0}.{1}", from.Name, method.Name); - Assert.Equal(typeof(CommandFlags), args.Last()); - var found = to.GetMethod(huntName, flags, null, method.CallingConvention, args, null); - Assert.NotNull(found); // "Found " + name + ", no " + huntName - var pTo = found.GetParameters(); - - for (int i = 0; i < pFrom.Length; i++) - { - Assert.Equal(pFrom[i].Name, pTo[i].Name); // method.Name + ":" + pFrom[i].Name - Assert.Equal(pFrom[i].ParameterType, pTo[i].ParameterType); // method.Name + ":" + pFrom[i].Name - } - - count++; - } - 
Log("Validated: {0} ({1} methods)", from.Name, count); - } - - private void CheckMethod(MethodInfo method, bool isAsync) - { - string shortName = method.Name, fullName = method.DeclaringType.Name + "." + shortName; - - switch (shortName) - { - case nameof(IDatabaseAsync.IsConnected): - return; - case nameof(IDatabase.CreateBatch): - case nameof(IDatabase.CreateTransaction): - case nameof(IDatabase.IdentifyEndpoint): - case nameof(IDatabase.Sort): - case nameof(IDatabase.SortAndStore): - case nameof(IDatabaseAsync.IdentifyEndpointAsync): - case nameof(IDatabaseAsync.SortAsync): - case nameof(IDatabaseAsync.SortAndStoreAsync): - CheckName(method, isAsync); - break; - default: - CheckName(method, isAsync); - var isValid = shortName.StartsWith("Debug") - || shortName.StartsWith("Execute") - || shortName.StartsWith("Geo") - || shortName.StartsWith("Hash") - || shortName.StartsWith("HyperLogLog") - || shortName.StartsWith("Key") - || shortName.StartsWith("List") - || shortName.StartsWith("Lock") - || shortName.StartsWith("Publish") - || shortName.StartsWith("Set") - || shortName.StartsWith("Script") - || shortName.StartsWith("SortedSet") - || shortName.StartsWith("String") - || shortName.StartsWith("Stream"); - Log(fullName + ": " + (isValid ? "valid" : "invalid")); - Assert.True(isValid, fullName + ":Prefix"); - break; - } - - Assert.False(shortName.Contains("If"), fullName + ":If"); // should probably be a When option - - var returnType = method.ReturnType ?? 
typeof(void); - - if (isAsync) - { - Assert.True(IsAsyncMethod(returnType), fullName + ":Task"); - } - else - { - Assert.False(IsAsyncMethod(returnType), fullName + ":Task"); - } - - static bool IsAsyncMethod(Type returnType) - { - if (returnType == typeof(Task)) return true; - if (returnType == typeof(ValueTask)) return true; - - if (returnType.IsGenericType) - { - var genDef = returnType.GetGenericTypeDefinition(); - if (genDef == typeof(Task<>)) return true; - if (genDef == typeof(ValueTask<>)) return true; - if (genDef == typeof(IAsyncEnumerable<>)) return true; - } - - return false; - } - } - - private void CheckName(MemberInfo member, bool isAsync) - { - if (isAsync) Assert.True(member.Name.EndsWith("Async"), member.Name + ":Name - end *Async"); - else Assert.False(member.Name.EndsWith("Async"), member.Name + ":Name - don't end *Async"); - } - } -} diff --git a/tests/StackExchange.Redis.Tests/NamingTests.cs b/tests/StackExchange.Redis.Tests/NamingTests.cs new file mode 100644 index 000000000..9d9e032ad --- /dev/null +++ b/tests/StackExchange.Redis.Tests/NamingTests.cs @@ -0,0 +1,238 @@ +using System; +using System.Collections.Generic; +using System.ComponentModel; +using System.Linq; +using System.Reflection; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class NamingTests(ITestOutputHelper output) : TestBase(output) +{ + [Theory] + [InlineData(typeof(IDatabase), false)] + [InlineData(typeof(IDatabaseAsync), true)] + [InlineData(typeof(Condition), false)] + public void CheckSignatures(Type type, bool isAsync) + { + // check that all methods and interfaces look appropriate for their sync/async nature + CheckName(type, isAsync); + var members = type.GetMethods(BindingFlags.Public | BindingFlags.Instance | BindingFlags.Static | BindingFlags.DeclaredOnly); + foreach (var member in members) + { + if (member.Name.StartsWith("get_") || member.Name.StartsWith("set_") || member.Name.StartsWith("add_") || 
member.Name.StartsWith("remove_")) continue; + CheckMethod(member, isAsync); + } + } + + /// + /// This test iterates over all s to ensure we have everything accounted for as primary-only or not. + /// + [Fact] + public void CheckReadOnlyOperations() + { + List primaryReplica = new(), + primaryOnly = new(); + foreach (var val in (RedisCommand[])Enum.GetValues(typeof(RedisCommand))) + { + bool isPrimaryOnly = val.IsPrimaryOnly(); + (isPrimaryOnly ? primaryOnly : primaryReplica).Add(val); + + if (!isPrimaryOnly) + { + Log(val.ToString()); + } + } + // Ensure an unknown command from nowhere would violate the check above, as any not-yet-added one would. + Assert.Throws(() => ((RedisCommand)99999).IsPrimaryOnly()); + + Log("primary-only: {0}, vs primary/replica: {1}", primaryOnly.Count, primaryReplica.Count); + Log(""); + Log("primary-only:"); + foreach (var val in primaryOnly) + { + Log(val.ToString()); + } + Log(""); + Log("primary/replica:"); + foreach (var val in primaryReplica) + { + Log(val.ToString()); + } + } + + [Theory] + [InlineData(typeof(IDatabase))] + [InlineData(typeof(IDatabaseAsync))] + public void CheckDatabaseMethodsUseKeys(Type type) + { + foreach (var method in type.GetMethods(BindingFlags.Public | BindingFlags.Instance | BindingFlags.DeclaredOnly)) + { + if (IgnoreMethodConventions(method)) continue; + + switch (method.Name) + { + case nameof(IDatabase.KeyRandom): + case nameof(IDatabaseAsync.KeyRandomAsync): + case nameof(IDatabase.Publish): + case nameof(IDatabaseAsync.PublishAsync): + case nameof(IDatabase.Execute): + case nameof(IDatabaseAsync.ExecuteAsync): + case nameof(IDatabase.ScriptEvaluate): + case nameof(IDatabaseAsync.ScriptEvaluateAsync): + case nameof(IDatabase.StreamRead): + case nameof(IDatabase.StreamReadAsync): + case nameof(IDatabase.StreamReadGroup): + case nameof(IDatabase.StreamReadGroupAsync): + continue; // they're fine, but don't want to widen check to return type + } + + bool usesKey = method.GetParameters().Any(p => 
UsesKey(p.ParameterType)); + Assert.True(usesKey, type.Name + ":" + method.Name); + } + } + + private static bool UsesKey(Type type) => + type == typeof(RedisKey) + || (type.IsArray && UsesKey(type.GetElementType()!)) + || (type.IsGenericType && type.GetGenericArguments().Any(UsesKey)); + + private static bool IgnoreMethodConventions(MethodInfo method) + { + string name = method.Name; + if (name.StartsWith("get_") || name.StartsWith("set_") || name.StartsWith("add_") || name.StartsWith("remove_")) return true; + switch (name) + { + case nameof(IDatabase.CreateBatch): + case nameof(IDatabase.CreateTransaction): + case nameof(IDatabase.Execute): + case nameof(IDatabaseAsync.ExecuteAsync): + case nameof(IDatabase.IsConnected): + case nameof(IDatabase.SetScan): + case nameof(IDatabase.SortedSetScan): + case nameof(IDatabase.HashScan): + case nameof(IDatabase.HashScanNoValues): + case nameof(ISubscriber.SubscribedEndpoint): + return true; + } + return false; + } + + [Theory] + [InlineData(typeof(IDatabase), typeof(IDatabaseAsync))] + [InlineData(typeof(IDatabaseAsync), typeof(IDatabase))] + public void CheckSyncAsyncMethodsMatch(Type from, Type to) + { + const BindingFlags flags = BindingFlags.Public | BindingFlags.Instance | BindingFlags.DeclaredOnly; + int count = 0; + foreach (var method in from.GetMethods(flags)) + { + if (IgnoreMethodConventions(method)) continue; + + string name = method.Name, huntName; + + if (name.EndsWith("Async")) huntName = name.Substring(0, name.Length - 5); + else huntName = name + "Async"; + var pFrom = method.GetParameters(); + Type[] args = pFrom.Select(x => x.ParameterType).ToArray(); + Log("Checking: {0}.{1}", from.Name, method.Name); + if (method.GetCustomAttribute() is EditorBrowsableAttribute attr && attr.State == EditorBrowsableState.Never) + { + // For compatibility overloads, explicitly don't ensure CommandFlags is last + } + else + { + Assert.Equal(typeof(CommandFlags), args.Last()); + } + var found = to.GetMethod(huntName, 
flags, null, method.CallingConvention, args, null); + Assert.NotNull(found); // "Found " + name + ", no " + huntName + var pTo = found.GetParameters(); + + for (int i = 0; i < pFrom.Length; i++) + { + Assert.Equal(pFrom[i].Name, pTo[i].Name); // method.Name + ":" + pFrom[i].Name + Assert.Equal(pFrom[i].ParameterType, pTo[i].ParameterType); // method.Name + ":" + pFrom[i].Name + } + + count++; + } + Log("Validated: {0} ({1} methods)", from.Name, count); + } + + private void CheckMethod(MethodInfo method, bool isAsync) + { + string shortName = method.Name, fullName = method.DeclaringType?.Name + "." + shortName; + + switch (shortName) + { + case nameof(IDatabaseAsync.IsConnected): + return; + case nameof(IDatabase.CreateBatch): + case nameof(IDatabase.CreateTransaction): + case nameof(IDatabase.IdentifyEndpoint): + case nameof(IDatabase.Sort): + case nameof(IDatabase.SortAndStore): + case nameof(IDatabaseAsync.IdentifyEndpointAsync): + case nameof(IDatabaseAsync.SortAsync): + case nameof(IDatabaseAsync.SortAndStoreAsync): + CheckName(method, isAsync); + break; + default: + CheckName(method, isAsync); + var isValid = shortName.StartsWith("Debug") + || shortName.StartsWith("Execute") + || shortName.StartsWith("Geo") + || shortName.StartsWith("Hash") + || shortName.StartsWith("HyperLogLog") + || shortName.StartsWith("Key") + || shortName.StartsWith("List") + || shortName.StartsWith("Lock") + || shortName.StartsWith("Publish") + || shortName.StartsWith("Set") + || shortName.StartsWith("Script") + || shortName.StartsWith("SortedSet") + || shortName.StartsWith("String") + || shortName.StartsWith("Stream") + || shortName.StartsWith("VectorSet"); + Log(fullName + ": " + (isValid ? "valid" : "invalid")); + Assert.True(isValid, fullName + ":Prefix"); + break; + } + + Assert.False(shortName.Contains("If"), fullName + ":If"); // should probably be a When option + + var returnType = method.ReturnType ?? 
typeof(void); + + if (isAsync) + { + Assert.True(IsAsyncMethod(returnType), fullName + ":Task"); + } + else + { + Assert.False(IsAsyncMethod(returnType), fullName + ":Task"); + } + + static bool IsAsyncMethod(Type returnType) + { + if (returnType == typeof(Task)) return true; + if (returnType == typeof(ValueTask)) return true; + + if (returnType.IsGenericType) + { + var genDef = returnType.GetGenericTypeDefinition(); + if (genDef == typeof(Task<>)) return true; + if (genDef == typeof(ValueTask<>)) return true; + if (genDef == typeof(IAsyncEnumerable<>)) return true; + } + + return false; + } + } + + private static void CheckName(MemberInfo member, bool isAsync) + { + if (isAsync) Assert.True(member.Name.EndsWith("Async"), member.Name + ":Name - end *Async"); + else Assert.False(member.Name.EndsWith("Async"), member.Name + ":Name - don't end *Async"); + } +} diff --git a/tests/StackExchange.Redis.Tests/OverloadCompatTests.cs b/tests/StackExchange.Redis.Tests/OverloadCompatTests.cs new file mode 100644 index 000000000..0acadc74b --- /dev/null +++ b/tests/StackExchange.Redis.Tests/OverloadCompatTests.cs @@ -0,0 +1,248 @@ +using System; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +/// +/// This test set is for when we add an overload, to making sure all +/// past versions work correctly and aren't source breaking. 
+/// +public class OverloadCompatTests(ITestOutputHelper output, SharedConnectionFixture fixture) : TestBase(output, fixture) +{ + [Fact] + public async Task KeyExpire() + { + await using var conn = Create(); + var db = conn.GetDatabase(); + var key = Me(); + var expiresIn = TimeSpan.FromSeconds(10); + var expireTime = DateTime.UtcNow.AddHours(1); + var when = ExpireWhen.Always; + var flags = CommandFlags.None; + + db.KeyExpire(key, expiresIn); + db.KeyExpire(key, expiresIn, when); + db.KeyExpire(key, expiresIn, when: when); + db.KeyExpire(key, expiresIn, flags); + db.KeyExpire(key, expiresIn, flags: flags); + db.KeyExpire(key, expiresIn, when, flags); + db.KeyExpire(key, expiresIn, when: when, flags: flags); + + db.KeyExpire(key, expireTime); + db.KeyExpire(key, expireTime, when); + db.KeyExpire(key, expireTime, when: when); + db.KeyExpire(key, expireTime, flags); + db.KeyExpire(key, expireTime, flags: flags); + db.KeyExpire(key, expireTime, when, flags); + db.KeyExpire(key, expireTime, when: when, flags: flags); + + // Async + await db.KeyExpireAsync(key, expiresIn); + await db.KeyExpireAsync(key, expiresIn, when); + await db.KeyExpireAsync(key, expiresIn, when: when); + await db.KeyExpireAsync(key, expiresIn, flags); + await db.KeyExpireAsync(key, expiresIn, flags: flags); + await db.KeyExpireAsync(key, expiresIn, when, flags); + await db.KeyExpireAsync(key, expiresIn, when: when, flags: flags); + + await db.KeyExpireAsync(key, expireTime); + await db.KeyExpireAsync(key, expireTime, when); + await db.KeyExpireAsync(key, expireTime, when: when); + await db.KeyExpireAsync(key, expireTime, flags); + await db.KeyExpireAsync(key, expireTime, flags: flags); + await db.KeyExpireAsync(key, expireTime, when, flags); + await db.KeyExpireAsync(key, expireTime, when: when, flags: flags); + } + + [Fact] + public async Task StringBitCount() + { + await using var conn = Create(require: RedisFeatures.v2_6_0); + + var db = conn.GetDatabase(); + var key = Me(); + var flags = 
CommandFlags.None; + + db.KeyDelete(key, flags: CommandFlags.FireAndForget); + db.StringSet(key, "foobar", flags: CommandFlags.FireAndForget); + + db.StringBitCount(key); + db.StringBitCount(key, 1); + db.StringBitCount(key, 0, 0); + db.StringBitCount(key, start: 1); + db.StringBitCount(key, end: 1); + db.StringBitCount(key, start: 1, end: 1); + + db.StringBitCount(key, flags: flags); + db.StringBitCount(key, 0, 0, flags); + db.StringBitCount(key, 1, flags: flags); + db.StringBitCount(key, 1, 1, flags: flags); + db.StringBitCount(key, start: 1, flags: flags); + db.StringBitCount(key, end: 1, flags: flags); + db.StringBitCount(key, start: 1, end: 1, flags); + db.StringBitCount(key, start: 1, end: 1, flags: flags); + + // Async + await db.StringBitCountAsync(key); + await db.StringBitCountAsync(key, 1); + await db.StringBitCountAsync(key, 0, 0); + await db.StringBitCountAsync(key, start: 1); + await db.StringBitCountAsync(key, end: 1); + await db.StringBitCountAsync(key, start: 1, end: 1); + + await db.StringBitCountAsync(key, flags: flags); + await db.StringBitCountAsync(key, 0, 0, flags); + await db.StringBitCountAsync(key, 1, flags: flags); + await db.StringBitCountAsync(key, 1, 1, flags: flags); + await db.StringBitCountAsync(key, start: 1, flags: flags); + await db.StringBitCountAsync(key, end: 1, flags: flags); + await db.StringBitCountAsync(key, start: 1, end: 1, flags); + await db.StringBitCountAsync(key, start: 1, end: 1, flags: flags); + } + + [Fact] + public async Task StringBitPosition() + { + await using var conn = Create(require: RedisFeatures.v2_6_0); + + var db = conn.GetDatabase(); + var key = Me(); + var flags = CommandFlags.None; + + db.KeyDelete(key, flags: CommandFlags.FireAndForget); + db.StringSet(key, "foo", flags: CommandFlags.FireAndForget); + + db.StringBitPosition(key, true); + db.StringBitPosition(key, true, 1); + db.StringBitPosition(key, true, 1, 3); + db.StringBitPosition(key, bit: true); + db.StringBitPosition(key, bit: true, start: 
1); + db.StringBitPosition(key, bit: true, end: 1); + db.StringBitPosition(key, bit: true, start: 1, end: 1); + db.StringBitPosition(key, true, start: 1, end: 1); + + db.StringBitPosition(key, true, flags: flags); + db.StringBitPosition(key, true, 1, 3, flags); + db.StringBitPosition(key, true, 1, flags: flags); + db.StringBitPosition(key, bit: true, flags: flags); + db.StringBitPosition(key, bit: true, start: 1, flags: flags); + db.StringBitPosition(key, bit: true, end: 1, flags: flags); + db.StringBitPosition(key, bit: true, start: 1, end: 1, flags: flags); + db.StringBitPosition(key, true, start: 1, end: 1, flags: flags); + + // Async + await db.StringBitPositionAsync(key, true); + await db.StringBitPositionAsync(key, true, 1); + await db.StringBitPositionAsync(key, true, 1, 3); + await db.StringBitPositionAsync(key, bit: true); + await db.StringBitPositionAsync(key, bit: true, start: 1); + await db.StringBitPositionAsync(key, bit: true, end: 1); + await db.StringBitPositionAsync(key, bit: true, start: 1, end: 1); + await db.StringBitPositionAsync(key, true, start: 1, end: 1); + + await db.StringBitPositionAsync(key, true, flags: flags); + await db.StringBitPositionAsync(key, true, 1, 3, flags); + await db.StringBitPositionAsync(key, true, 1, flags: flags); + await db.StringBitPositionAsync(key, bit: true, flags: flags); + await db.StringBitPositionAsync(key, bit: true, start: 1, flags: flags); + await db.StringBitPositionAsync(key, bit: true, end: 1, flags: flags); + await db.StringBitPositionAsync(key, bit: true, start: 1, end: 1, flags: flags); + await db.StringBitPositionAsync(key, true, start: 1, end: 1, flags: flags); + } + + [Fact] + public async Task SortedSetAdd() + { + await using var conn = Create(); + var db = conn.GetDatabase(); + RedisKey key = Me(); + RedisValue val = "myval"; + var score = 1.0d; + var values = new SortedSetEntry[] { new SortedSetEntry(val, score) }; + var when = When.Exists; + var flags = CommandFlags.None; + + 
db.SortedSetAdd(key, val, score); + db.SortedSetAdd(key, val, score, when); + db.SortedSetAdd(key, val, score, when: when); + db.SortedSetAdd(key, val, score, flags); + db.SortedSetAdd(key, val, score, flags: flags); + db.SortedSetAdd(key, val, score, when, flags); + db.SortedSetAdd(key, val, score, when, flags: flags); + db.SortedSetAdd(key, val, score, when: when, flags); + db.SortedSetAdd(key, val, score, when: when, flags: flags); + + db.SortedSetAdd(key, values); + db.SortedSetAdd(key, values, when); + db.SortedSetAdd(key, values, when: when); + db.SortedSetAdd(key, values, flags); + db.SortedSetAdd(key, values, flags: flags); + db.SortedSetAdd(key, values, when, flags); + db.SortedSetAdd(key, values, when, flags: flags); + db.SortedSetAdd(key, values, when: when, flags); + db.SortedSetAdd(key, values, when: when, flags: flags); + + // Async + await db.SortedSetAddAsync(key, val, score); + await db.SortedSetAddAsync(key, val, score, when); + await db.SortedSetAddAsync(key, val, score, when: when); + await db.SortedSetAddAsync(key, val, score, flags); + await db.SortedSetAddAsync(key, val, score, flags: flags); + await db.SortedSetAddAsync(key, val, score, when, flags); + await db.SortedSetAddAsync(key, val, score, when, flags: flags); + await db.SortedSetAddAsync(key, val, score, when: when, flags); + await db.SortedSetAddAsync(key, val, score, when: when, flags: flags); + + await db.SortedSetAddAsync(key, values); + await db.SortedSetAddAsync(key, values, when); + await db.SortedSetAddAsync(key, values, when: when); + await db.SortedSetAddAsync(key, values, flags); + await db.SortedSetAddAsync(key, values, flags: flags); + await db.SortedSetAddAsync(key, values, when, flags); + await db.SortedSetAddAsync(key, values, when, flags: flags); + await db.SortedSetAddAsync(key, values, when: when, flags); + await db.SortedSetAddAsync(key, values, when: when, flags: flags); + } + + [Fact] + public async Task StringSet() + { + await using var conn = Create(); + var db 
= conn.GetDatabase(); + var key = Me(); + var val = "myval"; + var expiresIn = TimeSpan.FromSeconds(10); + var when = When.Always; + var flags = CommandFlags.None; + + db.StringSet(key, val); + db.StringSet(key, val, expiry: expiresIn); + db.StringSet(key, val, when: when); + db.StringSet(key, val, flags: flags); + db.StringSet(key, val, expiry: expiresIn, when: when); + db.StringSet(key, val, expiry: expiresIn, when: when, flags: flags); + db.StringSet(key, val, expiry: expiresIn, when: when, flags: flags); + + db.StringSet(key, val, expiresIn, When.NotExists); + db.StringSet(key, val, expiresIn, When.NotExists, flags); + db.StringSet(key, val, expiry: default); + db.StringSet(key, val, null, When.NotExists); + db.StringSet(key, val, null, When.NotExists, flags); + + // Async + await db.StringSetAsync(key, val); + await db.StringSetAsync(key, val, expiry: expiresIn); + await db.StringSetAsync(key, val, when: when); + await db.StringSetAsync(key, val, flags: flags); + await db.StringSetAsync(key, val, expiry: expiresIn, when: when); + await db.StringSetAsync(key, val, expiry: expiresIn, when: when, flags: flags); + await db.StringSetAsync(key, val, expiry: expiresIn, when: when, flags: flags); + + await db.StringSetAsync(key, val, expiresIn, When.NotExists); + await db.StringSetAsync(key, val, expiresIn, When.NotExists, flags); + await db.StringSetAsync(key, val, expiry: default); + await db.StringSetAsync(key, val, null, When.NotExists); + await db.StringSetAsync(key, val, null, When.NotExists, flags); + } +} diff --git a/tests/StackExchange.Redis.Tests/Parse.cs b/tests/StackExchange.Redis.Tests/Parse.cs deleted file mode 100644 index 354262646..000000000 --- a/tests/StackExchange.Redis.Tests/Parse.cs +++ /dev/null @@ -1,100 +0,0 @@ -using System; -using System.Buffers; -using System.Collections.Generic; -using System.Text; -using Pipelines.Sockets.Unofficial.Arenas; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - public class 
ParseTests : TestBase - { - public ParseTests(ITestOutputHelper output) : base(output) { } - - public static IEnumerable GetTestData() - { - yield return new object[] { "$4\r\nPING\r\n$4\r\nPON", 1 }; - yield return new object[] { "$4\r\nPING\r\n$4\r\nPONG", 1 }; - yield return new object[] { "$4\r\nPING\r\n$4\r\nPONG\r", 1 }; - yield return new object[] { "$4\r\nPING\r\n$4\r\nPONG\r\n", 2 }; - yield return new object[] { "$4\r\nPING\r\n$4\r\nPONG\r\n$4", 2 }; - yield return new object[] { "$4\r\nPING\r\n$4\r\nPONG\r\n$4\r", 2 }; - yield return new object[] { "$4\r\nPING\r\n$4\r\nPONG\r\n$4\r\n", 2 }; - yield return new object[] { "$4\r\nPING\r\n$4\r\nPONG\r\n$4\r\nP", 2 }; - yield return new object[] { "$4\r\nPING\r\n$4\r\nPONG\r\n$4\r\nPO", 2 }; - yield return new object[] { "$4\r\nPING\r\n$4\r\nPONG\r\n$4\r\nPON", 2 }; - yield return new object[] { "$4\r\nPING\r\n$4\r\nPONG\r\n$4\r\nPONG", 2 }; - yield return new object[] { "$4\r\nPING\r\n$4\r\nPONG\r\n$4\r\nPONG\r", 2 }; - yield return new object[] { "$4\r\nPING\r\n$4\r\nPONG\r\n$4\r\nPONG\r\n", 3 }; - yield return new object[] { "$4\r\nPING\r\n$4\r\nPONG\r\n$4\r\nPONG\r\n$", 3 }; - } - - [Theory] - [MemberData(nameof(GetTestData))] - public void ParseAsSingleChunk(string ascii, int expected) - { - var buffer = new ReadOnlySequence(Encoding.ASCII.GetBytes(ascii)); - using (var arena = new Arena()) - { - ProcessMessages(arena, buffer, expected); - } - } - - [Theory] - [MemberData(nameof(GetTestData))] - public void ParseAsLotsOfChunks(string ascii, int expected) - { - var bytes = Encoding.ASCII.GetBytes(ascii); - FragmentedSegment chain = null, tail = null; - for (int i = 0; i < bytes.Length; i++) - { - var next = new FragmentedSegment(i, new ReadOnlyMemory(bytes, i, 1)); - if (tail == null) - { - chain = next; - } - else - { - tail.Next = next; - } - tail = next; - } - var buffer = new ReadOnlySequence(chain, 0, tail, 1); - Assert.Equal(bytes.Length, buffer.Length); - using (var arena = new Arena()) - { - 
ProcessMessages(arena, buffer, expected); - } - } - - private void ProcessMessages(Arena arena, ReadOnlySequence buffer, int expected) - { - Writer.WriteLine($"chain: {buffer.Length}"); - var reader = new BufferReader(buffer); - RawResult result; - int found = 0; - while (!(result = PhysicalConnection.TryParseResult(arena, buffer, ref reader, false, null, false)).IsNull) - { - Writer.WriteLine($"{result} - {result.GetString()}"); - found++; - } - Assert.Equal(expected, found); - } - - private class FragmentedSegment : ReadOnlySequenceSegment - { - public FragmentedSegment(long runningIndex, ReadOnlyMemory memory) - { - RunningIndex = runningIndex; - Memory = memory; - } - - public new FragmentedSegment Next - { - get => (FragmentedSegment)base.Next; - set => base.Next = value; - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/ParseTests.cs b/tests/StackExchange.Redis.Tests/ParseTests.cs new file mode 100644 index 000000000..2621ddab9 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/ParseTests.cs @@ -0,0 +1,96 @@ +using System; +using System.Buffers; +using System.Collections.Generic; +using System.Text; +using Pipelines.Sockets.Unofficial.Arenas; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class ParseTests(ITestOutputHelper output) : TestBase(output) +{ + public static IEnumerable GetTestData() + { + yield return new object[] { "$4\r\nPING\r\n$4\r\nPON", 1 }; + yield return new object[] { "$4\r\nPING\r\n$4\r\nPONG", 1 }; + yield return new object[] { "$4\r\nPING\r\n$4\r\nPONG\r", 1 }; + yield return new object[] { "$4\r\nPING\r\n$4\r\nPONG\r\n", 2 }; + yield return new object[] { "$4\r\nPING\r\n$4\r\nPONG\r\n$4", 2 }; + yield return new object[] { "$4\r\nPING\r\n$4\r\nPONG\r\n$4\r", 2 }; + yield return new object[] { "$4\r\nPING\r\n$4\r\nPONG\r\n$4\r\n", 2 }; + yield return new object[] { "$4\r\nPING\r\n$4\r\nPONG\r\n$4\r\nP", 2 }; + yield return new object[] { "$4\r\nPING\r\n$4\r\nPONG\r\n$4\r\nPO", 2 }; + yield return new object[] 
{ "$4\r\nPING\r\n$4\r\nPONG\r\n$4\r\nPON", 2 }; + yield return new object[] { "$4\r\nPING\r\n$4\r\nPONG\r\n$4\r\nPONG", 2 }; + yield return new object[] { "$4\r\nPING\r\n$4\r\nPONG\r\n$4\r\nPONG\r", 2 }; + yield return new object[] { "$4\r\nPING\r\n$4\r\nPONG\r\n$4\r\nPONG\r\n", 3 }; + yield return new object[] { "$4\r\nPING\r\n$4\r\nPONG\r\n$4\r\nPONG\r\n$", 3 }; + } + + [Theory] + [MemberData(nameof(GetTestData))] + public void ParseAsSingleChunk(string ascii, int expected) + { + var buffer = new ReadOnlySequence(Encoding.ASCII.GetBytes(ascii)); + using (var arena = new Arena()) + { + ProcessMessages(arena, buffer, expected); + } + } + + [Theory] + [MemberData(nameof(GetTestData))] + public void ParseAsLotsOfChunks(string ascii, int expected) + { + var bytes = Encoding.ASCII.GetBytes(ascii); + FragmentedSegment? chain = null, tail = null; + for (int i = 0; i < bytes.Length; i++) + { + var next = new FragmentedSegment(i, new ReadOnlyMemory(bytes, i, 1)); + if (tail == null) + { + chain = next; + } + else + { + tail.Next = next; + } + tail = next; + } + var buffer = new ReadOnlySequence(chain!, 0, tail!, 1); + Assert.Equal(bytes.Length, buffer.Length); + using (var arena = new Arena()) + { + ProcessMessages(arena, buffer, expected); + } + } + + private void ProcessMessages(Arena arena, ReadOnlySequence buffer, int expected) + { + Log($"chain: {buffer.Length}"); + var reader = new BufferReader(buffer); + RawResult result; + int found = 0; + while (!(result = PhysicalConnection.TryParseResult(false, arena, buffer, ref reader, false, null, false)).IsNull) + { + Log($"{result} - {result.GetString()}"); + found++; + } + Assert.Equal(expected, found); + } + + private sealed class FragmentedSegment : ReadOnlySequenceSegment + { + public FragmentedSegment(long runningIndex, ReadOnlyMemory memory) + { + RunningIndex = runningIndex; + Memory = memory; + } + + public new FragmentedSegment? 
Next + { + get => (FragmentedSegment?)base.Next; + set => base.Next = value; + } + } +} diff --git a/tests/StackExchange.Redis.Tests/Performance.cs b/tests/StackExchange.Redis.Tests/Performance.cs deleted file mode 100644 index c88921243..000000000 --- a/tests/StackExchange.Redis.Tests/Performance.cs +++ /dev/null @@ -1,132 +0,0 @@ -using System.Diagnostics; -using System.Text; -using System.Threading.Tasks; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - [Collection(NonParallelCollection.Name)] - public class Performance : TestBase - { - public Performance(ITestOutputHelper output) : base(output) { } - - [FactLongRunning] - public void VerifyPerformanceImprovement() - { - int asyncTimer, sync, op = 0, asyncFaF, syncFaF; - var key = Me(); - using (var muxer = Create()) - { - // do these outside the timings, just to ensure the core methods are JITted etc - for (int db = 0; db < 5; db++) - { - muxer.GetDatabase(db).KeyDeleteAsync(key); - } - - var timer = Stopwatch.StartNew(); - for (int i = 0; i < 100; i++) - { - // want to test multiplex scenario; test each db, but to make it fair we'll - // do in batches of 10 on each - for (int db = 0; db < 5; db++) - { - var conn = muxer.GetDatabase(db); - for (int j = 0; j < 10; j++) - conn.StringIncrementAsync(key); - } - } - asyncFaF = (int)timer.ElapsedMilliseconds; - var final = new Task[5]; - for (int db = 0; db < 5; db++) - final[db] = muxer.GetDatabase(db).StringGetAsync(key); - muxer.WaitAll(final); - timer.Stop(); - asyncTimer = (int)timer.ElapsedMilliseconds; - Log("async to completion (local): {0}ms", timer.ElapsedMilliseconds); - for (int db = 0; db < 5; db++) - { - Assert.Equal(1000, (long)final[db].Result); // "async, db:" + db - } - } - - using (var conn = new RedisSharp.Redis(TestConfig.Current.MasterServer, TestConfig.Current.MasterPort)) - { - // do these outside the timings, just to ensure the core methods are JITted etc - for (int db = 0; db < 5; db++) - { - conn.Db = db; 
- conn.Remove(key); - } - - var timer = Stopwatch.StartNew(); - for (int i = 0; i < 100; i++) - { - // want to test multiplex scenario; test each db, but to make it fair we'll - // do in batches of 10 on each - for (int db = 0; db < 5; db++) - { - conn.Db = db; - op++; - for (int j = 0; j < 10; j++) - { - conn.Increment(key); - op++; - } - } - } - syncFaF = (int)timer.ElapsedMilliseconds; - string[] final = new string[5]; - for (int db = 0; db < 5; db++) - { - conn.Db = db; - final[db] = Encoding.ASCII.GetString(conn.Get(key)); - } - timer.Stop(); - sync = (int)timer.ElapsedMilliseconds; - Log("sync to completion (local): {0}ms", timer.ElapsedMilliseconds); - for (int db = 0; db < 5; db++) - { - Assert.Equal("1000", final[db]); // "async, db:" + db - } - } - int effectiveAsync = ((10 * asyncTimer) + 3) / 10; - int effectiveSync = ((10 * sync) + (op * 3)) / 10; - Log("async to completion with assumed 0.3ms LAN latency: " + effectiveAsync); - Log("sync to completion with assumed 0.3ms LAN latency: " + effectiveSync); - Log("fire-and-forget: {0}ms sync vs {1}ms async ", syncFaF, asyncFaF); - Assert.True(effectiveAsync < effectiveSync, "Everything"); - Assert.True(asyncFaF < syncFaF, "Fire and Forget"); - } - - [Fact] - public async Task BasicStringGetPerf() - { - using (var conn = Create()) - { - RedisKey key = Me(); - var db = conn.GetDatabase(); - await db.StringSetAsync(key, "some value").ForAwait(); - - // this is just to JIT everything before we try testing - var syncVal = db.StringGet(key); - var asyncVal = await db.StringGetAsync(key).ForAwait(); - - var syncTimer = Stopwatch.StartNew(); - syncVal = db.StringGet(key); - syncTimer.Stop(); - - var asyncTimer = Stopwatch.StartNew(); - asyncVal = await db.StringGetAsync(key).ForAwait(); - asyncTimer.Stop(); - - Log($"Sync: {syncTimer.ElapsedMilliseconds}; Async: {asyncTimer.ElapsedMilliseconds}"); - Assert.Equal("some value", syncVal); - Assert.Equal("some value", asyncVal); - // let's allow 20% async overhead - // 
But with a floor, since the base can often be zero - Assert.True(asyncTimer.ElapsedMilliseconds <= System.Math.Max(syncTimer.ElapsedMilliseconds * 1.2M, 50)); - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/PerformanceTests.cs b/tests/StackExchange.Redis.Tests/PerformanceTests.cs new file mode 100644 index 000000000..b308bf0ac --- /dev/null +++ b/tests/StackExchange.Redis.Tests/PerformanceTests.cs @@ -0,0 +1,130 @@ +using System.Diagnostics; +using System.Text; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +[Collection(NonParallelCollection.Name)] +public class PerformanceTests(ITestOutputHelper output) : TestBase(output) +{ + [Fact] + public async Task VerifyPerformanceImprovement() + { + Skip.UnlessLongRunning(); + int asyncTimer, sync, op = 0, asyncFaF, syncFaF; + var key = Me(); + await using (var conn = Create()) + { + // do these outside the timings, just to ensure the core methods are JITted etc + for (int dbId = 0; dbId < 5; dbId++) + { + _ = conn.GetDatabase(dbId).KeyDeleteAsync(key); + } + + var timer = Stopwatch.StartNew(); + for (int i = 0; i < 100; i++) + { + // want to test multiplex scenario; test each db, but to make it fair we'll + // do in batches of 10 on each + for (int dbId = 0; dbId < 5; dbId++) + { + var db = conn.GetDatabase(dbId); + for (int j = 0; j < 10; j++) + { + _ = db.StringIncrementAsync(key); + } + } + } + asyncFaF = (int)timer.ElapsedMilliseconds; + var final = new Task[5]; + for (int db = 0; db < 5; db++) + final[db] = conn.GetDatabase(db).StringGetAsync(key); + conn.WaitAll(final); + timer.Stop(); + asyncTimer = (int)timer.ElapsedMilliseconds; + Log("async to completion (local): {0}ms", timer.ElapsedMilliseconds); + for (int db = 0; db < 5; db++) + { + Assert.Equal(1000, (long)final[db].Result); // "async, db:" + db + } + } + + using (var conn = new RedisSharp.Redis(TestConfig.Current.PrimaryServer, TestConfig.Current.PrimaryPort)) + { + // do these outside the timings, just to 
ensure the core methods are JITted etc + for (int db = 0; db < 5; db++) + { + conn.Db = db; + conn.Remove(key); + } + + var timer = Stopwatch.StartNew(); + for (int i = 0; i < 100; i++) + { + // want to test multiplex scenario; test each db, but to make it fair we'll + // do in batches of 10 on each + for (int db = 0; db < 5; db++) + { + conn.Db = db; + op++; + for (int j = 0; j < 10; j++) + { + conn.Increment(key); + op++; + } + } + } + syncFaF = (int)timer.ElapsedMilliseconds; + string[] final = new string[5]; + for (int db = 0; db < 5; db++) + { + conn.Db = db; + final[db] = Encoding.ASCII.GetString(conn.Get(key)); + } + timer.Stop(); + sync = (int)timer.ElapsedMilliseconds; + Log("sync to completion (local): {0}ms", timer.ElapsedMilliseconds); + for (int db = 0; db < 5; db++) + { + Assert.Equal("1000", final[db]); // "async, db:" + db + } + } + int effectiveAsync = ((10 * asyncTimer) + 3) / 10; + int effectiveSync = ((10 * sync) + (op * 3)) / 10; + Log("async to completion with assumed 0.3ms LAN latency: " + effectiveAsync); + Log("sync to completion with assumed 0.3ms LAN latency: " + effectiveSync); + Log("fire-and-forget: {0}ms sync vs {1}ms async ", syncFaF, asyncFaF); + Assert.True(effectiveAsync < effectiveSync, "Everything"); + Assert.True(asyncFaF < syncFaF, "Fire and Forget"); + } + + [Fact] + public async Task BasicStringGetPerf() + { + await using var conn = Create(); + + RedisKey key = Me(); + var db = conn.GetDatabase(); + await db.StringSetAsync(key, "some value").ForAwait(); + + // this is just to JIT everything before we try testing + var syncVal = db.StringGet(key); + var asyncVal = await db.StringGetAsync(key).ForAwait(); + + var syncTimer = Stopwatch.StartNew(); + syncVal = db.StringGet(key); + syncTimer.Stop(); + + var asyncTimer = Stopwatch.StartNew(); + asyncVal = await db.StringGetAsync(key).ForAwait(); + asyncTimer.Stop(); + + Log($"Sync: {syncTimer.ElapsedMilliseconds}; Async: {asyncTimer.ElapsedMilliseconds}"); + Assert.Equal("some 
value", syncVal); + Assert.Equal("some value", asyncVal); + // let's allow 20% async overhead + // But with a floor, since the base can often be zero + Assert.True(asyncTimer.ElapsedMilliseconds <= System.Math.Max(syncTimer.ElapsedMilliseconds * 1.2M, 50)); + } +} diff --git a/tests/StackExchange.Redis.Tests/PreserveOrder.cs b/tests/StackExchange.Redis.Tests/PreserveOrder.cs deleted file mode 100644 index ac2bd9361..000000000 --- a/tests/StackExchange.Redis.Tests/PreserveOrder.cs +++ /dev/null @@ -1,70 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Diagnostics; -using System.Threading; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - [Collection(SharedConnectionFixture.Key)] - public class PreserveOrder : TestBase - { - public PreserveOrder(ITestOutputHelper output, SharedConnectionFixture fixture) : base (output, fixture) { } - - [Fact] - public void Execute() - { - using (var conn = Create()) - { - var sub = conn.GetSubscriber(); - var channel = Me(); - var received = new List(); - Log("Subscribing..."); - const int COUNT = 500; - sub.Subscribe(channel, (_, message) => - { - lock (received) - { - received.Add((int)message); - if (received.Count == COUNT) - Monitor.PulseAll(received); // wake the test rig - } - }); - Log(""); - Log("Sending (any order)..."); - lock (received) - { - received.Clear(); - // we'll also use received as a wait-detection mechanism; sneaky - - // note: this does not do any cheating; - // it all goes to the server and back - for (int i = 0; i < COUNT; i++) - { - sub.Publish(channel, i, CommandFlags.FireAndForget); - } - - Log("Allowing time for delivery etc..."); - var watch = Stopwatch.StartNew(); - if (!Monitor.Wait(received, 10000)) - { - Log("Timed out; expect less data"); - } - watch.Stop(); - Log("Checking..."); - lock (received) - { - Log("Received: {0} in {1}ms", received.Count, watch.ElapsedMilliseconds); - int wrongOrder = 0; - for (int i = 0; i < Math.Min(COUNT, 
received.Count); i++) - { - if (received[i] != i) wrongOrder++; - } - Log("Out of order: " + wrongOrder); - } - } - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/PreserveOrderTests.cs b/tests/StackExchange.Redis.Tests/PreserveOrderTests.cs new file mode 100644 index 000000000..ce20ccf7b --- /dev/null +++ b/tests/StackExchange.Redis.Tests/PreserveOrderTests.cs @@ -0,0 +1,65 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Threading; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class PreserveOrderTests(ITestOutputHelper output, SharedConnectionFixture fixture) : TestBase(output, fixture) +{ + [Fact] + public async Task Execute() + { + await using var conn = Create(); + + var sub = conn.GetSubscriber(); + var channel = Me(); + var received = new List(); + Log("Subscribing..."); + const int COUNT = 500; + sub.Subscribe(RedisChannel.Literal(channel), (_, message) => + { + lock (received) + { + received.Add((int)message); + if (received.Count == COUNT) + Monitor.PulseAll(received); // wake the test rig + } + }); + Log(""); + Log("Sending (any order)..."); + lock (received) + { + received.Clear(); + // we'll also use received as a wait-detection mechanism; sneaky + + // note: this does not do any cheating; + // it all goes to the server and back + for (int i = 0; i < COUNT; i++) + { + sub.Publish(RedisChannel.Literal(channel), i, CommandFlags.FireAndForget); + } + + Log("Allowing time for delivery etc..."); + var watch = Stopwatch.StartNew(); + if (!Monitor.Wait(received, 10000)) + { + Log("Timed out; expect less data"); + } + watch.Stop(); + Log("Checking..."); + lock (received) + { + Log("Received: {0} in {1}ms", received.Count, watch.ElapsedMilliseconds); + int wrongOrder = 0; + for (int i = 0; i < Math.Min(COUNT, received.Count); i++) + { + if (received[i] != i) wrongOrder++; + } + Log("Out of order: " + wrongOrder); + } + } + } +} diff --git 
a/tests/StackExchange.Redis.Tests/Profiling.cs b/tests/StackExchange.Redis.Tests/Profiling.cs deleted file mode 100644 index da69e5069..000000000 --- a/tests/StackExchange.Redis.Tests/Profiling.cs +++ /dev/null @@ -1,407 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Linq; -using System.Threading.Tasks; -using System.Threading; -using System.Collections.Concurrent; -using Xunit; -using Xunit.Abstractions; -using StackExchange.Redis.Profiling; - -namespace StackExchange.Redis.Tests -{ - [Collection(NonParallelCollection.Name)] - public class Profiling : TestBase - { - public Profiling(ITestOutputHelper output) : base(output) { } - - [Fact] - public void Simple() - { - using (var conn = Create()) - { - var key = Me(); - - var session = new ProfilingSession(); - - conn.RegisterProfiler(() => session); - - var dbId = TestConfig.GetDedicatedDB(); - var db = conn.GetDatabase(dbId); - db.StringSet(key, "world"); - var result = db.ScriptEvaluate(LuaScript.Prepare("return redis.call('get', @key)"), new { key = (RedisKey)key }); - Assert.Equal("world", result.AsString()); - var val = db.StringGet(key); - Assert.Equal("world", val); - var s = (string)db.Execute("ECHO", "fii"); - Assert.Equal("fii", s); - - var cmds = session.FinishProfiling(); - var i = 0; - foreach (var cmd in cmds) - { - Log("Command {0} (DB: {1}): {2}", i++, cmd.Db, cmd.ToString().Replace("\n", ", ")); - } - - var all = string.Join(",", cmds.Select(x => x.Command)); - Assert.Equal("SET,EVAL,GET,ECHO", all); - Log("Checking for SET"); - var set = cmds.SingleOrDefault(cmd => cmd.Command == "SET"); - Assert.NotNull(set); - Log("Checking for GET"); - var get = cmds.SingleOrDefault(cmd => cmd.Command == "GET"); - Assert.NotNull(get); - Log("Checking for EVAL"); - var eval = cmds.SingleOrDefault(cmd => cmd.Command == "EVAL"); - Assert.NotNull(eval); - Log("Checking for ECHO"); - var echo = cmds.SingleOrDefault(cmd => cmd.Command == "ECHO"); - Assert.NotNull(echo); - - Assert.Equal(4, 
cmds.Count()); - - Assert.True(set.CommandCreated <= eval.CommandCreated); - Assert.True(eval.CommandCreated <= get.CommandCreated); - - AssertProfiledCommandValues(set, conn, dbId); - - AssertProfiledCommandValues(get, conn, dbId); - - AssertProfiledCommandValues(eval, conn, dbId); - - AssertProfiledCommandValues(echo, conn, dbId); - } - } - - private static void AssertProfiledCommandValues(IProfiledCommand command, IConnectionMultiplexer conn, int dbId) - { - Assert.Equal(dbId, command.Db); - Assert.Equal(conn.GetEndPoints()[0], command.EndPoint); - Assert.True(command.CreationToEnqueued > TimeSpan.Zero, nameof(command.CreationToEnqueued)); - Assert.True(command.EnqueuedToSending > TimeSpan.Zero, nameof(command.EnqueuedToSending)); - Assert.True(command.SentToResponse > TimeSpan.Zero, nameof(command.SentToResponse)); - Assert.True(command.ResponseToCompletion >= TimeSpan.Zero, nameof(command.ResponseToCompletion)); - Assert.True(command.ElapsedTime > TimeSpan.Zero, nameof(command.ElapsedTime)); - Assert.True(command.ElapsedTime > command.CreationToEnqueued && command.ElapsedTime > command.EnqueuedToSending && command.ElapsedTime > command.SentToResponse, "Comparisons"); - Assert.True(command.RetransmissionOf == null, nameof(command.RetransmissionOf)); - Assert.True(command.RetransmissionReason == null, nameof(command.RetransmissionReason)); - } - - [FactLongRunning] - public void ManyThreads() - { - using (var conn = Create()) - { - var session = new ProfilingSession(); - var prefix = Me(); - - conn.RegisterProfiler(() => session); - - var threads = new List(); - const int CountPer = 100; - for (var i = 1; i <= 16; i++) - { - var db = conn.GetDatabase(i); - - threads.Add(new Thread(() => - { - var threadTasks = new List(); - - for (var j = 0; j < CountPer; j++) - { - var task = db.StringSetAsync(prefix + j, "" + j); - threadTasks.Add(task); - } - - Task.WaitAll(threadTasks.ToArray()); - })); - } - - threads.ForEach(thread => thread.Start()); - 
threads.ForEach(thread => thread.Join()); - - var allVals = session.FinishProfiling(); - var relevant = allVals.Where(cmd => cmd.Db > 0).ToList(); - - var kinds = relevant.Select(cmd => cmd.Command).Distinct().ToList(); - foreach (var k in kinds) - { - Log("Kind Seen: " + k); - } - Assert.True(kinds.Count <= 2); - Assert.Contains("SET", kinds); - if (kinds.Count == 2 && !kinds.Contains("SELECT") && !kinds.Contains("GET")) - { - Assert.True(false, "Non-SET, Non-SELECT, Non-GET command seen"); - } - - Assert.Equal(16 * CountPer, relevant.Count); - Assert.Equal(16, relevant.Select(cmd => cmd.Db).Distinct().Count()); - - for (var i = 1; i <= 16; i++) - { - var setsInDb = relevant.Count(cmd => cmd.Db == i); - Assert.Equal(CountPer, setsInDb); - } - } - } - - [FactLongRunning] - public void ManyContexts() - { - using (var conn = Create()) - { - var profiler = new AsyncLocalProfiler(); - var prefix = Me(); - conn.RegisterProfiler(profiler.GetSession); - - var tasks = new Task[16]; - - var results = new ProfiledCommandEnumerable[tasks.Length]; - - for (var i = 0; i < tasks.Length; i++) - { - var ix = i; - tasks[ix] = Task.Run(async () => - { - var db = conn.GetDatabase(ix); - - var allTasks = new List(); - - for (var j = 0; j < 1000; j++) - { - var g = db.StringGetAsync(prefix + ix); - var s = db.StringSetAsync(prefix + ix, "world" + ix); - // overlap the g+s, just for fun - await g; - await s; - } - - results[ix] = profiler.GetSession().FinishProfiling(); - }); - } - Task.WhenAll(tasks).Wait(); - - for (var i = 0; i < results.Length; i++) - { - var res = results[i]; - - var numGets = res.Count(r => r.Command == "GET"); - var numSets = res.Count(r => r.Command == "SET"); - - Assert.Equal(1000, numGets); - Assert.Equal(1000, numSets); - Assert.True(res.All(cmd => cmd.Db == i)); - } - } - } - - internal class PerThreadProfiler - { - private readonly ThreadLocal perThreadSession = new ThreadLocal(() => new ProfilingSession()); - - public ProfilingSession GetSession() => 
perThreadSession.Value; - } - - internal class AsyncLocalProfiler - { - private readonly AsyncLocal perThreadSession = new AsyncLocal(); - - public ProfilingSession GetSession() - { - var val = perThreadSession.Value; - if (val == null) - { - perThreadSession.Value = val = new ProfilingSession(); - } - return val; - } - } - - [Fact] - public void LowAllocationEnumerable() - { - const int OuterLoop = 1000; - - using (var conn = Create()) - { - var session = new ProfilingSession(); - conn.RegisterProfiler(() => session); - - var prefix = Me(); - var db = conn.GetDatabase(1); - - var allTasks = new List>(); - - foreach (var i in Enumerable.Range(0, OuterLoop)) - { - var t = - db.StringSetAsync(prefix + i, "bar" + i) - .ContinueWith( - async _ => (string)(await db.StringGetAsync(prefix + i).ForAwait()) - ); - - var finalResult = t.Unwrap(); - allTasks.Add(finalResult); - } - - conn.WaitAll(allTasks.ToArray()); - - var res = session.FinishProfiling(); - Assert.True(res.GetType().IsValueType); - - using (var e = res.GetEnumerator()) - { - Assert.True(e.GetType().IsValueType); - - Assert.True(e.MoveNext()); - var i = e.Current; - - e.Reset(); - Assert.True(e.MoveNext()); - var j = e.Current; - - Assert.True(ReferenceEquals(i, j)); - } - - Assert.Equal(OuterLoop, res.Count(r => r.Command == "GET" && r.Db > 0)); - Assert.Equal(OuterLoop, res.Count(r => r.Command == "SET" && r.Db > 0)); - Assert.Equal(OuterLoop * 2, res.Count(r => r.Db > 0)); - } - } - - [FactLongRunning] - public void ProfilingMD_Ex1() - { - using (var c = Create()) - { - IConnectionMultiplexer conn = c; - var session = new ProfilingSession(); - var prefix = Me(); - - conn.RegisterProfiler(() => session); - - var threads = new List(); - - for (var i = 0; i < 16; i++) - { - var db = conn.GetDatabase(i); - - var thread = new Thread(() => - { - var threadTasks = new List(); - - for (var j = 0; j < 1000; j++) - { - var task = db.StringSetAsync(prefix + j, "" + j); - threadTasks.Add(task); - } - - 
Task.WaitAll(threadTasks.ToArray()); - }); - - threads.Add(thread); - } - - threads.ForEach(thread => thread.Start()); - threads.ForEach(thread => thread.Join()); - - IEnumerable timings = session.FinishProfiling(); - - Assert.Equal(16000, timings.Count()); - } - } - - [FactLongRunning] - public void ProfilingMD_Ex2() - { - using (var c = Create()) - { - IConnectionMultiplexer conn = c; - var profiler = new PerThreadProfiler(); - var prefix = Me(); - - conn.RegisterProfiler(profiler.GetSession); - - var threads = new List(); - - var perThreadTimings = new ConcurrentDictionary>(); - - for (var i = 0; i < 16; i++) - { - var db = conn.GetDatabase(i); - - var thread = new Thread(() => - { - var threadTasks = new List(); - - for (var j = 0; j < 1000; j++) - { - var task = db.StringSetAsync(prefix + j, "" + j); - threadTasks.Add(task); - } - - Task.WaitAll(threadTasks.ToArray()); - - perThreadTimings[Thread.CurrentThread] = profiler.GetSession().FinishProfiling().ToList(); - }); - - threads.Add(thread); - } - - threads.ForEach(thread => thread.Start()); - threads.ForEach(thread => thread.Join()); - - Assert.Equal(16, perThreadTimings.Count); - Assert.True(perThreadTimings.All(kv => kv.Value.Count == 1000)); - } - } - - [FactLongRunning] - public async Task ProfilingMD_Ex2_Async() - { - using (var c = Create()) - { - IConnectionMultiplexer conn = c; - var profiler = new AsyncLocalProfiler(); - var prefix = Me(); - - conn.RegisterProfiler(profiler.GetSession); - - var tasks = new List(); - - var perThreadTimings = new ConcurrentBag>(); - - for (var i = 0; i < 16; i++) - { - var db = conn.GetDatabase(i); - - var task = Task.Run(async () => - { - for (var j = 0; j < 100; j++) - { - await db.StringSetAsync(prefix + j, "" + j).ForAwait(); - } - - perThreadTimings.Add(profiler.GetSession().FinishProfiling().ToList()); - }); - - tasks.Add(task); - } - - var timeout = Task.Delay(10000); - var complete = Task.WhenAll(tasks); - if (timeout == await Task.WhenAny(timeout, 
complete).ForAwait()) - { - throw new TimeoutException(); - } - - Assert.Equal(16, perThreadTimings.Count); - foreach (var item in perThreadTimings) - { - Assert.Equal(100, item.Count); - } - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/ProfilingTests.cs b/tests/StackExchange.Redis.Tests/ProfilingTests.cs new file mode 100644 index 000000000..366abd395 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/ProfilingTests.cs @@ -0,0 +1,405 @@ +using System; +using System.Collections.Concurrent; +using System.Collections.Generic; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; +using StackExchange.Redis.Profiling; +using Xunit; + +namespace StackExchange.Redis.Tests; + +[Collection(NonParallelCollection.Name)] +public class ProfilingTests(ITestOutputHelper output) : TestBase(output) +{ + [Fact] + public async Task Simple() + { + await using var conn = Create(); + + var server = conn.GetServer(TestConfig.Current.PrimaryServerAndPort); + var script = LuaScript.Prepare("return redis.call('get', @key)"); + var loaded = script.Load(server); + var key = Me(); + + var session = new ProfilingSession(); + + conn.RegisterProfiler(() => session); + + var dbId = TestConfig.GetDedicatedDB(conn); + var db = conn.GetDatabase(dbId); + db.StringSet(key, "world"); + var result = db.ScriptEvaluate(script, new { key = (RedisKey)key }); + Assert.NotNull(result); + Assert.Equal("world", result.AsString()); + var loadedResult = db.ScriptEvaluate(loaded, new { key = (RedisKey)key }); + Assert.NotNull(loadedResult); + Assert.Equal("world", loadedResult.AsString()); + var val = db.StringGet(key); + Assert.Equal("world", val); + var s = (string?)db.Execute("ECHO", "fii"); + Assert.Equal("fii", s); + + var cmds = session.FinishProfiling(); + var evalCmds = cmds.Where(c => c.Command == "EVAL").ToList(); + Assert.Equal(2, evalCmds.Count); + var i = 0; + foreach (var cmd in cmds) + { + Log($"Command {i++} (DB: {cmd.Db}): {cmd?.ToString()?.Replace("\n", ", 
")}"); + } + + var all = string.Join(",", cmds.Select(x => x.Command)); + Assert.Equal("SET,EVAL,EVAL,GET,ECHO", all); + Log("Checking for SET"); + var set = cmds.SingleOrDefault(cmd => cmd.Command == "SET"); + Assert.NotNull(set); + Log("Checking for GET"); + var get = cmds.SingleOrDefault(cmd => cmd.Command == "GET"); + Assert.NotNull(get); + Log("Checking for EVAL"); + var eval1 = evalCmds[0]; + Log("Checking for EVAL"); + var eval2 = evalCmds[1]; + var echo = cmds.SingleOrDefault(cmd => cmd.Command == "ECHO"); + Assert.NotNull(echo); + + Assert.Equal(5, cmds.Count()); + + Assert.True(set.CommandCreated <= eval1.CommandCreated); + Assert.True(eval1.CommandCreated <= eval2.CommandCreated); + Assert.True(eval2.CommandCreated <= get.CommandCreated); + + AssertProfiledCommandValues(set, conn, dbId); + + AssertProfiledCommandValues(get, conn, dbId); + + AssertProfiledCommandValues(eval1, conn, dbId); + + AssertProfiledCommandValues(eval2, conn, dbId); + + AssertProfiledCommandValues(echo, conn, dbId); + } + + private static void AssertProfiledCommandValues(IProfiledCommand command, IConnectionMultiplexer conn, int dbId) + { + Assert.Equal(dbId, command.Db); + Assert.Equal(conn.GetEndPoints()[0], command.EndPoint); + Assert.True(command.CreationToEnqueued > TimeSpan.Zero, nameof(command.CreationToEnqueued)); + Assert.True(command.EnqueuedToSending > TimeSpan.Zero, nameof(command.EnqueuedToSending)); + Assert.True(command.SentToResponse > TimeSpan.Zero, nameof(command.SentToResponse)); + Assert.True(command.ResponseToCompletion >= TimeSpan.Zero, nameof(command.ResponseToCompletion)); + Assert.True(command.ElapsedTime > TimeSpan.Zero, nameof(command.ElapsedTime)); + Assert.True(command.ElapsedTime > command.CreationToEnqueued && command.ElapsedTime > command.EnqueuedToSending && command.ElapsedTime > command.SentToResponse, "Comparisons"); + Assert.True(command.RetransmissionOf == null, nameof(command.RetransmissionOf)); + Assert.True(command.RetransmissionReason == 
null, nameof(command.RetransmissionReason)); + } + + [Fact] + public async Task ManyThreads() + { + Skip.UnlessLongRunning(); + await using var conn = Create(); + + var session = new ProfilingSession(); + var prefix = Me(); + + conn.RegisterProfiler(() => session); + + var threads = new List(); + const int CountPer = 100; + for (var i = 1; i <= 16; i++) + { + var db = conn.GetDatabase(i); + + threads.Add(new Thread(() => + { + var threadTasks = new List(); + + for (var j = 0; j < CountPer; j++) + { + var task = db.StringSetAsync(prefix + j, "" + j); + threadTasks.Add(task); + } + + Task.WaitAll(threadTasks.ToArray()); + })); + } + + threads.ForEach(thread => thread.Start()); + threads.ForEach(thread => thread.Join()); + + var allVals = session.FinishProfiling(); + var relevant = allVals.Where(cmd => cmd.Db > 0).ToList(); + + var kinds = relevant.Select(cmd => cmd.Command).Distinct().ToList(); + foreach (var k in kinds) + { + Log("Kind Seen: " + k); + } + Assert.True(kinds.Count <= 2); + Assert.Contains("SET", kinds); + if (kinds.Count == 2 && !kinds.Contains("SELECT") && !kinds.Contains("GET")) + { + Assert.Fail("Non-SET, Non-SELECT, Non-GET command seen"); + } + + Assert.Equal(16 * CountPer, relevant.Count); + Assert.Equal(16, relevant.Select(cmd => cmd.Db).Distinct().Count()); + + for (var i = 1; i <= 16; i++) + { + var setsInDb = relevant.Count(cmd => cmd.Db == i); + Assert.Equal(CountPer, setsInDb); + } + } + + [Fact] + public async Task ManyContexts() + { + Skip.UnlessLongRunning(); + await using var conn = Create(); + + var profiler = new AsyncLocalProfiler(); + var prefix = Me(); + conn.RegisterProfiler(profiler.GetSession); + + var tasks = new Task[16]; + + var results = new ProfiledCommandEnumerable[tasks.Length]; + + for (var i = 0; i < tasks.Length; i++) + { + var ix = i; + tasks[ix] = Task.Run(async () => + { + var db = conn.GetDatabase(ix); + + var allTasks = new List(); + + for (var j = 0; j < 1000; j++) + { + var g = db.StringGetAsync(prefix + ix); + 
var s = db.StringSetAsync(prefix + ix, "world" + ix); + // overlap the g+s, just for fun + await g; + await s; + } + + results[ix] = profiler.GetSession().FinishProfiling(); + }); + } + Task.WhenAll(tasks).Wait(); + + for (var i = 0; i < results.Length; i++) + { + var res = results[i]; + + var numGets = res.Count(r => r.Command == "GET"); + var numSets = res.Count(r => r.Command == "SET"); + + Assert.Equal(1000, numGets); + Assert.Equal(1000, numSets); + Assert.True(res.All(cmd => cmd.Db == i)); + } + } + + internal sealed class PerThreadProfiler + { + private readonly ThreadLocal perThreadSession = new ThreadLocal(() => new ProfilingSession()); + + public ProfilingSession GetSession() => perThreadSession.Value!; + } + + internal sealed class AsyncLocalProfiler + { + private readonly AsyncLocal perThreadSession = new AsyncLocal(); + + public ProfilingSession GetSession() + { + var val = perThreadSession.Value; + if (val == null) + { + perThreadSession.Value = val = new ProfilingSession(); + } + return val; + } + } + + [Fact] + public async Task LowAllocationEnumerable() + { + await using var conn = Create(); + + const int OuterLoop = 1000; + var session = new ProfilingSession(); + conn.RegisterProfiler(() => session); + + var prefix = Me(); + var db = conn.GetDatabase(1); + + var allTasks = new List>(); + + foreach (var i in Enumerable.Range(0, OuterLoop)) + { + var t = db.StringSetAsync(prefix + i, "bar" + i).ContinueWith(async _ => (string?)(await db.StringGetAsync(prefix + i).ForAwait())); + + var finalResult = t.Unwrap(); + allTasks.Add(finalResult); + } + + conn.WaitAll(allTasks.ToArray()); + + var res = session.FinishProfiling(); + Assert.True(res.GetType().IsValueType); + + using (var e = res.GetEnumerator()) + { + Assert.True(e.GetType().IsValueType); + + Assert.True(e.MoveNext()); + var i = e.Current; + + e.Reset(); + Assert.True(e.MoveNext()); + var j = e.Current; + + Assert.True(ReferenceEquals(i, j)); + } + + Assert.Equal(OuterLoop, res.Count(r => 
r.Command == "GET" && r.Db > 0)); + Assert.Equal(OuterLoop, res.Count(r => r.Command == "SET" && r.Db > 0)); + Assert.Equal(OuterLoop * 2, res.Count(r => r.Db > 0)); + } + + [Fact] + public async Task ProfilingMD_Ex1() + { + Skip.UnlessLongRunning(); + await using var conn = Create(); + + var session = new ProfilingSession(); + var prefix = Me(); + + conn.RegisterProfiler(() => session); + + var threads = new List(); + + for (var i = 0; i < 16; i++) + { + var db = conn.GetDatabase(i); + + var thread = new Thread(() => + { + var threadTasks = new List(); + + for (var j = 0; j < 1000; j++) + { + var task = db.StringSetAsync(prefix + j, "" + j); + threadTasks.Add(task); + } + + Task.WaitAll(threadTasks.ToArray()); + }); + + threads.Add(thread); + } + + threads.ForEach(thread => thread.Start()); + threads.ForEach(thread => thread.Join()); + + IEnumerable timings = session.FinishProfiling(); + + Assert.Equal(16000, timings.Count()); + } + + [Fact] + public async Task ProfilingMD_Ex2() + { + Skip.UnlessLongRunning(); + await using var conn = Create(); + + var profiler = new PerThreadProfiler(); + var prefix = Me(); + + conn.RegisterProfiler(profiler.GetSession); + + var threads = new List(); + + var perThreadTimings = new ConcurrentDictionary>(); + + for (var i = 0; i < 16; i++) + { + var db = conn.GetDatabase(i); + + var thread = new Thread(() => + { + var threadTasks = new List(); + + for (var j = 0; j < 1000; j++) + { + var task = db.StringSetAsync(prefix + j, "" + j); + threadTasks.Add(task); + } + + Task.WaitAll(threadTasks.ToArray()); + + perThreadTimings[Thread.CurrentThread] = profiler.GetSession().FinishProfiling().ToList(); + }); + + threads.Add(thread); + } + + threads.ForEach(thread => thread.Start()); + threads.ForEach(thread => thread.Join()); + + Assert.Equal(16, perThreadTimings.Count); + Assert.True(perThreadTimings.All(kv => kv.Value.Count == 1000)); + } + + [Fact] + public async Task ProfilingMD_Ex2_Async() + { + Skip.UnlessLongRunning(); + await using 
var conn = Create(); + + var profiler = new AsyncLocalProfiler(); + var prefix = Me(); + + conn.RegisterProfiler(profiler.GetSession); + + var tasks = new List(); + + var perThreadTimings = new ConcurrentBag>(); + + for (var i = 0; i < 16; i++) + { + var db = conn.GetDatabase(i); + + var task = Task.Run(async () => + { + for (var j = 0; j < 100; j++) + { + await db.StringSetAsync(prefix + j, "" + j).ForAwait(); + } + + perThreadTimings.Add(profiler.GetSession().FinishProfiling().ToList()); + }); + + tasks.Add(task); + } + + var timeout = Task.Delay(10000); + var complete = Task.WhenAll(tasks); + if (timeout == await Task.WhenAny(timeout, complete).ForAwait()) + { + throw new TimeoutException(); + } + + Assert.Equal(16, perThreadTimings.Count); + foreach (var item in perThreadTimings) + { + Assert.Equal(100, item.Count); + } + } +} diff --git a/tests/StackExchange.Redis.Tests/PubSub.cs b/tests/StackExchange.Redis.Tests/PubSub.cs deleted file mode 100644 index ad0e9060c..000000000 --- a/tests/StackExchange.Redis.Tests/PubSub.cs +++ /dev/null @@ -1,743 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Diagnostics; -using System.Text; -using System.Threading; -using System.Threading.Channels; -using System.Threading.Tasks; -using Xunit; -using Xunit.Abstractions; -// ReSharper disable AccessToModifiedClosure - -namespace StackExchange.Redis.Tests -{ - [Collection(SharedConnectionFixture.Key)] - public class PubSub : TestBase - { - public PubSub(ITestOutputHelper output, SharedConnectionFixture fixture) : base(output, fixture) { } - - [Fact] - public async Task ExplicitPublishMode() - { - using (var mx = Create(channelPrefix: "foo:")) - { - var pub = mx.GetSubscriber(); - int a = 0, b = 0, c = 0, d = 0; - pub.Subscribe(new RedisChannel("*bcd", RedisChannel.PatternMode.Literal), (x, y) => Interlocked.Increment(ref a)); - pub.Subscribe(new RedisChannel("a*cd", RedisChannel.PatternMode.Pattern), (x, y) => Interlocked.Increment(ref b)); - 
pub.Subscribe(new RedisChannel("ab*d", RedisChannel.PatternMode.Auto), (x, y) => Interlocked.Increment(ref c)); - pub.Subscribe("abc*", (x, y) => Interlocked.Increment(ref d)); - - await Task.Delay(1000).ForAwait(); - pub.Publish("abcd", "efg"); - await UntilCondition(TimeSpan.FromSeconds(10), - () => Thread.VolatileRead(ref b) == 1 - && Thread.VolatileRead(ref c) == 1 - && Thread.VolatileRead(ref d) == 1); - Assert.Equal(0, Thread.VolatileRead(ref a)); - Assert.Equal(1, Thread.VolatileRead(ref b)); - Assert.Equal(1, Thread.VolatileRead(ref c)); - Assert.Equal(1, Thread.VolatileRead(ref d)); - - pub.Publish("*bcd", "efg"); - await UntilCondition(TimeSpan.FromSeconds(10), () => Thread.VolatileRead(ref a) == 1); - Assert.Equal(1, Thread.VolatileRead(ref a)); - } - } - - [Theory] - [InlineData(null, false, "a")] - [InlineData("", false, "b")] - [InlineData("Foo:", false, "c")] - [InlineData(null, true, "d")] - [InlineData("", true, "e")] - [InlineData("Foo:", true, "f")] - public async Task TestBasicPubSub(string channelPrefix, bool wildCard, string breaker) - { - using (var muxer = Create(channelPrefix: channelPrefix)) - { - var pub = GetAnyMaster(muxer); - var sub = muxer.GetSubscriber(); - await PingAsync(muxer, pub, sub).ForAwait(); - HashSet received = new HashSet(); - int secondHandler = 0; - string subChannel = (wildCard ? 
"a*c" : "abc") + breaker; - string pubChannel = "abc" + breaker; - Action handler1 = (channel, payload) => - { - lock (received) - { - if (channel == pubChannel) - { - received.Add(payload); - } - else - { - Log(channel); - } - } - } - , handler2 = (_, __) => Interlocked.Increment(ref secondHandler); - sub.Subscribe(subChannel, handler1); - sub.Subscribe(subChannel, handler2); - - lock (received) - { - Assert.Empty(received); - } - Assert.Equal(0, Thread.VolatileRead(ref secondHandler)); - var count = sub.Publish(pubChannel, "def"); - - await PingAsync(muxer, pub, sub, 3).ForAwait(); - - lock (received) - { - Assert.Single(received); - } - Assert.Equal(1, Thread.VolatileRead(ref secondHandler)); - - // unsubscribe from first; should still see second - sub.Unsubscribe(subChannel, handler1); - count = sub.Publish(pubChannel, "ghi"); - await PingAsync(muxer, pub, sub).ForAwait(); - lock (received) - { - Assert.Single(received); - } - Assert.Equal(2, Thread.VolatileRead(ref secondHandler)); - Assert.Equal(1, count); - - // unsubscribe from second; should see nothing this time - sub.Unsubscribe(subChannel, handler2); - count = sub.Publish(pubChannel, "ghi"); - await PingAsync(muxer, pub, sub).ForAwait(); - lock (received) - { - Assert.Single(received); - } - Assert.Equal(2, Thread.VolatileRead(ref secondHandler)); - Assert.Equal(0, count); - } - } - - [Fact] - public async Task TestBasicPubSubFireAndForget() - { - using (var muxer = Create()) - { - var pub = GetAnyMaster(muxer); - var sub = muxer.GetSubscriber(); - - RedisChannel key = Me() + Guid.NewGuid(); - HashSet received = new HashSet(); - int secondHandler = 0; - await PingAsync(muxer, pub, sub).ForAwait(); - sub.Subscribe(key, (channel, payload) => - { - lock (received) - { - if (channel == key) - { - received.Add(payload); - } - } - }, CommandFlags.FireAndForget); - - sub.Subscribe(key, (_, __) => Interlocked.Increment(ref secondHandler), CommandFlags.FireAndForget); - - lock (received) - { - 
Assert.Empty(received); - } - Assert.Equal(0, Thread.VolatileRead(ref secondHandler)); - await PingAsync(muxer, pub, sub).ForAwait(); - var count = sub.Publish(key, "def", CommandFlags.FireAndForget); - await PingAsync(muxer, pub, sub).ForAwait(); - - lock (received) - { - Assert.Single(received); - } - Assert.Equal(1, Thread.VolatileRead(ref secondHandler)); - - sub.Unsubscribe(key); - count = sub.Publish(key, "ghi", CommandFlags.FireAndForget); - - await PingAsync(muxer, pub, sub).ForAwait(); - - lock (received) - { - Assert.Single(received); - } - Assert.Equal(0, count); - } - } - - private static async Task PingAsync(IConnectionMultiplexer muxer, IServer pub, ISubscriber sub, int times = 1) - { - while (times-- > 0) - { - // both use async because we want to drain the completion managers, and the only - // way to prove that is to use TPL objects - var t1 = sub.PingAsync(); - var t2 = pub.PingAsync(); - await Task.Delay(100).ForAwait(); // especially useful when testing any-order mode - - if (!Task.WaitAll(new[] { t1, t2 }, muxer.TimeoutMilliseconds * 2)) throw new TimeoutException(); - } - } - - [Fact] - public async Task TestPatternPubSub() - { - using (var muxer = Create()) - { - var pub = GetAnyMaster(muxer); - var sub = muxer.GetSubscriber(); - - HashSet received = new HashSet(); - int secondHandler = 0; - sub.Subscribe("a*c", (channel, payload) => - { - lock (received) - { - if (channel == "abc") - { - received.Add(payload); - } - } - }); - - sub.Subscribe("a*c", (_, __) => Interlocked.Increment(ref secondHandler)); - lock (received) - { - Assert.Empty(received); - } - Assert.Equal(0, Thread.VolatileRead(ref secondHandler)); - - await PingAsync(muxer, pub, sub).ForAwait(); - var count = sub.Publish("abc", "def"); - await PingAsync(muxer, pub, sub).ForAwait(); - - lock (received) - { - Assert.Single(received); - } - Assert.Equal(1, Thread.VolatileRead(ref secondHandler)); - - sub.Unsubscribe("a*c"); - count = sub.Publish("abc", "ghi"); - - await 
PingAsync(muxer, pub, sub).ForAwait(); - - lock (received) - { - Assert.Single(received); - } - Assert.Equal(0, count); - } - } - - [Fact] - public void TestPublishWithNoSubscribers() - { - using (var muxer = Create()) - { - var conn = muxer.GetSubscriber(); - Assert.Equal(0, conn.Publish(Me() + "channel", "message")); - } - } - - [FactLongRunning] - public void TestMassivePublishWithWithoutFlush_Local() - { - using (var muxer = Create()) - { - var conn = muxer.GetSubscriber(); - TestMassivePublish(conn, Me(), "local"); - } - } - - [FactLongRunning] - public void TestMassivePublishWithWithoutFlush_Remote() - { - using (var muxer = Create(configuration: TestConfig.Current.RemoteServerAndPort)) - { - var conn = muxer.GetSubscriber(); - TestMassivePublish(conn, Me(), "remote"); - } - } - - private void TestMassivePublish(ISubscriber conn, string channel, string caption) - { - const int loop = 10000; - - var tasks = new Task[loop]; - - var withFAF = Stopwatch.StartNew(); - for (int i = 0; i < loop; i++) - { - conn.Publish(channel, "bar", CommandFlags.FireAndForget); - } - withFAF.Stop(); - - var withAsync = Stopwatch.StartNew(); - for (int i = 0; i < loop; i++) - { - tasks[i] = conn.PublishAsync(channel, "bar"); - } - conn.WaitAll(tasks); - withAsync.Stop(); - - Log("{2}: {0}ms (F+F) vs {1}ms (async)", - withFAF.ElapsedMilliseconds, withAsync.ElapsedMilliseconds, caption); - // We've made async so far, this test isn't really valid anymore - // So let's check they're at least within a few seconds. 
- Assert.True(withFAF.ElapsedMilliseconds < withAsync.ElapsedMilliseconds + 3000, caption); - } - - [FactLongRunning] - public async Task PubSubGetAllAnyOrder() - { - using (var muxer = Create(syncTimeout: 20000)) - { - var sub = muxer.GetSubscriber(); - RedisChannel channel = Me(); - const int count = 1000; - var syncLock = new object(); - - var data = new HashSet(); - await sub.SubscribeAsync(channel, (_, val) => - { - bool pulse; - lock (data) - { - data.Add(int.Parse(Encoding.UTF8.GetString(val))); - pulse = data.Count == count; - if ((data.Count % 100) == 99) Log(data.Count.ToString()); - } - if (pulse) - { - lock (syncLock) - { - Monitor.PulseAll(syncLock); - } - } - }).ForAwait(); - - lock (syncLock) - { - for (int i = 0; i < count; i++) - { - sub.Publish(channel, i.ToString(), CommandFlags.FireAndForget); - } - sub.Ping(); - if (!Monitor.Wait(syncLock, 20000)) - { - throw new TimeoutException("Items: " + data.Count); - } - for (int i = 0; i < count; i++) - { - Assert.Contains(i, data); - } - } - } - } - - [Fact] - public async Task PubSubGetAllCorrectOrder() - { - using (var muxer = Create(configuration: TestConfig.Current.RemoteServerAndPort, syncTimeout: 20000)) - { - var sub = muxer.GetSubscriber(); - RedisChannel channel = Me(); - const int count = 250; - var syncLock = new object(); - - var data = new List(count); - var subChannel = await sub.SubscribeAsync(channel).ForAwait(); - - await sub.PingAsync().ForAwait(); - - async Task RunLoop() - { - while (!subChannel.Completion.IsCompleted) - { - var work = await subChannel.ReadAsync().ForAwait(); - int i = int.Parse(Encoding.UTF8.GetString(work.Message)); - lock (data) - { - data.Add(i); - if (data.Count == count) break; - if ((data.Count % 100) == 99) Log("Received: " + data.Count.ToString()); - } - } - lock (syncLock) - { - Log("PulseAll."); - Monitor.PulseAll(syncLock); - } - } - - lock (syncLock) - { - // Intentionally not awaited - running in parallel - _ = Task.Run(RunLoop); - for (int i = 0; i < 
count; i++) - { - sub.Publish(channel, i.ToString()); - if ((i % 100) == 99) Log("Published: " + i.ToString()); - } - Log("Send loop complete."); - if (!Monitor.Wait(syncLock, 20000)) - { - throw new TimeoutException("Items: " + data.Count); - } - Log("Unsubscribe."); - subChannel.Unsubscribe(); - Log("Sub Ping."); - sub.Ping(); - Log("Database Ping."); - muxer.GetDatabase().Ping(); - for (int i = 0; i < count; i++) - { - Assert.Equal(i, data[i]); - } - } - - Log("Awaiting completion."); - await subChannel.Completion; - Log("Completion awaited."); - await Assert.ThrowsAsync(async delegate - { - await subChannel.ReadAsync().ForAwait(); - }).ForAwait(); - Log("End of muxer."); - } - Log("End of test."); - } - - [Fact] - public async Task PubSubGetAllCorrectOrder_OnMessage_Sync() - { - using (var muxer = Create(configuration: TestConfig.Current.RemoteServerAndPort, syncTimeout: 20000)) - { - var sub = muxer.GetSubscriber(); - RedisChannel channel = Me(); - const int count = 1000; - var syncLock = new object(); - - var data = new List(count); - var subChannel = await sub.SubscribeAsync(channel).ForAwait(); - subChannel.OnMessage(msg => - { - int i = int.Parse(Encoding.UTF8.GetString(msg.Message)); - bool pulse = false; - lock (data) - { - data.Add(i); - if (data.Count == count) pulse = true; - if ((data.Count % 100) == 99) Log("Received: " + data.Count.ToString()); - } - if (pulse) - { - lock (syncLock) - { - Monitor.PulseAll(syncLock); - } - } - }); - await sub.PingAsync().ForAwait(); - - lock (syncLock) - { - for (int i = 0; i < count; i++) - { - sub.Publish(channel, i.ToString(), CommandFlags.FireAndForget); - if ((i % 100) == 99) Log("Published: " + i.ToString()); - } - Log("Send loop complete."); - if (!Monitor.Wait(syncLock, 20000)) - { - throw new TimeoutException("Items: " + data.Count); - } - Log("Unsubscribe."); - subChannel.Unsubscribe(); - Log("Sub Ping."); - sub.Ping(); - Log("Database Ping."); - muxer.GetDatabase().Ping(); - for (int i = 0; i < count; 
i++) - { - Assert.Equal(i, data[i]); - } - } - - Log("Awaiting completion."); - await subChannel.Completion; - Log("Completion awaited."); - Assert.True(subChannel.Completion.IsCompleted); - await Assert.ThrowsAsync(async delegate - { - await subChannel.ReadAsync().ForAwait(); - }).ForAwait(); - Log("End of muxer."); - } - Log("End of test."); - } - - [Fact] - public async Task PubSubGetAllCorrectOrder_OnMessage_Async() - { - using (var muxer = Create(configuration: TestConfig.Current.RemoteServerAndPort, syncTimeout: 20000)) - { - var sub = muxer.GetSubscriber(); - RedisChannel channel = Me(); - const int count = 1000; - var syncLock = new object(); - - var data = new List(count); - var subChannel = await sub.SubscribeAsync(channel).ForAwait(); - subChannel.OnMessage(msg => - { - int i = int.Parse(Encoding.UTF8.GetString(msg.Message)); - bool pulse = false; - lock (data) - { - data.Add(i); - if (data.Count == count) pulse = true; - if ((data.Count % 100) == 99) Log("Received: " + data.Count.ToString()); - } - if (pulse) - { - lock (syncLock) - { - Monitor.PulseAll(syncLock); - } - } - return i % 2 == 0 ? 
null : Task.CompletedTask; - }); - await sub.PingAsync().ForAwait(); - - lock (syncLock) - { - for (int i = 0; i < count; i++) - { - sub.Publish(channel, i.ToString(), CommandFlags.FireAndForget); - if ((i % 100) == 99) Log("Published: " + i.ToString()); - } - Log("Send loop complete."); - if (!Monitor.Wait(syncLock, 20000)) - { - throw new TimeoutException("Items: " + data.Count); - } - Log("Unsubscribe."); - subChannel.Unsubscribe(); - Log("Sub Ping."); - sub.Ping(); - Log("Database Ping."); - muxer.GetDatabase().Ping(); - for (int i = 0; i < count; i++) - { - Assert.Equal(i, data[i]); - } - } - - Log("Awaiting completion."); - await subChannel.Completion; - Log("Completion awaited."); - Assert.True(subChannel.Completion.IsCompleted); - await Assert.ThrowsAsync(async delegate - { - await subChannel.ReadAsync().ForAwait(); - }).ForAwait(); - Log("End of muxer."); - } - Log("End of test."); - } - - [Fact] - public async Task TestPublishWithSubscribers() - { - var channel = Me(); - using (var muxerA = Create(shared: false)) - using (var muxerB = Create(shared: false)) - using (var conn = Create()) - { - var listenA = muxerA.GetSubscriber(); - var listenB = muxerB.GetSubscriber(); - var t1 = listenA.SubscribeAsync(channel, delegate { }); - var t2 = listenB.SubscribeAsync(channel, delegate { }); - - await Task.WhenAll(t1, t2).ForAwait(); - - // subscribe is just a thread-race-mess - await listenA.PingAsync(); - await listenB.PingAsync(); - - var pub = conn.GetSubscriber().PublishAsync(channel, "message"); - Assert.Equal(2, await pub); // delivery count - } - } - - [Fact] - public async Task TestMultipleSubscribersGetMessage() - { - var channel = Me(); - using (var muxerA = Create(shared: false)) - using (var muxerB = Create(shared: false)) - using (var conn = Create()) - { - var listenA = muxerA.GetSubscriber(); - var listenB = muxerB.GetSubscriber(); - conn.GetDatabase().Ping(); - var pub = conn.GetSubscriber(); - int gotA = 0, gotB = 0; - var tA = 
listenA.SubscribeAsync(channel, (_, msg) => { if (msg == "message") Interlocked.Increment(ref gotA); }); - var tB = listenB.SubscribeAsync(channel, (_, msg) => { if (msg == "message") Interlocked.Increment(ref gotB); }); - await Task.WhenAll(tA, tB).ForAwait(); - Assert.Equal(2, pub.Publish(channel, "message")); - await AllowReasonableTimeToPublishAndProcess().ForAwait(); - Assert.Equal(1, Interlocked.CompareExchange(ref gotA, 0, 0)); - Assert.Equal(1, Interlocked.CompareExchange(ref gotB, 0, 0)); - - // and unsubscibe... - tA = listenA.UnsubscribeAsync(channel); - await tA; - Assert.Equal(1, pub.Publish(channel, "message")); - await AllowReasonableTimeToPublishAndProcess().ForAwait(); - Assert.Equal(1, Interlocked.CompareExchange(ref gotA, 0, 0)); - Assert.Equal(2, Interlocked.CompareExchange(ref gotB, 0, 0)); - } - } - - [Fact] - public async Task Issue38() - { - // https://code.google.com/p/booksleeve/issues/detail?id=38 - using (var pub = Create()) - { - var sub = pub.GetSubscriber(); - int count = 0; - var prefix = Me(); - void handler(RedisChannel _, RedisValue __) => Interlocked.Increment(ref count); - var a0 = sub.SubscribeAsync(prefix + "foo", handler); - var a1 = sub.SubscribeAsync(prefix + "bar", handler); - var b0 = sub.SubscribeAsync(prefix + "f*o", handler); - var b1 = sub.SubscribeAsync(prefix + "b*r", handler); - await Task.WhenAll(a0, a1, b0, b1).ForAwait(); - - var c = sub.PublishAsync(prefix + "foo", "foo"); - var d = sub.PublishAsync(prefix + "f@o", "f@o"); - var e = sub.PublishAsync(prefix + "bar", "bar"); - var f = sub.PublishAsync(prefix + "b@r", "b@r"); - await Task.WhenAll(c, d, e, f).ForAwait(); - - long total = c.Result + d.Result + e.Result + f.Result; - - await AllowReasonableTimeToPublishAndProcess().ForAwait(); - - Assert.Equal(6, total); // sent - Assert.Equal(6, Interlocked.CompareExchange(ref count, 0, 0)); // received - } - } - - internal static Task AllowReasonableTimeToPublishAndProcess() => Task.Delay(100); - - [Fact] - public 
async Task TestPartialSubscriberGetMessage() - { - using (var muxerA = Create()) - using (var muxerB = Create()) - using (var conn = Create()) - { - int gotA = 0, gotB = 0; - var listenA = muxerA.GetSubscriber(); - var listenB = muxerB.GetSubscriber(); - var pub = conn.GetSubscriber(); - var prefix = Me(); - var tA = listenA.SubscribeAsync(prefix + "channel", (s, msg) => { if (s == prefix + "channel" && msg == "message") Interlocked.Increment(ref gotA); }); - var tB = listenB.SubscribeAsync(prefix + "chann*", (s, msg) => { if (s == prefix + "channel" && msg == "message") Interlocked.Increment(ref gotB); }); - await Task.WhenAll(tA, tB).ForAwait(); - Assert.Equal(2, pub.Publish(prefix + "channel", "message")); - await AllowReasonableTimeToPublishAndProcess().ForAwait(); - Assert.Equal(1, Interlocked.CompareExchange(ref gotA, 0, 0)); - Assert.Equal(1, Interlocked.CompareExchange(ref gotB, 0, 0)); - - // and unsubscibe... - tB = listenB.UnsubscribeAsync(prefix + "chann*", null); - await tB; - Assert.Equal(1, pub.Publish(prefix + "channel", "message")); - await AllowReasonableTimeToPublishAndProcess().ForAwait(); - Assert.Equal(2, Interlocked.CompareExchange(ref gotA, 0, 0)); - Assert.Equal(1, Interlocked.CompareExchange(ref gotB, 0, 0)); - } - } - - [Fact] - public async Task TestSubscribeUnsubscribeAndSubscribeAgain() - { - using (var pubMuxer = Create()) - using (var subMuxer = Create()) - { - var prefix = Me(); - var pub = pubMuxer.GetSubscriber(); - var sub = subMuxer.GetSubscriber(); - int x = 0, y = 0; - var t1 = sub.SubscribeAsync(prefix + "abc", delegate { Interlocked.Increment(ref x); }); - var t2 = sub.SubscribeAsync(prefix + "ab*", delegate { Interlocked.Increment(ref y); }); - await Task.WhenAll(t1, t2).ForAwait(); - pub.Publish(prefix + "abc", ""); - await AllowReasonableTimeToPublishAndProcess().ForAwait(); - Assert.Equal(1, Volatile.Read(ref x)); - Assert.Equal(1, Volatile.Read(ref y)); - t1 = sub.UnsubscribeAsync(prefix + "abc", null); - t2 = 
sub.UnsubscribeAsync(prefix + "ab*", null); - await Task.WhenAll(t1, t2).ForAwait(); - pub.Publish(prefix + "abc", ""); - Assert.Equal(1, Volatile.Read(ref x)); - Assert.Equal(1, Volatile.Read(ref y)); - t1 = sub.SubscribeAsync(prefix + "abc", delegate { Interlocked.Increment(ref x); }); - t2 = sub.SubscribeAsync(prefix + "ab*", delegate { Interlocked.Increment(ref y); }); - await Task.WhenAll(t1, t2).ForAwait(); - pub.Publish(prefix + "abc", ""); - await AllowReasonableTimeToPublishAndProcess().ForAwait(); - Assert.Equal(2, Volatile.Read(ref x)); - Assert.Equal(2, Volatile.Read(ref y)); - } - } - -#if DEBUG - [Fact] - public async Task SubscriptionsSurviveConnectionFailureAsync() - { - using (var muxer = Create(allowAdmin: true)) - { - RedisChannel channel = Me(); - var sub = muxer.GetSubscriber(); - int counter = 0; - await sub.SubscribeAsync(channel, delegate - { - Interlocked.Increment(ref counter); - }).ConfigureAwait(false); - await sub.PublishAsync(channel, "abc").ConfigureAwait(false); - sub.Ping(); - await Task.Delay(200).ConfigureAwait(false); - Assert.Equal(1, Thread.VolatileRead(ref counter)); - var server = GetServer(muxer); - Assert.Equal(1, server.GetCounters().Subscription.SocketCount); - - server.SimulateConnectionFailure(); - SetExpectedAmbientFailureCount(2); - await Task.Delay(200).ConfigureAwait(false); - sub.Ping(); - Assert.Equal(2, server.GetCounters().Subscription.SocketCount); - await sub.PublishAsync(channel, "abc").ConfigureAwait(false); - await Task.Delay(200).ConfigureAwait(false); - sub.Ping(); - Assert.Equal(2, Thread.VolatileRead(ref counter)); - } - } -#endif - } -} diff --git a/tests/StackExchange.Redis.Tests/PubSubCommand.cs b/tests/StackExchange.Redis.Tests/PubSubCommand.cs deleted file mode 100644 index d763dbb4c..000000000 --- a/tests/StackExchange.Redis.Tests/PubSubCommand.cs +++ /dev/null @@ -1,92 +0,0 @@ -using System; -using System.Runtime.CompilerServices; -using System.Threading; -using System.Threading.Tasks; -using 
Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - [Collection(SharedConnectionFixture.Key)] - public class PubSubCommand : TestBase - { - public PubSubCommand(ITestOutputHelper output, SharedConnectionFixture fixture) : base (output, fixture) { } - - [Fact] - public void SubscriberCount() - { - using (var conn = Create()) - { - RedisChannel channel = Me() + Guid.NewGuid(); - var server = conn.GetServer(conn.GetEndPoints()[0]); - - var channels = server.SubscriptionChannels(Me() + "*"); - Assert.DoesNotContain(channel, channels); - - _ = server.SubscriptionPatternCount(); - var count = server.SubscriptionSubscriberCount(channel); - Assert.Equal(0, count); - conn.GetSubscriber().Subscribe(channel, delegate { }); - count = server.SubscriptionSubscriberCount(channel); - Assert.Equal(1, count); - - channels = server.SubscriptionChannels(Me() + "*"); - Assert.Contains(channel, channels); - } - } - - [Fact] - public async Task SubscriberCountAsync() - { - using (var conn = Create()) - { - RedisChannel channel = Me() + Guid.NewGuid(); - var server = conn.GetServer(conn.GetEndPoints()[0]); - - var channels = await server.SubscriptionChannelsAsync(Me() + "*").WithTimeout(2000); - Assert.DoesNotContain(channel, channels); - - _ = await server.SubscriptionPatternCountAsync().WithTimeout(2000); - var count = await server.SubscriptionSubscriberCountAsync(channel).WithTimeout(2000); - Assert.Equal(0, count); - await conn.GetSubscriber().SubscribeAsync(channel, delegate { }).WithTimeout(2000); - count = await server.SubscriptionSubscriberCountAsync(channel).WithTimeout(2000); - Assert.Equal(1, count); - - channels = await server.SubscriptionChannelsAsync(Me() + "*").WithTimeout(2000); - Assert.Contains(channel, channels); - } - } - } - static class Util - { - public static async Task WithTimeout(this Task task, int timeoutMs, - [CallerMemberName] string caller = null, [CallerLineNumber] int line = 0) - { - var cts = new CancellationTokenSource(); - if 
(task == await Task.WhenAny(task, Task.Delay(timeoutMs, cts.Token)).ForAwait()) - { - cts.Cancel(); - await task.ForAwait(); - } - else - { - throw new TimeoutException($"timout from {caller} line {line}"); - } - } - public static async Task WithTimeout(this Task task, int timeoutMs, - [CallerMemberName] string caller = null, [CallerLineNumber] int line = 0) - { - var cts = new CancellationTokenSource(); - if (task == await Task.WhenAny(task, Task.Delay(timeoutMs, cts.Token)).ForAwait()) - { - cts.Cancel(); - return await task.ForAwait(); - } - else - { - throw new TimeoutException($"timout from {caller} line {line}"); - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/PubSubCommandTests.cs b/tests/StackExchange.Redis.Tests/PubSubCommandTests.cs new file mode 100644 index 000000000..71b82e262 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/PubSubCommandTests.cs @@ -0,0 +1,94 @@ +using System; +using System.Runtime.CompilerServices; +using System.Threading; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +[RunPerProtocol] +public class PubSubCommandTests(ITestOutputHelper output, SharedConnectionFixture fixture) : TestBase(output, fixture) +{ + [Fact] + public async Task SubscriberCount() + { + await using var conn = Create(); + +#pragma warning disable CS0618 + RedisChannel channel = Me() + Guid.NewGuid(); + var server = conn.GetServer(conn.GetEndPoints()[0]); + + var channels = server.SubscriptionChannels(Me() + "*"); +#pragma warning restore CS0618 + Assert.DoesNotContain(channel, channels); + + _ = server.SubscriptionPatternCount(); + var count = server.SubscriptionSubscriberCount(channel); + Assert.Equal(0, count); + conn.GetSubscriber().Subscribe(channel, (channel, value) => { }); + count = server.SubscriptionSubscriberCount(channel); + Assert.Equal(1, count); + +#pragma warning disable CS0618 + channels = server.SubscriptionChannels(Me() + "*"); +#pragma warning restore CS0618 + Assert.Contains(channel, 
channels); + } + + [Fact] + public async Task SubscriberCountAsync() + { + await using var conn = Create(); + +#pragma warning disable CS0618 + RedisChannel channel = Me() + Guid.NewGuid(); +#pragma warning restore CS0618 + var server = conn.GetServer(conn.GetEndPoints()[0]); + +#pragma warning disable CS0618 + var channels = await server.SubscriptionChannelsAsync(Me() + "*").WithTimeout(2000); +#pragma warning restore CS0618 + Assert.DoesNotContain(channel, channels); + + _ = await server.SubscriptionPatternCountAsync().WithTimeout(2000); + var count = await server.SubscriptionSubscriberCountAsync(channel).WithTimeout(2000); + Assert.Equal(0, count); + await conn.GetSubscriber().SubscribeAsync(channel, (channel, value) => { }).WithTimeout(2000); + count = await server.SubscriptionSubscriberCountAsync(channel).WithTimeout(2000); + Assert.Equal(1, count); + +#pragma warning disable CS0618 + channels = await server.SubscriptionChannelsAsync(Me() + "*").WithTimeout(2000); +#pragma warning restore CS0618 + Assert.Contains(channel, channels); + } +} +internal static class Util +{ + public static async Task WithTimeout(this Task task, int timeoutMs, [CallerMemberName] string? caller = null, [CallerLineNumber] int line = 0) + { + var cts = new CancellationTokenSource(); + if (task == await Task.WhenAny(task, Task.Delay(timeoutMs, cts.Token)).ForAwait()) + { + cts.Cancel(); + await task.ForAwait(); + } + else + { + throw new TimeoutException($"timeout from {caller} line {line}"); + } + } + public static async Task WithTimeout(this Task task, int timeoutMs, [CallerMemberName] string? 
caller = null, [CallerLineNumber] int line = 0) + { + var cts = new CancellationTokenSource(); + if (task == await Task.WhenAny(task, Task.Delay(timeoutMs, cts.Token)).ForAwait()) + { + cts.Cancel(); + return await task.ForAwait(); + } + else + { + throw new TimeoutException($"timout from {caller} line {line}"); + } + } +} diff --git a/tests/StackExchange.Redis.Tests/PubSubKeyNotificationTests.cs b/tests/StackExchange.Redis.Tests/PubSubKeyNotificationTests.cs new file mode 100644 index 000000000..a65d0c631 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/PubSubKeyNotificationTests.cs @@ -0,0 +1,419 @@ +using System; +using System.Buffers; +using System.Collections.Concurrent; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using System.Threading; +using System.Threading.Tasks; +using StackExchange.Redis.KeyspaceIsolation; +using Xunit; + +namespace StackExchange.Redis.Tests; + +// ReSharper disable once UnusedMember.Global - used via test framework +public sealed class PubSubKeyNotificationTestsCluster(ITestOutputHelper output, ITestContextAccessor context, SharedConnectionFixture fixture) + : PubSubKeyNotificationTests(output, context, fixture) +{ + protected override string GetConfiguration() => TestConfig.Current.ClusterServersAndPorts; +} + +// ReSharper disable once UnusedMember.Global - used via test framework +public sealed class PubSubKeyNotificationTestsStandalone(ITestOutputHelper output, ITestContextAccessor context, SharedConnectionFixture fixture) + : PubSubKeyNotificationTests(output, context, fixture) +{ +} + +public abstract class PubSubKeyNotificationTests(ITestOutputHelper output, ITestContextAccessor context, SharedConnectionFixture? 
fixture = null) + : TestBase(output, fixture) +{ + private const int DefaultKeyCount = 10; + private const int DefaultEventCount = 512; + private CancellationToken CancellationToken => context.Current.CancellationToken; + + private RedisKey[] InventKeys(out byte[] prefix, int count = DefaultKeyCount) + { + RedisKey[] keys = new RedisKey[count]; + var prefixString = $"{Guid.NewGuid()}/"; + prefix = Encoding.UTF8.GetBytes(prefixString); + for (int i = 0; i < count; i++) + { + keys[i] = $"{prefixString}{Guid.NewGuid()}"; + } + return keys; + } + + [Obsolete("Use Create(withChannelPrefix: false) instead", error: true)] + private IInternalConnectionMultiplexer Create() => Create(withChannelPrefix: false); + private IInternalConnectionMultiplexer Create(bool withChannelPrefix) => + Create(channelPrefix: withChannelPrefix ? "prefix:" : null); + + private RedisKey SelectKey(RedisKey[] keys) => keys[SharedRandom.Next(0, keys.Length)]; + +#if NET + private static Random SharedRandom => Random.Shared; +#else + private static Random SharedRandom { get; } = new(); +#endif + + [Fact] + public async Task KeySpace_Events_Enabled() + { + // see https://redis.io/docs/latest/develop/pubsub/keyspace-notifications/#configuration + await using var conn = Create(allowAdmin: true); + int failures = 0; + foreach (var ep in conn.GetEndPoints()) + { + var server = conn.GetServer(ep); + var config = (await server.ConfigGetAsync("notify-keyspace-events")).Single(); + Log($"[{Format.ToString(ep)}] notify-keyspace-events: '{config.Value}'"); + + // this is a very broad config, but it's what we use in CI (and probably a common basic config) + if (config.Value != "AKE") + { + failures++; + } + } + // for details, check the log output + Assert.Equal(0, failures); + } + + [Fact] + public async Task KeySpace_CanSubscribe_ManualPublish() + { + await using var conn = Create(withChannelPrefix: false); + var db = conn.GetDatabase(); + + var channel = RedisChannel.KeyEvent("nonesuch"u8, database: null); + 
Log($"Monitoring channel: {channel}"); + var sub = conn.GetSubscriber(); + await sub.UnsubscribeAsync(channel); + + int count = 0; + await sub.SubscribeAsync(channel, (_, _) => Interlocked.Increment(ref count)); + + // to publish, we need to remove the marker that this is a multi-node channel + var asLiteral = RedisChannel.Literal(channel.ToString()); + await sub.PublishAsync(asLiteral, Guid.NewGuid().ToString()); + + int expected = GetConnectedCount(conn, channel); + await Task.Delay(100).ForAwait(); + Assert.Equal(expected, count); + } + + // this looks past the horizon to see how many connections we actually have for a given channel, + // which could be more than 1 in a cluster scenario + private static int GetConnectedCount(IConnectionMultiplexer muxer, in RedisChannel channel) + => muxer is ConnectionMultiplexer typed && typed.TryGetSubscription(channel, out var sub) + ? sub.GetConnectionCount() : 1; + + private sealed class Counter + { + private int _count; + public int Count => Volatile.Read(ref _count); + public int Increment() => Interlocked.Increment(ref _count); + } + + [Theory] + [InlineData(true)] + [InlineData(false)] + public async Task KeyEvent_CanObserveSimple_ViaCallbackHandler(bool withChannelPrefix) + { + await using var conn = Create(withChannelPrefix); + var db = conn.GetDatabase(); + + var keys = InventKeys(out var prefix); + var channel = RedisChannel.KeyEvent(KeyNotificationType.SAdd, db.Database); + Assert.True(channel.IsMultiNode); + Assert.False(channel.IsPattern); + Log($"Monitoring channel: {channel}"); + var sub = conn.GetSubscriber(); + await sub.UnsubscribeAsync(channel); + Counter callbackCount = new(), matchingEventCount = new(); + TaskCompletionSource allDone = new(); + + ConcurrentDictionary observedCounts = new(); + foreach (var key in keys) + { + observedCounts[key.ToString()] = new(); + } + + await sub.SubscribeAsync(channel, (recvChannel, recvValue) => + { + callbackCount.Increment(); + if (KeyNotification.TryParse(in 
recvChannel, in recvValue, out var notification) + && notification is { IsKeyEvent: true, Type: KeyNotificationType.SAdd }) + { + OnNotification(notification, prefix, matchingEventCount, observedCounts, allDone); + } + }); + + await SendAndObserveAsync(keys, db, allDone, callbackCount, observedCounts); + await sub.UnsubscribeAsync(channel); + } + + [Theory] + [InlineData(true)] + [InlineData(false)] + public async Task KeyEvent_CanObserveSimple_ViaQueue(bool withChannelPrefix) + { + await using var conn = Create(withChannelPrefix); + var db = conn.GetDatabase(); + + var keys = InventKeys(out var prefix); + var channel = RedisChannel.KeyEvent(KeyNotificationType.SAdd, db.Database); + Assert.True(channel.IsMultiNode); + Assert.False(channel.IsPattern); + Log($"Monitoring channel: {channel}"); + var sub = conn.GetSubscriber(); + await sub.UnsubscribeAsync(channel); + Counter callbackCount = new(), matchingEventCount = new(); + TaskCompletionSource allDone = new(); + + ConcurrentDictionary observedCounts = new(); + foreach (var key in keys) + { + observedCounts[key.ToString()] = new(); + } + + var queue = await sub.SubscribeAsync(channel); + _ = Task.Run(async () => + { + await foreach (var msg in queue.WithCancellation(CancellationToken)) + { + callbackCount.Increment(); + if (msg.TryParseKeyNotification(out var notification) + && notification is { IsKeyEvent: true, Type: KeyNotificationType.SAdd }) + { + OnNotification(notification, prefix, matchingEventCount, observedCounts, allDone); + } + } + }); + + await SendAndObserveAsync(keys, db, allDone, callbackCount, observedCounts); + await queue.UnsubscribeAsync(); + } + + [Theory] + [InlineData(true)] + [InlineData(false)] + public async Task KeyNotification_CanObserveSimple_ViaCallbackHandler(bool withChannelPrefix) + { + await using var conn = Create(withChannelPrefix); + var db = conn.GetDatabase(); + + var keys = InventKeys(out var prefix); + var channel = RedisChannel.KeySpacePrefix(prefix, db.Database); + 
Assert.True(channel.IsMultiNode); + Assert.True(channel.IsPattern); + Log($"Monitoring channel: {channel}"); + var sub = conn.GetSubscriber(); + await sub.UnsubscribeAsync(channel); + Counter callbackCount = new(), matchingEventCount = new(); + TaskCompletionSource allDone = new(); + + ConcurrentDictionary observedCounts = new(); + foreach (var key in keys) + { + observedCounts[key.ToString()] = new(); + } + + var queue = await sub.SubscribeAsync(channel); + _ = Task.Run(async () => + { + await foreach (var msg in queue.WithCancellation(CancellationToken)) + { + callbackCount.Increment(); + if (msg.TryParseKeyNotification(out var notification) + && notification is { IsKeySpace: true, Type: KeyNotificationType.SAdd }) + { + OnNotification(notification, prefix, matchingEventCount, observedCounts, allDone); + } + } + }); + + await SendAndObserveAsync(keys, db, allDone, callbackCount, observedCounts); + await sub.UnsubscribeAsync(channel); + } + + [Theory] + [InlineData(true)] + [InlineData(false)] + public async Task KeyNotification_CanObserveSimple_ViaQueue(bool withChannelPrefix) + { + await using var conn = Create(withChannelPrefix); + var db = conn.GetDatabase(); + + var keys = InventKeys(out var prefix); + var channel = RedisChannel.KeySpacePrefix(prefix, db.Database); + Assert.True(channel.IsMultiNode); + Assert.True(channel.IsPattern); + Log($"Monitoring channel: {channel}"); + var sub = conn.GetSubscriber(); + await sub.UnsubscribeAsync(channel); + Counter callbackCount = new(), matchingEventCount = new(); + TaskCompletionSource allDone = new(); + + ConcurrentDictionary observedCounts = new(); + foreach (var key in keys) + { + observedCounts[key.ToString()] = new(); + } + + await sub.SubscribeAsync(channel, (recvChannel, recvValue) => + { + callbackCount.Increment(); + if (KeyNotification.TryParse(in recvChannel, in recvValue, out var notification) + && notification is { IsKeySpace: true, Type: KeyNotificationType.SAdd }) + { + OnNotification(notification, 
prefix, matchingEventCount, observedCounts, allDone); + } + }); + + await SendAndObserveAsync(keys, db, allDone, callbackCount, observedCounts); + await sub.UnsubscribeAsync(channel); + } + + [Theory] + [InlineData(true, false)] + [InlineData(false, false)] + [InlineData(true, true)] + [InlineData(false, true)] + public async Task KeyNotification_CanObserveSingleKey_ViaQueue(bool withChannelPrefix, bool withKeyPrefix) + { + await using var conn = Create(withChannelPrefix); + string keyPrefix = withKeyPrefix ? "isolated:" : ""; + byte[] keyPrefixBytes = Encoding.UTF8.GetBytes(keyPrefix); + var db = conn.GetDatabase().WithKeyPrefix(keyPrefix); + + var keys = InventKeys(out var prefix, count: 1); + Log($"Using {Encoding.UTF8.GetString(prefix)} as filter prefix, sample key: {SelectKey(keys)}"); + var channel = RedisChannel.KeySpaceSingleKey(RedisKey.WithPrefix(keyPrefixBytes, keys.Single()), db.Database); + + Assert.False(channel.IsMultiNode); + Assert.False(channel.IsPattern); + Log($"Monitoring channel: {channel}, routing via {Encoding.UTF8.GetString(channel.RoutingSpan)}"); + + var sub = conn.GetSubscriber(); + await sub.UnsubscribeAsync(channel); + Counter callbackCount = new(), matchingEventCount = new(); + TaskCompletionSource allDone = new(); + + ConcurrentDictionary observedCounts = new(); + foreach (var key in keys) + { + observedCounts[key.ToString()] = new(); + } + + var queue = await sub.SubscribeAsync(channel); + _ = Task.Run(async () => + { + await foreach (var msg in queue.WithCancellation(CancellationToken)) + { + callbackCount.Increment(); + if (msg.TryParseKeyNotification(keyPrefixBytes, out var notification) + && notification is { IsKeySpace: true, Type: KeyNotificationType.SAdd }) + { + OnNotification(notification, prefix, matchingEventCount, observedCounts, allDone); + } + } + }); + + await SendAndObserveAsync(keys, db, allDone, callbackCount, observedCounts); + await sub.UnsubscribeAsync(channel); + } + + private void OnNotification( + in 
KeyNotification notification, + ReadOnlySpan prefix, + Counter matchingEventCount, + ConcurrentDictionary observedCounts, + TaskCompletionSource allDone) + { + if (notification.KeyStartsWith(prefix)) // avoid problems with parallel SADD tests + { + int currentCount = matchingEventCount.Increment(); + + // get the key and check that we expected it + var recvKey = notification.GetKey(); + Assert.True(observedCounts.TryGetValue(recvKey.ToString(), out var counter)); + +#if NET10_0_OR_GREATER + // it would be more efficient to stash the alt-lookup, but that would make our API here non-viable, + // since we need to support multiple frameworks + var viaAlt = FindViaAltLookup(notification, observedCounts.GetAlternateLookup>()); + Assert.Same(counter, viaAlt); +#endif + + // accounting... + if (counter.Increment() == 1) + { + Log($"Observed key: '{recvKey}' after {currentCount} events"); + } + + if (currentCount == DefaultEventCount) + { + allDone.TrySetResult(true); + } + } + } + + private async Task SendAndObserveAsync( + RedisKey[] keys, + IDatabase db, + TaskCompletionSource allDone, + Counter callbackCount, + ConcurrentDictionary observedCounts) + { + await Task.Delay(300).ForAwait(); // give it a moment to settle + + Dictionary sentCounts = new(keys.Length); + foreach (var key in keys) + { + sentCounts[key] = new(); + } + + for (int i = 0; i < DefaultEventCount; i++) + { + var key = SelectKey(keys); + sentCounts[key].Increment(); + await db.SetAddAsync(key, i); + } + + // Wait for all events to be observed + try + { + Assert.True(await allDone.Task.WithTimeout(5000)); + } + catch (TimeoutException) when (callbackCount.Count == 0) + { + Assert.Fail($"Timeout with zero events; are keyspace events enabled?"); + } + + foreach (var key in keys) + { + Assert.Equal(sentCounts[key].Count, observedCounts[key.ToString()].Count); + } + } + +#if NET10_0_OR_GREATER + // demonstrate that we can use the alt-lookup APIs to avoid string allocations + private static Counter? 
FindViaAltLookup( + in KeyNotification notification, + ConcurrentDictionary.AlternateLookup> lookup) + { + // Demonstrate typical alt-lookup usage; this is an advanced topic, so it + // isn't trivial to grok, but: this is typical of perf-focused APIs. + char[]? lease = null; + const int MAX_STACK = 128; + var maxLength = notification.GetKeyMaxCharCount(); + Span scratch = maxLength <= MAX_STACK + ? stackalloc char[MAX_STACK] + : (lease = ArrayPool.Shared.Rent(maxLength)); + Assert.True(notification.TryCopyKey(scratch, out var length)); + if (!lookup.TryGetValue(scratch.Slice(0, length), out var counter)) counter = null; + if (lease is not null) ArrayPool.Shared.Return(lease); + return counter; + } +#endif +} diff --git a/tests/StackExchange.Redis.Tests/PubSubMultiserverTests.cs b/tests/StackExchange.Redis.Tests/PubSubMultiserverTests.cs new file mode 100644 index 000000000..691232218 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/PubSubMultiserverTests.cs @@ -0,0 +1,209 @@ +using System; +using System.Threading; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +[RunPerProtocol] +public class PubSubMultiserverTests(ITestOutputHelper output, SharedConnectionFixture fixture) : TestBase(output, fixture) +{ + protected override string GetConfiguration() => TestConfig.Current.ClusterServersAndPorts + ",connectTimeout=10000"; + + [Fact] + public async Task ChannelSharding() + { + await using var conn = Create(channelPrefix: Me()); + + var defaultSlot = conn.ServerSelectionStrategy.HashSlot(default(RedisChannel)); + var slot1 = conn.ServerSelectionStrategy.HashSlot(RedisChannel.Literal("hey")); + var slot2 = conn.ServerSelectionStrategy.HashSlot(RedisChannel.Literal("hey2")); + + Assert.NotEqual(defaultSlot, slot1); + Assert.NotEqual(ServerSelectionStrategy.NoSlot, slot1); + Assert.NotEqual(slot1, slot2); + } + + [Fact] + public async Task ClusterNodeSubscriptionFailover() + { + Skip.UnlessLongRunning(); + Log("Connecting..."); + 
+ await using var conn = Create(allowAdmin: true, shared: false); + + var sub = conn.GetSubscriber(); + var channel = RedisChannel.Literal(Me()); + + var count = 0; + Log("Subscribing..."); + await sub.SubscribeAsync(channel, (_, val) => + { + Interlocked.Increment(ref count); + Log("Message: " + val); + }); + Assert.True(sub.IsConnected(channel)); + + Log("Publishing (1)..."); + Assert.Equal(0, count); + var publishedTo = await sub.PublishAsync(channel, "message1"); + // Client -> Redis -> Client -> handler takes just a moment + await UntilConditionAsync(TimeSpan.FromSeconds(2), () => Volatile.Read(ref count) == 1); + Assert.Equal(1, count); + Log($" Published (1) to {publishedTo} subscriber(s)."); + Assert.Equal(1, publishedTo); + + var endpoint = sub.SubscribedEndpoint(channel)!; + var subscribedServer = conn.GetServer(endpoint); + var subscribedServerEndpoint = conn.GetServerEndPoint(endpoint); + + Assert.True(subscribedServer.IsConnected, "subscribedServer.IsConnected"); + Assert.NotNull(subscribedServerEndpoint); + Assert.True(subscribedServerEndpoint.IsConnected, "subscribedServerEndpoint.IsConnected"); + Assert.True(subscribedServerEndpoint.IsSubscriberConnected, "subscribedServerEndpoint.IsSubscriberConnected"); + + Assert.True(conn.GetSubscriptions().TryGetValue(channel, out var subscription)); + var initialServer = subscription.GetAnyCurrentServer(); + Assert.NotNull(initialServer); + Assert.True(initialServer.IsConnected); + Log("Connected to: " + initialServer); + + conn.AllowConnect = false; + if (TestContext.Current.IsResp3()) + { + subscribedServerEndpoint.SimulateConnectionFailure(SimulatedFailureType.All); + + Assert.False(subscribedServerEndpoint.IsConnected, "subscribedServerEndpoint.IsConnected"); + Assert.False(subscribedServerEndpoint.IsSubscriberConnected, "subscribedServerEndpoint.IsSubscriberConnected"); + } + else + { + subscribedServerEndpoint.SimulateConnectionFailure(SimulatedFailureType.AllSubscription); + + 
Assert.True(subscribedServerEndpoint.IsConnected, "subscribedServerEndpoint.IsConnected"); + Assert.False(subscribedServerEndpoint.IsSubscriberConnected, "subscribedServerEndpoint.IsSubscriberConnected"); + } + await UntilConditionAsync(TimeSpan.FromSeconds(5), () => subscription.IsConnectedAny()); + Assert.True(subscription.IsConnectedAny()); + + var newServer = subscription.GetAnyCurrentServer(); + Assert.NotNull(newServer); + Assert.NotEqual(newServer, initialServer); + Log("Now connected to: " + newServer); + + count = 0; + Log("Publishing (2)..."); + Assert.Equal(0, count); + publishedTo = await sub.PublishAsync(channel, "message2"); + // Client -> Redis -> Client -> handler takes just a moment + await UntilConditionAsync(TimeSpan.FromSeconds(2), () => Volatile.Read(ref count) == 1); + Assert.Equal(1, count); + Log($" Published (2) to {publishedTo} subscriber(s)."); + + ClearAmbientFailures(); + } + + [Theory(Skip="TODO: Hostile")] + [InlineData(CommandFlags.PreferMaster, true)] + [InlineData(CommandFlags.PreferReplica, true)] + [InlineData(CommandFlags.DemandMaster, false)] + [InlineData(CommandFlags.DemandReplica, false)] + public async Task PrimaryReplicaSubscriptionFailover(CommandFlags flags, bool expectSuccess) + { + var config = TestConfig.Current.PrimaryServerAndPort + "," + TestConfig.Current.ReplicaServerAndPort; + Log("Connecting..."); + + await using var conn = Create(configuration: config, shared: false, allowAdmin: true); + + var sub = conn.GetSubscriber(); + var channel = RedisChannel.Literal(Me() + flags.ToString()); // Individual channel per case to not overlap publishers + + var count = 0; + Log("Subscribing..."); + await sub.SubscribeAsync( + channel, + (_, val) => + { + Interlocked.Increment(ref count); + Log("Message: " + val); + }, + flags); + Assert.True(sub.IsConnected(channel)); + + Log("Publishing (1)..."); + Assert.Equal(0, count); + var publishedTo = await sub.PublishAsync(channel, "message1"); + // Client -> Redis -> Client -> 
handler takes just a moment + await UntilConditionAsync(TimeSpan.FromSeconds(2), () => Volatile.Read(ref count) == 1); + Assert.Equal(1, count); + Log($" Published (1) to {publishedTo} subscriber(s)."); + + var endpoint = sub.SubscribedEndpoint(channel)!; + var subscribedServer = conn.GetServer(endpoint); + var subscribedServerEndpoint = conn.GetServerEndPoint(endpoint); + + Assert.True(subscribedServer.IsConnected, "subscribedServer.IsConnected"); + Assert.NotNull(subscribedServerEndpoint); + Assert.True(subscribedServerEndpoint.IsConnected, "subscribedServerEndpoint.IsConnected"); + Assert.True(subscribedServerEndpoint.IsSubscriberConnected, "subscribedServerEndpoint.IsSubscriberConnected"); + + Assert.True(conn.GetSubscriptions().TryGetValue(channel, out var subscription)); + var initialServer = subscription.GetAnyCurrentServer(); + Assert.NotNull(initialServer); + Assert.True(initialServer.IsConnected); + Log("Connected to: " + initialServer); + + conn.AllowConnect = false; + if (TestContext.Current.IsResp3()) + { + subscribedServerEndpoint.SimulateConnectionFailure(SimulatedFailureType.All); // need to kill the main connection + Assert.False(subscribedServerEndpoint.IsConnected, "subscribedServerEndpoint.IsConnected"); + Assert.False(subscribedServerEndpoint.IsSubscriberConnected, "subscribedServerEndpoint.IsSubscriberConnected"); + } + else + { + subscribedServerEndpoint.SimulateConnectionFailure(SimulatedFailureType.AllSubscription); + Assert.True(subscribedServerEndpoint.IsConnected, "subscribedServerEndpoint.IsConnected"); + Assert.False(subscribedServerEndpoint.IsSubscriberConnected, "subscribedServerEndpoint.IsSubscriberConnected"); + } + + if (expectSuccess) + { + await UntilConditionAsync(TimeSpan.FromSeconds(5), () => subscription.IsConnectedAny()); + Assert.True(subscription.IsConnectedAny()); + + var newServer = subscription.GetAnyCurrentServer(); + Assert.NotNull(newServer); + Assert.NotEqual(newServer, initialServer); + Log("Now connected to: " + 
newServer); + } + else + { + // This subscription shouldn't be able to reconnect by flags (demanding an unavailable server) + await UntilConditionAsync(TimeSpan.FromSeconds(5), () => subscription.IsConnectedAny()); + Assert.False(subscription.IsConnectedAny()); + Log("Unable to reconnect (as expected)"); + + // Allow connecting back to the original + conn.AllowConnect = true; + await UntilConditionAsync(TimeSpan.FromSeconds(5), () => subscription.IsConnectedAny()); + Assert.True(subscription.IsConnectedAny()); + + var newServer = subscription.GetAnyCurrentServer(); + Assert.NotNull(newServer); + Assert.Equal(newServer, initialServer); + Log("Now connected to: " + newServer); + } + + count = 0; + Log("Publishing (2)..."); + Assert.Equal(0, count); + publishedTo = await sub.PublishAsync(channel, "message2"); + // Client -> Redis -> Client -> handler takes just a moment + await UntilConditionAsync(TimeSpan.FromSeconds(2), () => Volatile.Read(ref count) == 1); + Assert.Equal(1, count); + Log($" Published (2) to {publishedTo} subscriber(s)."); + + ClearAmbientFailures(); + } +} diff --git a/tests/StackExchange.Redis.Tests/PubSubTests.cs b/tests/StackExchange.Redis.Tests/PubSubTests.cs new file mode 100644 index 000000000..65e1ee574 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/PubSubTests.cs @@ -0,0 +1,870 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Text; +using System.Threading; +using System.Threading.Channels; +using System.Threading.Tasks; +using StackExchange.Redis.Maintenance; +using Xunit; + +namespace StackExchange.Redis.Tests; + +[RunPerProtocol] +public class PubSubTests(ITestOutputHelper output, SharedConnectionFixture fixture) + : PubSubTestBase(output, fixture, null) +{ +} + +[RunPerProtocol] +public class InProcPubSubTests(ITestOutputHelper output, InProcServerFixture fixture) + : PubSubTestBase(output, null, fixture) +{ + protected override bool UseDedicatedInProcessServer => true; +} + 
+[RunPerProtocol] +public abstract class PubSubTestBase( + ITestOutputHelper output, + SharedConnectionFixture? connection, + InProcServerFixture? server) + : TestBase(output, connection, server) +{ + [Fact] + public async Task ExplicitPublishMode() + { + await using var conn = ConnectFactory(channelPrefix: "foo:"); + + var pub = conn.GetSubscriber(); + int a = 0, b = 0, c = 0, d = 0; + pub.Subscribe(new RedisChannel("*bcd", RedisChannel.PatternMode.Literal), (x, y) => Interlocked.Increment(ref a)); + pub.Subscribe(new RedisChannel("a*cd", RedisChannel.PatternMode.Pattern), (x, y) => Interlocked.Increment(ref b)); + pub.Subscribe(new RedisChannel("ab*d", RedisChannel.PatternMode.Auto), (x, y) => Interlocked.Increment(ref c)); +#pragma warning disable CS0618 + pub.Subscribe("abc*", (x, y) => Interlocked.Increment(ref d)); + + pub.Publish("abcd", "efg"); +#pragma warning restore CS0618 + await UntilConditionAsync( + TimeSpan.FromSeconds(10), + () => Volatile.Read(ref b) == 1 + && Volatile.Read(ref c) == 1 + && Volatile.Read(ref d) == 1); + Assert.Equal(0, Volatile.Read(ref a)); + Assert.Equal(1, Volatile.Read(ref b)); + Assert.Equal(1, Volatile.Read(ref c)); + Assert.Equal(1, Volatile.Read(ref d)); + +#pragma warning disable CS0618 + pub.Publish("*bcd", "efg"); +#pragma warning restore CS0618 + await UntilConditionAsync(TimeSpan.FromSeconds(10), () => Volatile.Read(ref a) == 1); + Assert.Equal(1, Volatile.Read(ref a)); + } + + [Theory] + [InlineData(null, false, "a")] + [InlineData("", false, "b")] + [InlineData("Foo:", false, "c")] + [InlineData(null, true, "d")] + [InlineData("", true, "e")] + [InlineData("Foo:", true, "f")] + public async Task TestBasicPubSub(string? 
channelPrefix, bool wildCard, string breaker) + { + await using var conn = ConnectFactory(channelPrefix: channelPrefix, shared: false); + + var pub = GetAnyPrimary(conn.DefaultClient); + var sub = conn.GetSubscriber(); + await PingAsync(pub, sub).ForAwait(); + HashSet received = []; + int secondHandler = 0; + string subChannel = (wildCard ? "a*c" : "abc") + breaker; + string pubChannel = "abc" + breaker; + Action handler1 = (channel, payload) => + { + lock (received) + { + if (channel == pubChannel) + { + received.Add(payload); + } + else + { + Log(channel); + } + } + }, handler2 = (_, __) => Interlocked.Increment(ref secondHandler); +#pragma warning disable CS0618 + sub.Subscribe(subChannel, handler1); + sub.Subscribe(subChannel, handler2); +#pragma warning restore CS0618 + + lock (received) + { + Assert.Empty(received); + } + Assert.Equal(0, Volatile.Read(ref secondHandler)); +#pragma warning disable CS0618 + var count = sub.Publish(pubChannel, "def"); +#pragma warning restore CS0618 + + await PingAsync(pub, sub, 3).ForAwait(); + + await UntilConditionAsync(TimeSpan.FromSeconds(5), () => received.Count == 1); + lock (received) + { + Assert.Single(received); + } + // Give handler firing a moment + await UntilConditionAsync(TimeSpan.FromSeconds(2), () => Volatile.Read(ref secondHandler) == 1); + Assert.Equal(1, Volatile.Read(ref secondHandler)); + + // unsubscribe from first; should still see second +#pragma warning disable CS0618 + sub.Unsubscribe(subChannel, handler1); + count = sub.Publish(pubChannel, "ghi"); +#pragma warning restore CS0618 + await PingAsync(pub, sub).ForAwait(); + lock (received) + { + Assert.Single(received); + } + + await UntilConditionAsync(TimeSpan.FromSeconds(2), () => Volatile.Read(ref secondHandler) == 2); + + var secondHandlerCount = Volatile.Read(ref secondHandler); + Log("Expecting 2 from second handler, got: " + secondHandlerCount); + Assert.Equal(2, secondHandlerCount); + Assert.Equal(1, count); + + // unsubscribe from second; 
should see nothing this time +#pragma warning disable CS0618 + sub.Unsubscribe(subChannel, handler2); + count = sub.Publish(pubChannel, "ghi"); +#pragma warning restore CS0618 + await PingAsync(pub, sub).ForAwait(); + lock (received) + { + Assert.Single(received); + } + secondHandlerCount = Volatile.Read(ref secondHandler); + Log("Expecting 2 from second handler, got: " + secondHandlerCount); + Assert.Equal(2, secondHandlerCount); + Assert.Equal(0, count); + } + + [Fact] + public async Task Ping() + { + await using var conn = ConnectFactory(shared: false); + var pub = GetAnyPrimary(conn.DefaultClient); + var sub = conn.GetSubscriber(); + + await PingAsync(pub, sub, 5).ForAwait(); + await sub.SubscribeAsync(RedisChannel.Literal(Me()), (_, __) => { }); // to ensure we're in subscriber mode + await PingAsync(pub, sub, 5).ForAwait(); + } + + [Fact] + public async Task TestBasicPubSubFireAndForget() + { + await using var conn = ConnectFactory(shared: false); + + var profiler = conn.DefaultClient.AddProfiler(); + var pub = GetAnyPrimary(conn.DefaultClient); + var sub = conn.GetSubscriber(); + + RedisChannel key = RedisChannel.Literal(Me() + Guid.NewGuid()); + HashSet received = []; + int secondHandler = 0; + await PingAsync(pub, sub).ForAwait(); + sub.Subscribe( + key, + (channel, payload) => + { + lock (received) + { + if (channel == key) + { + received.Add(payload); + } + } + }, + CommandFlags.FireAndForget); + + sub.Subscribe(key, (_, __) => Interlocked.Increment(ref secondHandler), CommandFlags.FireAndForget); + Log(profiler); + + lock (received) + { + Assert.Empty(received); + } + Assert.Equal(0, Volatile.Read(ref secondHandler)); + await PingAsync(pub, sub).ForAwait(); + var count = sub.Publish(key, "def", CommandFlags.FireAndForget); + await PingAsync(pub, sub).ForAwait(); + + await UntilConditionAsync(TimeSpan.FromSeconds(5), () => received.Count == 1); + Log(profiler); + + lock (received) + { + Assert.Single(received); + } + Assert.Equal(1, Volatile.Read(ref 
secondHandler)); + + sub.Unsubscribe(key); + count = sub.Publish(key, "ghi", CommandFlags.FireAndForget); + + await PingAsync(pub, sub).ForAwait(); + Log(profiler); + lock (received) + { + Assert.Single(received); + } + Assert.Equal(0, count); + } + + private async Task PingAsync(IServer pub, ISubscriber sub, int times = 1) + { + while (times-- > 0) + { + // both use async because we want to drain the completion managers, and the only + // way to prove that is to use TPL objects + var subTask = sub.PingAsync(); + var pubTask = pub.PingAsync(); + try + { + await Task.WhenAll(subTask, pubTask).ForAwait(); + } + catch (TimeoutException ex) + { + throw new TimeoutException($"Timeout; sub: {GetState(subTask)}, pub: {GetState(pubTask)}", ex); + } + + Log($"sub: {GetState(subTask)}, pub: {GetState(pubTask)}"); + + static string GetState(Task pending) + { + var status = pending.Status; + return status switch + { + TaskStatus.RanToCompletion => $"{status} in {pending.Result.TotalMilliseconds:###,##0.0}ms)", + TaskStatus.Faulted when pending.Exception is { InnerExceptions.Count:1 } ae => $"{status}: {ae.InnerExceptions[0].Message}", + TaskStatus.Faulted => $"{status}: {pending.Exception?.Message}", + _ => status.ToString(), + }; + } + } + } + + [Fact] + public async Task TestPatternPubSub() + { + await using var conn = ConnectFactory(shared: false); + + var pub = GetAnyPrimary(conn.DefaultClient); + var sub = conn.GetSubscriber(); + + HashSet received = []; + int secondHandler = 0; +#pragma warning disable CS0618 + sub.Subscribe("a*c", (channel, payload) => +#pragma warning restore CS0618 + { + lock (received) + { + if (channel == "abc") + { + received.Add(payload); + } + } + }); + +#pragma warning disable CS0618 + sub.Subscribe("a*c", (_, __) => Interlocked.Increment(ref secondHandler)); +#pragma warning restore CS0618 + lock (received) + { + Assert.Empty(received); + } + Assert.Equal(0, Volatile.Read(ref secondHandler)); + + await PingAsync(pub, sub).ForAwait(); + var 
count = sub.Publish(RedisChannel.Literal("abc"), "def"); + await PingAsync(pub, sub).ForAwait(); + + await UntilConditionAsync(TimeSpan.FromSeconds(5), () => received.Count == 1); + lock (received) + { + Assert.Single(received); + } + + // Give reception a bit, the handler could be delayed under load + await UntilConditionAsync(TimeSpan.FromSeconds(2), () => Volatile.Read(ref secondHandler) == 1); + Assert.Equal(1, Volatile.Read(ref secondHandler)); + +#pragma warning disable CS0618 + sub.Unsubscribe("a*c"); + count = sub.Publish("abc", "ghi"); +#pragma warning restore CS0618 + + await PingAsync(pub, sub).ForAwait(); + + lock (received) + { + Assert.Single(received); + } + } + + [Fact] + public async Task TestPublishWithNoSubscribers() + { + await using var conn = ConnectFactory(); + + var sub = conn.GetSubscriber(); +#pragma warning disable CS0618 + Assert.Equal(0, sub.Publish(Me() + "channel", "message")); +#pragma warning restore CS0618 + } + + [Fact] + public async Task TestMassivePublishWithWithoutFlush_Local() + { + Skip.UnlessLongRunning(); + await using var conn = ConnectFactory(); + + var sub = conn.GetSubscriber(); + TestMassivePublish(sub, Me(), "local"); + } + + [Fact] + public async Task TestMassivePublishWithWithoutFlush_Remote() + { + Skip.UnlessLongRunning(); + SkipIfWouldUseInProcessServer(); + await using var conn = Create(configuration: TestConfig.Current.RemoteServerAndPort); + + var sub = conn.GetSubscriber(); + TestMassivePublish(sub, Me(), "remote"); + } + + private void TestMassivePublish(ISubscriber sub, string channel, string caption) + { + const int loop = 10000; + + var tasks = new Task[loop]; + + var withFAF = Stopwatch.StartNew(); + for (int i = 0; i < loop; i++) + { +#pragma warning disable CS0618 + sub.Publish(channel, "bar", CommandFlags.FireAndForget); +#pragma warning restore CS0618 + } + withFAF.Stop(); + + var withAsync = Stopwatch.StartNew(); + for (int i = 0; i < loop; i++) + { +#pragma warning disable CS0618 + tasks[i] = 
sub.PublishAsync(channel, "bar"); +#pragma warning restore CS0618 + } + sub.WaitAll(tasks); + withAsync.Stop(); + + Log($"{caption}: {withFAF.ElapsedMilliseconds}ms (F+F) vs {withAsync.ElapsedMilliseconds}ms (async)"); + // We've made async so far, this test isn't really valid anymore + // So let's check they're at least within a few seconds. + Assert.True(withFAF.ElapsedMilliseconds < withAsync.ElapsedMilliseconds + 3000, caption); + } + + [Fact] + public async Task SubscribeAsyncEnumerable() + { + await using var conn = ConnectFactory(shared: false); + + var sub = conn.GetSubscriber(); + RedisChannel channel = RedisChannel.Literal(Me()); + + const int TO_SEND = 5; + var gotall = new TaskCompletionSource(); + + var source = await sub.SubscribeAsync(channel); + var op = Task.Run(async () => + { + int count = 0; + await foreach (var item in source) + { + count++; + if (count == TO_SEND) gotall.TrySetResult(count); + } + return count; + }); + + for (int i = 0; i < TO_SEND; i++) + { + await sub.PublishAsync(channel, i); + } + await gotall.Task.WithTimeout(5000); + + // check the enumerator exits cleanly + sub.Unsubscribe(channel); + var count = await op.WithTimeout(1000); + Assert.Equal(5, count); + } + + [Fact] + public async Task PubSubGetAllAnyOrder() + { + await using var conn = ConnectFactory(shared: false); + + var sub = conn.GetSubscriber(); + RedisChannel channel = RedisChannel.Literal(Me()); + const int count = 1000; + var syncLock = new object(); + + Assert.True(sub.IsConnected(), nameof(sub.IsConnected)); + var data = new HashSet(); + await sub.SubscribeAsync(channel, (_, val) => + { + bool pulse; + lock (data) + { + data.Add(int.Parse(Encoding.UTF8.GetString(val!))); + pulse = data.Count == count; + if ((data.Count % 100) == 99) Log(data.Count.ToString()); + } + if (pulse) + { + lock (syncLock) + { + Monitor.PulseAll(syncLock); + } + } + }).ForAwait(); + + lock (syncLock) + { + for (int i = 0; i < count; i++) + { + sub.Publish(channel, i.ToString(), 
CommandFlags.FireAndForget); + } + sub.Ping(); + if (!Monitor.Wait(syncLock, 20000)) + { + throw new TimeoutException("Items: " + data.Count); + } + for (int i = 0; i < count; i++) + { + Assert.Contains(i, data); + } + } + } + + [Fact] + public async Task PubSubGetAllCorrectOrder() + { + SkipIfWouldUseInProcessServer(); + await using (var conn = Create(configuration: TestConfig.Current.RemoteServerAndPort, syncTimeout: 20000, log: Writer)) + { + var sub = conn.GetSubscriber(); + RedisChannel channel = RedisChannel.Literal(Me()); + const int count = 250; + var syncLock = new object(); + + var data = new List(count); + var subChannel = await sub.SubscribeAsync(channel).ForAwait(); + + await sub.PingAsync().ForAwait(); + + async Task RunLoop() + { + while (!subChannel.Completion.IsCompleted) + { + var work = await subChannel.ReadAsync().ForAwait(); + int i = int.Parse(Encoding.UTF8.GetString(work.Message!)); + lock (data) + { + data.Add(i); + if (data.Count == count) break; + if ((data.Count % 100) == 99) Log("Received: " + data.Count.ToString()); + } + } + lock (syncLock) + { + Log("PulseAll."); + Monitor.PulseAll(syncLock); + } + } + + lock (syncLock) + { + // Intentionally not awaited - running in parallel + _ = Task.Run(RunLoop); + for (int i = 0; i < count; i++) + { + sub.Publish(channel, i.ToString()); + if ((i % 100) == 99) Log("Published: " + i.ToString()); + } + Log("Send loop complete."); + if (!Monitor.Wait(syncLock, 20000)) + { + throw new TimeoutException("Items: " + data.Count); + } + Log("Unsubscribe."); + subChannel.Unsubscribe(); + Log("Sub Ping."); + sub.Ping(); + Log("Database Ping."); + conn.GetDatabase().Ping(); + for (int i = 0; i < count; i++) + { + Assert.Equal(i, data[i]); + } + } + + Log("Awaiting completion."); + await subChannel.Completion; + Log("Completion awaited."); + await Assert.ThrowsAsync(async () => await subChannel.ReadAsync().ForAwait()).ForAwait(); + Log("End of muxer."); + } + Log("End of test."); + } + + [Fact] + public async 
Task PubSubGetAllCorrectOrder_OnMessage_Sync() + { + SkipIfWouldUseInProcessServer(); + await using (var conn = Create(configuration: TestConfig.Current.RemoteServerAndPort, syncTimeout: 20000, log: Writer)) + { + var sub = conn.GetSubscriber(); + RedisChannel channel = RedisChannel.Literal(Me()); + const int count = 1000; + var syncLock = new object(); + + var data = new List(count); + var subChannel = await sub.SubscribeAsync(channel).ForAwait(); + subChannel.OnMessage(msg => + { + int i = int.Parse(Encoding.UTF8.GetString(msg.Message!)); + bool pulse = false; + lock (data) + { + data.Add(i); + if (data.Count == count) pulse = true; + if ((data.Count % 100) == 99) Log("Received: " + data.Count.ToString()); + } + if (pulse) + { + lock (syncLock) + { + Monitor.PulseAll(syncLock); + } + } + }); + await sub.PingAsync().ForAwait(); + + lock (syncLock) + { + for (int i = 0; i < count; i++) + { + sub.Publish(channel, i.ToString(), CommandFlags.FireAndForget); + if ((i % 100) == 99) Log("Published: " + i.ToString()); + } + Log("Send loop complete."); + if (!Monitor.Wait(syncLock, 20000)) + { + throw new TimeoutException("Items: " + data.Count); + } + Log("Unsubscribe."); + subChannel.Unsubscribe(); + Log("Sub Ping."); + sub.Ping(); + Log("Database Ping."); + conn.GetDatabase().Ping(); + for (int i = 0; i < count; i++) + { + Assert.Equal(i, data[i]); + } + } + + Log("Awaiting completion."); + await subChannel.Completion; + Log("Completion awaited."); + Assert.True(subChannel.Completion.IsCompleted); + await Assert.ThrowsAsync(async () => await subChannel.ReadAsync().ForAwait()).ForAwait(); + Log("End of muxer."); + } + Log("End of test."); + } + + [Fact] + public async Task PubSubGetAllCorrectOrder_OnMessage_Async() + { + SkipIfWouldUseInProcessServer(); + await using (var conn = Create(configuration: TestConfig.Current.RemoteServerAndPort, syncTimeout: 20000, log: Writer)) + { + var sub = conn.GetSubscriber(); + RedisChannel channel = RedisChannel.Literal(Me()); + const 
int count = 1000; + var syncLock = new object(); + + var data = new List(count); + var subChannel = await sub.SubscribeAsync(channel).ForAwait(); + subChannel.OnMessage(msg => + { + int i = int.Parse(Encoding.UTF8.GetString(msg.Message!)); + bool pulse = false; + lock (data) + { + data.Add(i); + if (data.Count == count) pulse = true; + if ((data.Count % 100) == 99) Log("Received: " + data.Count.ToString()); + } + if (pulse) + { + lock (syncLock) + { + Monitor.PulseAll(syncLock); + } + } + // Making sure we cope with null being returned here by a handler + return i % 2 == 0 ? null! : Task.CompletedTask; + }); + await sub.PingAsync().ForAwait(); + + // Give a delay between subscriptions and when we try to publish to be safe + await Task.Delay(1000).ForAwait(); + + lock (syncLock) + { + for (int i = 0; i < count; i++) + { + sub.Publish(channel, i.ToString(), CommandFlags.FireAndForget); + if ((i % 100) == 99) Log("Published: " + i.ToString()); + } + Log("Send loop complete."); + if (!Monitor.Wait(syncLock, 20000)) + { + throw new TimeoutException("Items: " + data.Count); + } + Log("Unsubscribe."); + subChannel.Unsubscribe(); + Log("Sub Ping."); + sub.Ping(); + Log("Database Ping."); + conn.GetDatabase().Ping(); + for (int i = 0; i < count; i++) + { + Assert.Equal(i, data[i]); + } + } + + Log("Awaiting completion."); + await subChannel.Completion; + Log("Completion awaited."); + Assert.True(subChannel.Completion.IsCompleted); + await Assert.ThrowsAsync(async () => await subChannel.ReadAsync().ForAwait()).ForAwait(); + Log("End of muxer."); + } + Log("End of test."); + } + + [Fact] + public async Task TestPublishWithSubscribers() + { + await using var pair = ConnectFactory(shared: false); + await using var connA = pair.DefaultClient; + await using var connB = pair.CreateClient(); + await using var connPub = pair.CreateClient(); + + var channel = Me(); + var listenA = connA.GetSubscriber(); + var listenB = connB.GetSubscriber(); +#pragma warning disable CS0618 + var t1 = 
listenA.SubscribeAsync(channel, (arg1, arg2) => { }); + var t2 = listenB.SubscribeAsync(channel, (arg1, arg2) => { }); +#pragma warning restore CS0618 + + await Task.WhenAll(t1, t2).ForAwait(); + + // subscribe is just a thread-race-mess + await listenA.PingAsync(); + await listenB.PingAsync(); + +#pragma warning disable CS0618 + var pub = connPub.GetSubscriber().PublishAsync(channel, "message"); +#pragma warning restore CS0618 + Assert.Equal(2, await pub); // delivery count + } + + [Fact] + public async Task TestMultipleSubscribersGetMessage() + { + await using var pair = ConnectFactory(shared: false); + await using var connA = pair.DefaultClient; + await using var connB = pair.CreateClient(); + await using var connPub = pair.CreateClient(); + + var channel = RedisChannel.Literal(Me()); + var listenA = connA.GetSubscriber(); + var listenB = connB.GetSubscriber(); + await connPub.GetDatabase().PingAsync(); + var pub = connPub.GetSubscriber(); + int gotA = 0, gotB = 0; + var tA = listenA.SubscribeAsync(channel, (_, msg) => { if (msg == "message") Interlocked.Increment(ref gotA); }); + var tB = listenB.SubscribeAsync(channel, (_, msg) => { if (msg == "message") Interlocked.Increment(ref gotB); }); + await Task.WhenAll(tA, tB).ForAwait(); + Assert.Equal(2, pub.Publish(channel, "message")); + await AllowReasonableTimeToPublishAndProcess().ForAwait(); + Assert.Equal(1, Interlocked.CompareExchange(ref gotA, 0, 0)); + Assert.Equal(1, Interlocked.CompareExchange(ref gotB, 0, 0)); + + // and unsubscribe... 
+ tA = listenA.UnsubscribeAsync(channel); + await tA; + Assert.Equal(1, pub.Publish(channel, "message")); + await AllowReasonableTimeToPublishAndProcess().ForAwait(); + Assert.Equal(1, Interlocked.CompareExchange(ref gotA, 0, 0)); + Assert.Equal(2, Interlocked.CompareExchange(ref gotB, 0, 0)); + } + + [Fact] + public async Task Issue38() + { + await using var conn = ConnectFactory(); + + var sub = conn.GetSubscriber(); + int count = 0; + var prefix = Me(); + void Handler(RedisChannel unused, RedisValue unused2) => Interlocked.Increment(ref count); +#pragma warning disable CS0618 + var a0 = sub.SubscribeAsync(prefix + "foo", Handler); + var a1 = sub.SubscribeAsync(prefix + "bar", Handler); + var b0 = sub.SubscribeAsync(prefix + "f*o", Handler); + var b1 = sub.SubscribeAsync(prefix + "b*r", Handler); +#pragma warning restore CS0618 + await Task.WhenAll(a0, a1, b0, b1).ForAwait(); + +#pragma warning disable CS0618 + var c = sub.PublishAsync(prefix + "foo", "foo"); + var d = sub.PublishAsync(prefix + "f@o", "f@o"); + var e = sub.PublishAsync(prefix + "bar", "bar"); + var f = sub.PublishAsync(prefix + "b@r", "b@r"); +#pragma warning restore CS0618 + await Task.WhenAll(c, d, e, f).ForAwait(); + + long total = c.Result + d.Result + e.Result + f.Result; + + await AllowReasonableTimeToPublishAndProcess().ForAwait(); + + Assert.Equal(6, total); // sent + Assert.Equal(6, Interlocked.CompareExchange(ref count, 0, 0)); // received + } + + internal static Task AllowReasonableTimeToPublishAndProcess() => Task.Delay(500); + + [Fact] + public async Task TestPartialSubscriberGetMessage() + { + await using var pair = ConnectFactory(); + await using var connA = pair.DefaultClient; + await using var connB = pair.CreateClient(); + await using var connPub = pair.CreateClient(); + + int gotA = 0, gotB = 0; + var listenA = connA.GetSubscriber(); + var listenB = connB.GetSubscriber(); + var pub = connPub.GetSubscriber(); + var prefix = Me(); +#pragma warning disable CS0618 + var tA = 
listenA.SubscribeAsync(prefix + "channel", (s, msg) => { if (s == prefix + "channel" && msg == "message") Interlocked.Increment(ref gotA); }); + var tB = listenB.SubscribeAsync(prefix + "chann*", (s, msg) => { if (s == prefix + "channel" && msg == "message") Interlocked.Increment(ref gotB); }); + await Task.WhenAll(tA, tB).ForAwait(); + Assert.Equal(2, pub.Publish(prefix + "channel", "message")); +#pragma warning restore CS0618 + await AllowReasonableTimeToPublishAndProcess().ForAwait(); + Assert.Equal(1, Interlocked.CompareExchange(ref gotA, 0, 0)); + Assert.Equal(1, Interlocked.CompareExchange(ref gotB, 0, 0)); + + // and unsubscribe... +#pragma warning disable CS0618 + tB = listenB.UnsubscribeAsync(prefix + "chann*", null); + await tB; + Assert.Equal(1, pub.Publish(prefix + "channel", "message")); +#pragma warning restore CS0618 + await AllowReasonableTimeToPublishAndProcess().ForAwait(); + Assert.Equal(2, Interlocked.CompareExchange(ref gotA, 0, 0)); + Assert.Equal(1, Interlocked.CompareExchange(ref gotB, 0, 0)); + } + + [Fact] + public async Task TestSubscribeUnsubscribeAndSubscribeAgain() + { + await using var pair = ConnectFactory(); + await using var connPub = pair.DefaultClient; + await using var connSub = pair.CreateClient(); + + var prefix = Me(); + var pub = connPub.GetSubscriber(); + var sub = connSub.GetSubscriber(); + int x = 0, y = 0; +#pragma warning disable CS0618 + var t1 = sub.SubscribeAsync(prefix + "abc", (arg1, arg2) => Interlocked.Increment(ref x)); + var t2 = sub.SubscribeAsync(prefix + "ab*", (arg1, arg2) => Interlocked.Increment(ref y)); + await Task.WhenAll(t1, t2).ForAwait(); + pub.Publish(prefix + "abc", ""); + await AllowReasonableTimeToPublishAndProcess().ForAwait(); + Assert.Equal(1, Volatile.Read(ref x)); + Assert.Equal(1, Volatile.Read(ref y)); + t1 = sub.UnsubscribeAsync(prefix + "abc", null); + t2 = sub.UnsubscribeAsync(prefix + "ab*", null); + await Task.WhenAll(t1, t2).ForAwait(); + pub.Publish(prefix + "abc", ""); + 
Assert.Equal(1, Volatile.Read(ref x)); + Assert.Equal(1, Volatile.Read(ref y)); + t1 = sub.SubscribeAsync(prefix + "abc", (arg1, arg2) => Interlocked.Increment(ref x)); + t2 = sub.SubscribeAsync(prefix + "ab*", (arg1, arg2) => Interlocked.Increment(ref y)); + await Task.WhenAll(t1, t2).ForAwait(); + pub.Publish(prefix + "abc", ""); +#pragma warning restore CS0618 + await AllowReasonableTimeToPublishAndProcess().ForAwait(); + Assert.Equal(2, Volatile.Read(ref x)); + Assert.Equal(2, Volatile.Read(ref y)); + } + + [Fact] + public async Task AzureRedisEventsAutomaticSubscribe() + { + Skip.IfNoConfig(nameof(TestConfig.Config.AzureCacheServer), TestConfig.Current.AzureCacheServer); + Skip.IfNoConfig(nameof(TestConfig.Config.AzureCachePassword), TestConfig.Current.AzureCachePassword); + + bool didUpdate = false; + var options = new ConfigurationOptions() + { + EndPoints = { TestConfig.Current.AzureCacheServer }, + Password = TestConfig.Current.AzureCachePassword, + Ssl = true, + }; + + using (var connection = await ConnectionMultiplexer.ConnectAsync(options)) + { + connection.ServerMaintenanceEvent += (_, e) => + { + if (e is AzureMaintenanceEvent) + { + didUpdate = true; + } + }; + + var pubSub = connection.GetSubscriber(); + await pubSub.PublishAsync(RedisChannel.Literal("AzureRedisEvents"), "HI"); + await Task.Delay(100); + + Assert.True(didUpdate); + } + } +} diff --git a/tests/StackExchange.Redis.Tests/RawResultTests.cs b/tests/StackExchange.Redis.Tests/RawResultTests.cs index accc2fe34..9cf578ee1 100644 --- a/tests/StackExchange.Redis.Tests/RawResultTests.cs +++ b/tests/StackExchange.Redis.Tests/RawResultTests.cs @@ -1,65 +1,69 @@ using System.Buffers; using Xunit; -namespace StackExchange.Redis.Tests +namespace StackExchange.Redis.Tests; + +public class RawResultTests { - public class RawResultTests + [Fact] + public void TypeLoads() + { + var type = typeof(RawResult); + Assert.Equal(nameof(RawResult), type.Name); + } + + [Theory] + 
[InlineData(ResultType.BulkString)] + [InlineData(ResultType.Null)] + public void NullWorks(ResultType type) { - [Fact] - public void TypeLoads() - { - var type = typeof(RawResult); - Assert.Equal(nameof(RawResult), type.Name); - } - [Fact] - public void NullWorks() - { - var result = new RawResult(ResultType.BulkString, ReadOnlySequence.Empty, true); - Assert.Equal(ResultType.BulkString, result.Type); - Assert.True(result.IsNull); + var result = new RawResult(type, ReadOnlySequence.Empty, RawResult.ResultFlags.None); + Assert.Equal(type, result.Resp3Type); + Assert.True(result.HasValue); + Assert.True(result.IsNull); - var value = result.AsRedisValue(); + var value = result.AsRedisValue(); - Assert.True(value.IsNull); - string s = value; - Assert.Null(s); + Assert.True(value.IsNull); + string? s = value; + Assert.Null(s); - byte[] arr = (byte[])value; - Assert.Null(arr); - } + byte[]? arr = (byte[]?)value; + Assert.Null(arr); + } - [Fact] - public void DefaultWorks() - { - var result = default(RawResult); - Assert.Equal(ResultType.None, result.Type); - Assert.True(result.IsNull); + [Fact] + public void DefaultWorks() + { + var result = RawResult.Nil; + Assert.Equal(ResultType.None, result.Resp3Type); + Assert.False(result.HasValue); + Assert.True(result.IsNull); - var value = result.AsRedisValue(); + var value = result.AsRedisValue(); - Assert.True(value.IsNull); - var s = (string)value; - Assert.Null(s); + Assert.True(value.IsNull); + var s = (string?)value; + Assert.Null(s); - var arr = (byte[])value; - Assert.Null(arr); - } + var arr = (byte[]?)value; + Assert.Null(arr); + } - [Fact] - public void NilWorks() - { - var result = RawResult.Nil; - Assert.Equal(ResultType.None, result.Type); - Assert.True(result.IsNull); + [Fact] + public void NilWorks() + { + var result = RawResult.Nil; + Assert.Equal(ResultType.None, result.Resp3Type); + Assert.True(result.IsNull); - var value = result.AsRedisValue(); + var value = result.AsRedisValue(); - 
Assert.True(value.IsNull); - var s = (string)value; - Assert.Null(s); + Assert.True(value.IsNull); + var s = (string?)value; + Assert.Null(s); - var arr = (byte[])value; - Assert.Null(arr); - } + var arr = (byte[]?)value; + Assert.Null(arr); } } diff --git a/tests/StackExchange.Redis.Tests/RealWorld.cs b/tests/StackExchange.Redis.Tests/RealWorld.cs deleted file mode 100644 index 7b8d75ca7..000000000 --- a/tests/StackExchange.Redis.Tests/RealWorld.cs +++ /dev/null @@ -1,32 +0,0 @@ -using System.Threading.Tasks; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - public class RealWorld : TestBase - { - public RealWorld(ITestOutputHelper output) : base(output) { } - - [Fact] - public async Task WhyDoesThisNotWork() - { - Log("first:"); - var config = ConfigurationOptions.Parse("localhost:6379,localhost:6380,name=Core (Q&A),tiebreaker=:RedisMaster,abortConnect=False"); - Assert.Equal(2, config.EndPoints.Count); - Log("Endpoint 0: {0} (AddressFamily: {1})", config.EndPoints[0], config.EndPoints[0].AddressFamily); - Log("Endpoint 1: {0} (AddressFamily: {1})", config.EndPoints[1], config.EndPoints[1].AddressFamily); - - using (var conn = ConnectionMultiplexer.Connect("localhost:6379,localhost:6380,name=Core (Q&A),tiebreaker=:RedisMaster,abortConnect=False", Writer)) - { - Log(""); - Log("pausing..."); - await Task.Delay(200).ForAwait(); - Log("second:"); - - bool result = conn.Configure(Writer); - Log("Returned: {0}", result); - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/RealWorldTests.cs b/tests/StackExchange.Redis.Tests/RealWorldTests.cs new file mode 100644 index 000000000..ba9605b4f --- /dev/null +++ b/tests/StackExchange.Redis.Tests/RealWorldTests.cs @@ -0,0 +1,28 @@ +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class RealWorldTests(ITestOutputHelper output) : TestBase(output) +{ + [Fact] + public async Task WhyDoesThisNotWork() + { + Log("first:"); + var config = 
ConfigurationOptions.Parse("localhost:6379,localhost:6380,name=Core (Q&A),tiebreaker=:RedisPrimary,abortConnect=False"); + Assert.Equal(2, config.EndPoints.Count); + Log("Endpoint 0: {0} (AddressFamily: {1})", config.EndPoints[0], config.EndPoints[0].AddressFamily); + Log("Endpoint 1: {0} (AddressFamily: {1})", config.EndPoints[1], config.EndPoints[1].AddressFamily); + + await using (var conn = ConnectionMultiplexer.Connect("localhost:6379,localhost:6380,name=Core (Q&A),tiebreaker=:RedisPrimary,abortConnect=False", Writer)) + { + Log(""); + Log("pausing..."); + await Task.Delay(200).ForAwait(); + Log("second:"); + + bool result = conn.Configure(Writer); + Log("Returned: {0}", result); + } + } +} diff --git a/tests/StackExchange.Redis.Tests/RedisFeaturesTests.cs b/tests/StackExchange.Redis.Tests/RedisFeaturesTests.cs index 2bd492ab9..2cabb90b4 100644 --- a/tests/StackExchange.Redis.Tests/RedisFeaturesTests.cs +++ b/tests/StackExchange.Redis.Tests/RedisFeaturesTests.cs @@ -1,30 +1,29 @@ using System; using Xunit; -namespace StackExchange.Redis.Tests +namespace StackExchange.Redis.Tests; + +public class RedisFeaturesTests { - public class RedisFeaturesTests + [Fact] + public void ExecAbort() // a random one because it is fun { - [Fact] - public void ExecAbort() // a random one because it is fun - { - var features = new RedisFeatures(new Version(2, 9)); - var s = features.ToString(); - Assert.True(features.ExecAbort); - Assert.StartsWith("Features in 2.9" + Environment.NewLine, s); - Assert.Contains("ExecAbort: True" + Environment.NewLine, s); + var features = new RedisFeatures(new Version(2, 9)); + var s = features.ToString(); + Assert.True(features.ExecAbort); + Assert.StartsWith("Features in 2.9" + Environment.NewLine, s); + Assert.Contains("ExecAbort: True" + Environment.NewLine, s); - features = new RedisFeatures(new Version(2, 9, 5)); - s = features.ToString(); - Assert.False(features.ExecAbort); - Assert.StartsWith("Features in 2.9.5" + Environment.NewLine, s); 
- Assert.Contains("ExecAbort: False" + Environment.NewLine, s); + features = new RedisFeatures(new Version(2, 9, 5)); + s = features.ToString(); + Assert.False(features.ExecAbort); + Assert.StartsWith("Features in 2.9.5" + Environment.NewLine, s); + Assert.Contains("ExecAbort: False" + Environment.NewLine, s); - features = new RedisFeatures(new Version(3, 0)); - s = features.ToString(); - Assert.True(features.ExecAbort); - Assert.StartsWith("Features in 3.0" + Environment.NewLine, s); - Assert.Contains("ExecAbort: True" + Environment.NewLine, s); - } + features = new RedisFeatures(new Version(3, 0)); + s = features.ToString(); + Assert.True(features.ExecAbort); + Assert.StartsWith("Features in 3.0" + Environment.NewLine, s); + Assert.Contains("ExecAbort: True" + Environment.NewLine, s); } } diff --git a/tests/StackExchange.Redis.Tests/RedisResultTests.cs b/tests/StackExchange.Redis.Tests/RedisResultTests.cs index fb27c6745..e63e00dc8 100644 --- a/tests/StackExchange.Redis.Tests/RedisResultTests.cs +++ b/tests/StackExchange.Redis.Tests/RedisResultTests.cs @@ -2,154 +2,196 @@ using System.Collections.Generic; using Xunit; -namespace StackExchange.Redis.Tests +namespace StackExchange.Redis.Tests; + +/// +/// Tests for . +/// +public sealed class RedisResultTests { /// - /// Tests for + /// Tests the basic functionality of . + /// + [Fact] + public void ToDictionaryWorks() + { + var redisArrayResult = RedisResult.Create( + ["one", 1, "two", 2, "three", 3, "four", 4]); + + var dict = redisArrayResult.ToDictionary(); + + Assert.Equal(4, dict.Count); + Assert.Equal(1, (RedisValue)dict["one"]); + Assert.Equal(2, (RedisValue)dict["two"]); + Assert.Equal(3, (RedisValue)dict["three"]); + Assert.Equal(4, (RedisValue)dict["four"]); + } + + /// + /// Tests the basic functionality of + /// when the results contain a nested results array, which is common for lua script results. 
+ /// + [Fact] + public void ToDictionaryWorksWhenNested() + { + var redisArrayResult = RedisResult.Create( + [ + RedisResult.Create((RedisValue)"one"), + RedisResult.Create(["two", 2, "three", 3]), + + RedisResult.Create((RedisValue)"four"), + RedisResult.Create(["five", 5, "six", 6]), + ]); + + var dict = redisArrayResult.ToDictionary(); + var nestedDict = dict["one"].ToDictionary(); + + Assert.Equal(2, dict.Count); + Assert.Equal(2, nestedDict.Count); + Assert.Equal(2, (RedisValue)nestedDict["two"]); + Assert.Equal(3, (RedisValue)nestedDict["three"]); + } + + /// + /// Tests that fails when a duplicate key is encountered. + /// This also tests that the default comparator is case-insensitive. + /// + [Fact] + public void ToDictionaryFailsWithDuplicateKeys() + { + var redisArrayResult = RedisResult.Create( + ["banana", 1, "BANANA", 2, "orange", 3, "apple", 4]); + + Assert.Throws(() => redisArrayResult.ToDictionary(/* Use default comparer, causes collision of banana */)); + } + + /// + /// Tests that correctly uses the provided comparator. + /// + [Fact] + public void ToDictionaryWorksWithCustomComparator() + { + var redisArrayResult = RedisResult.Create( + ["banana", 1, "BANANA", 2, "orange", 3, "apple", 4]); + + var dict = redisArrayResult.ToDictionary(StringComparer.Ordinal); + + Assert.Equal(4, dict.Count); + Assert.Equal(1, (RedisValue)dict["banana"]); + Assert.Equal(2, (RedisValue)dict["BANANA"]); + } + + /// + /// Tests that fails when the redis results array contains an odd number + /// of elements. In other words, it's not actually a Key,Value,Key,Value... etc. array. 
/// - public sealed class RedisResultTests + [Fact] + public void ToDictionaryFailsOnMishapenResults() + { + var redisArrayResult = RedisResult.Create( + ["one", 1, "two", 2, "three", 3, "four" /* missing 4 */]); + + Assert.Throws(() => redisArrayResult.ToDictionary(StringComparer.Ordinal)); + } + + [Fact] + public void SingleResultConvertibleViaTo() + { + var value = RedisResult.Create(123); + Assert.StrictEqual((int)123, Convert.ToInt32(value)); + Assert.StrictEqual((uint)123U, Convert.ToUInt32(value)); + Assert.StrictEqual(123L, Convert.ToInt64(value)); + Assert.StrictEqual(123UL, Convert.ToUInt64(value)); + Assert.StrictEqual((byte)123, Convert.ToByte(value)); + Assert.StrictEqual((sbyte)123, Convert.ToSByte(value)); + Assert.StrictEqual((short)123, Convert.ToInt16(value)); + Assert.StrictEqual((ushort)123, Convert.ToUInt16(value)); + Assert.Equal("123", Convert.ToString(value)); + Assert.StrictEqual(123M, Convert.ToDecimal(value)); + Assert.StrictEqual((char)123, Convert.ToChar(value)); + Assert.StrictEqual(123f, Convert.ToSingle(value)); + Assert.StrictEqual(123d, Convert.ToDouble(value)); + } + + [Fact] + public void SingleResultConvertibleDirectViaChangeType_Type() + { + var value = RedisResult.Create(123); + Assert.StrictEqual((int)123, Convert.ChangeType(value, typeof(int))); + Assert.StrictEqual((uint)123U, Convert.ChangeType(value, typeof(uint))); + Assert.StrictEqual(123L, Convert.ChangeType(value, typeof(long))); + Assert.StrictEqual(123UL, Convert.ChangeType(value, typeof(ulong))); + Assert.StrictEqual((byte)123, Convert.ChangeType(value, typeof(byte))); + Assert.StrictEqual((sbyte)123, Convert.ChangeType(value, typeof(sbyte))); + Assert.StrictEqual((short)123, Convert.ChangeType(value, typeof(short))); + Assert.StrictEqual((ushort)123, Convert.ChangeType(value, typeof(ushort))); + Assert.Equal("123", Convert.ChangeType(value, typeof(string))); + Assert.StrictEqual(123M, Convert.ChangeType(value, typeof(decimal))); + Assert.StrictEqual((char)123, 
Convert.ChangeType(value, typeof(char))); + Assert.StrictEqual(123f, Convert.ChangeType(value, typeof(float))); + Assert.StrictEqual(123d, Convert.ChangeType(value, typeof(double))); + } + + [Fact] + public void SingleResultConvertibleDirectViaChangeType_TypeCode() + { + var value = RedisResult.Create(123); + Assert.StrictEqual((int)123, Convert.ChangeType(value, TypeCode.Int32)); + Assert.StrictEqual((uint)123U, Convert.ChangeType(value, TypeCode.UInt32)); + Assert.StrictEqual(123L, Convert.ChangeType(value, TypeCode.Int64)); + Assert.StrictEqual(123UL, Convert.ChangeType(value, TypeCode.UInt64)); + Assert.StrictEqual((byte)123, Convert.ChangeType(value, TypeCode.Byte)); + Assert.StrictEqual((sbyte)123, Convert.ChangeType(value, TypeCode.SByte)); + Assert.StrictEqual((short)123, Convert.ChangeType(value, TypeCode.Int16)); + Assert.StrictEqual((ushort)123, Convert.ChangeType(value, TypeCode.UInt16)); + Assert.Equal("123", Convert.ChangeType(value, TypeCode.String)); + Assert.StrictEqual(123M, Convert.ChangeType(value, TypeCode.Decimal)); + Assert.StrictEqual((char)123, Convert.ChangeType(value, TypeCode.Char)); + Assert.StrictEqual(123f, Convert.ChangeType(value, TypeCode.Single)); + Assert.StrictEqual(123d, Convert.ChangeType(value, TypeCode.Double)); + } + + [Theory] + [InlineData(ResultType.Double)] + [InlineData(ResultType.BulkString)] + [InlineData(ResultType.SimpleString)] + public void RedisResultParseNaN(ResultType resultType) + { + // https://github.com/redis/NRedisStack/issues/439 + var value = RedisResult.Create("NaN", resultType); + Assert.True(double.IsNaN(value.AsDouble())); + } + + [Theory] + [InlineData(ResultType.Double)] + [InlineData(ResultType.BulkString)] + [InlineData(ResultType.SimpleString)] + public void RedisResultParseInf(ResultType resultType) + { + // https://github.com/redis/NRedisStack/issues/439 + var value = RedisResult.Create("inf", resultType); + Assert.True(double.IsPositiveInfinity(value.AsDouble())); + } + + [Theory] + 
[InlineData(ResultType.Double)] + [InlineData(ResultType.BulkString)] + [InlineData(ResultType.SimpleString)] + public void RedisResultParsePlusInf(ResultType resultType) + { + // https://github.com/redis/NRedisStack/issues/439 + var value = RedisResult.Create("+inf", resultType); + Assert.True(double.IsPositiveInfinity(value.AsDouble())); + } + + [Theory] + [InlineData(ResultType.Double)] + [InlineData(ResultType.BulkString)] + [InlineData(ResultType.SimpleString)] + public void RedisResultParseMinusInf(ResultType resultType) { - /// - /// Tests the basic functionality of - /// - [Fact] - public void ToDictionaryWorks() - { - var redisArrayResult = RedisResult.Create( - new RedisValue[] { "one", 1, "two", 2, "three", 3, "four", 4 }); - - var dict = redisArrayResult.ToDictionary(); - - Assert.Equal(4, dict.Count); - Assert.Equal(1, (RedisValue)dict["one"]); - Assert.Equal(2, (RedisValue)dict["two"]); - Assert.Equal(3, (RedisValue)dict["three"]); - Assert.Equal(4, (RedisValue)dict["four"]); - } - - /// - /// Tests the basic functionality of - /// when the results contain a nested results array, which is common for lua script results - /// - [Fact] - public void ToDictionaryWorksWhenNested() - { - var redisArrayResult = RedisResult.Create( - new [] - { - RedisResult.Create((RedisValue)"one"), - RedisResult.Create(new RedisValue[]{"two", 2, "three", 3}), - - RedisResult.Create((RedisValue)"four"), - RedisResult.Create(new RedisValue[] { "five", 5, "six", 6 }), - }); - - var dict = redisArrayResult.ToDictionary(); - var nestedDict = dict["one"].ToDictionary(); - - Assert.Equal(2, dict.Count); - Assert.Equal(2, nestedDict.Count); - Assert.Equal(2, (RedisValue)nestedDict["two"]); - Assert.Equal(3, (RedisValue)nestedDict["three"]); - } - - /// - /// Tests that fails when a duplicate key is encountered. - /// This also tests that the default comparator is case-insensitive. 
- /// - [Fact] - public void ToDictionaryFailsWithDuplicateKeys() - { - var redisArrayResult = RedisResult.Create( - new RedisValue[] { "banana", 1, "BANANA", 2, "orange", 3, "apple", 4 }); - - Assert.Throws(() => redisArrayResult.ToDictionary(/* Use default comparer, causes collision of banana */)); - } - - /// - /// Tests that correctly uses the provided comparator - /// - [Fact] - public void ToDictionaryWorksWithCustomComparator() - { - var redisArrayResult = RedisResult.Create( - new RedisValue[] { "banana", 1, "BANANA", 2, "orange", 3, "apple", 4 }); - - var dict = redisArrayResult.ToDictionary(StringComparer.Ordinal); - - Assert.Equal(4, dict.Count); - Assert.Equal(1, (RedisValue)dict["banana"]); - Assert.Equal(2, (RedisValue)dict["BANANA"]); - } - - /// - /// Tests that fails when the redis results array contains an odd number - /// of elements. In other words, it's not actually a Key,Value,Key,Value... etc. array - /// - [Fact] - public void ToDictionaryFailsOnMishapenResults() - { - var redisArrayResult = RedisResult.Create( - new RedisValue[] { "one", 1, "two", 2, "three", 3, "four" /* missing 4 */ }); - - Assert.Throws(()=>redisArrayResult.ToDictionary(StringComparer.Ordinal)); - } - - [Fact] - public void SingleResultConvertibleViaTo() - { - var value = RedisResult.Create(123); - Assert.StrictEqual((int)123, Convert.ToInt32(value)); - Assert.StrictEqual((uint)123U, Convert.ToUInt32(value)); - Assert.StrictEqual((long)123, Convert.ToInt64(value)); - Assert.StrictEqual((ulong)123U, Convert.ToUInt64(value)); - Assert.StrictEqual((byte)123, Convert.ToByte(value)); - Assert.StrictEqual((sbyte)123, Convert.ToSByte(value)); - Assert.StrictEqual((short)123, Convert.ToInt16(value)); - Assert.StrictEqual((ushort)123, Convert.ToUInt16(value)); - Assert.Equal("123", Convert.ToString(value)); - Assert.StrictEqual(123M, Convert.ToDecimal(value)); - Assert.StrictEqual((char)123, Convert.ToChar(value)); - Assert.StrictEqual(123f, Convert.ToSingle(value)); - 
Assert.StrictEqual(123d, Convert.ToDouble(value)); - } - - [Fact] - public void SingleResultConvertibleDirectViaChangeType_Type() - { - var value = RedisResult.Create(123); - Assert.StrictEqual((int)123, Convert.ChangeType(value, typeof(int))); - Assert.StrictEqual((uint)123U, Convert.ChangeType(value, typeof(uint))); - Assert.StrictEqual((long)123, Convert.ChangeType(value, typeof(long))); - Assert.StrictEqual((ulong)123U, Convert.ChangeType(value, typeof(ulong))); - Assert.StrictEqual((byte)123, Convert.ChangeType(value, typeof(byte))); - Assert.StrictEqual((sbyte)123, Convert.ChangeType(value, typeof(sbyte))); - Assert.StrictEqual((short)123, Convert.ChangeType(value, typeof(short))); - Assert.StrictEqual((ushort)123, Convert.ChangeType(value, typeof(ushort))); - Assert.Equal("123", Convert.ChangeType(value, typeof(string))); - Assert.StrictEqual(123M, Convert.ChangeType(value, typeof(decimal))); - Assert.StrictEqual((char)123, Convert.ChangeType(value, typeof(char))); - Assert.StrictEqual(123f, Convert.ChangeType(value, typeof(float))); - Assert.StrictEqual(123d, Convert.ChangeType(value, typeof(double))); - } - - [Fact] - public void SingleResultConvertibleDirectViaChangeType_TypeCode() - { - var value = RedisResult.Create(123); - Assert.StrictEqual((int)123, Convert.ChangeType(value, TypeCode.Int32)); - Assert.StrictEqual((uint)123U, Convert.ChangeType(value, TypeCode.UInt32)); - Assert.StrictEqual((long)123, Convert.ChangeType(value, TypeCode.Int64)); - Assert.StrictEqual((ulong)123U, Convert.ChangeType(value, TypeCode.UInt64)); - Assert.StrictEqual((byte)123, Convert.ChangeType(value, TypeCode.Byte)); - Assert.StrictEqual((sbyte)123, Convert.ChangeType(value, TypeCode.SByte)); - Assert.StrictEqual((short)123, Convert.ChangeType(value, TypeCode.Int16)); - Assert.StrictEqual((ushort)123, Convert.ChangeType(value, TypeCode.UInt16)); - Assert.Equal("123", Convert.ChangeType(value, TypeCode.String)); - Assert.StrictEqual(123M, Convert.ChangeType(value, 
TypeCode.Decimal)); - Assert.StrictEqual((char)123, Convert.ChangeType(value, TypeCode.Char)); - Assert.StrictEqual(123f, Convert.ChangeType(value, TypeCode.Single)); - Assert.StrictEqual(123d, Convert.ChangeType(value, TypeCode.Double)); - } + // https://github.com/redis/NRedisStack/issues/439 + var value = RedisResult.Create("-inf", resultType); + Assert.True(double.IsNegativeInfinity(value.AsDouble())); } } diff --git a/tests/StackExchange.Redis.Tests/TestConfig.json b/tests/StackExchange.Redis.Tests/RedisTestConfig.json similarity index 75% rename from tests/StackExchange.Redis.Tests/TestConfig.json rename to tests/StackExchange.Redis.Tests/RedisTestConfig.json index 460c7c5ca..c652a4583 100644 --- a/tests/StackExchange.Redis.Tests/TestConfig.json +++ b/tests/StackExchange.Redis.Tests/RedisTestConfig.json @@ -1,6 +1,6 @@ { //"LogToConsole": false, - //"MasterServer": "[::1]", + //"PrimaryServer": "[::1]", //"ReplicaServer": "[::1]", //"SecureServer": "[::1]" } \ No newline at end of file diff --git a/tests/StackExchange.Redis.Tests/RedisValueEquivalency.cs b/tests/StackExchange.Redis.Tests/RedisValueEquivalency.cs deleted file mode 100644 index 834104a78..000000000 --- a/tests/StackExchange.Redis.Tests/RedisValueEquivalency.cs +++ /dev/null @@ -1,282 +0,0 @@ -using System.Runtime.CompilerServices; -using System.Text; -using Xunit; - -namespace StackExchange.Redis.Tests -{ - public class RedisValueEquivalency - { - // internal storage types: null, integer, double, string, raw - // public perceived types: int, long, double, bool, memory / byte[] - - [Fact] - public void Int32_Matrix() - { - void Check(RedisValue known, RedisValue test) - { - KeysAndValues.CheckSame(known, test); - if (known.IsNull) - { - Assert.True(test.IsNull); - Assert.False(((int?)test).HasValue); - } - else - { - Assert.False(test.IsNull); - Assert.Equal((int)known, ((int?)test).Value); - Assert.Equal((int)known, (int)test); - } - Assert.Equal((int)known, (int)test); - } - Check(42, 42); - 
Check(42, 42.0); - Check(42, "42"); - Check(42, "42.0"); - Check(42, Bytes("42")); - Check(42, Bytes("42.0")); - CheckString(42, "42"); - - Check(-42, -42); - Check(-42, -42.0); - Check(-42, "-42"); - Check(-42, "-42.0"); - Check(-42, Bytes("-42")); - Check(-42, Bytes("-42.0")); - CheckString(-42, "-42"); - - Check(1, true); - Check(0, false); - } - - [Fact] - public void Int64_Matrix() - { - void Check(RedisValue known, RedisValue test) - { - KeysAndValues.CheckSame(known, test); - if (known.IsNull) - { - Assert.True(test.IsNull); - Assert.False(((long?)test).HasValue); - } - else - { - Assert.False(test.IsNull); - Assert.Equal((long)known, ((long?)test).Value); - Assert.Equal((long)known, (long)test); - } - Assert.Equal((long)known, (long)test); - } - Check(1099511627848, 1099511627848); - Check(1099511627848, 1099511627848.0); - Check(1099511627848, "1099511627848"); - Check(1099511627848, "1099511627848.0"); - Check(1099511627848, Bytes("1099511627848")); - Check(1099511627848, Bytes("1099511627848.0")); - CheckString(1099511627848, "1099511627848"); - - Check(-1099511627848, -1099511627848); - Check(-1099511627848, -1099511627848); - Check(-1099511627848, "-1099511627848"); - Check(-1099511627848, "-1099511627848.0"); - Check(-1099511627848, Bytes("-1099511627848")); - Check(-1099511627848, Bytes("-1099511627848.0")); - CheckString(-1099511627848, "-1099511627848"); - - Check(1L, true); - Check(0L, false); - } - - [Fact] - public void Double_Matrix() - { - void Check(RedisValue known, RedisValue test) - { - KeysAndValues.CheckSame(known, test); - if (known.IsNull) - { - Assert.True(test.IsNull); - Assert.False(((double?)test).HasValue); - } - else - { - Assert.False(test.IsNull); - Assert.Equal((double)known, ((double?)test).Value); - Assert.Equal((double)known, (double)test); - } - Assert.Equal((double)known, (double)test); - } - Check(1099511627848.0, 1099511627848); - Check(1099511627848.0, 1099511627848.0); - Check(1099511627848.0, "1099511627848"); - 
Check(1099511627848.0, "1099511627848.0"); - Check(1099511627848.0, Bytes("1099511627848")); - Check(1099511627848.0, Bytes("1099511627848.0")); - CheckString(1099511627848.0, "1099511627848"); - - Check(-1099511627848.0, -1099511627848); - Check(-1099511627848.0, -1099511627848); - Check(-1099511627848.0, "-1099511627848"); - Check(-1099511627848.0, "-1099511627848.0"); - Check(-1099511627848.0, Bytes("-1099511627848")); - Check(-1099511627848.0, Bytes("-1099511627848.0")); - CheckString(-1099511627848.0, "-1099511627848"); - - Check(1.0, true); - Check(0.0, false); - - Check(1099511627848.6001, 1099511627848.6001); - Check(1099511627848.6001, "1099511627848.6001"); - Check(1099511627848.6001, Bytes("1099511627848.6001")); - CheckString(1099511627848.6001, "1099511627848.6001"); - - Check(-1099511627848.6001, -1099511627848.6001); - Check(-1099511627848.6001, "-1099511627848.6001"); - Check(-1099511627848.6001, Bytes("-1099511627848.6001")); - CheckString(-1099511627848.6001, "-1099511627848.6001"); - - Check(double.NegativeInfinity, double.NegativeInfinity); - Check(double.NegativeInfinity, "-inf"); - CheckString(double.NegativeInfinity, "-inf"); - - Check(double.PositiveInfinity, double.PositiveInfinity); - Check(double.PositiveInfinity, "+inf"); - CheckString(double.PositiveInfinity, "+inf"); - } - - private static void CheckString(RedisValue value, string expected) - { - var s = value.ToString(); - Assert.True(s == expected, $"'{s}' vs '{expected}'"); - } - - private static byte[] Bytes(string s) => s == null ? 
null : Encoding.UTF8.GetBytes(s); - - private string LineNumber([CallerLineNumber] int lineNumber = 0) => lineNumber.ToString(); - - [Fact] - public void RedisValueStartsWith() - { - // test strings - RedisValue x = "abc"; - Assert.True(x.StartsWith("a"), LineNumber()); - Assert.True(x.StartsWith("ab"), LineNumber()); - Assert.True(x.StartsWith("abc"), LineNumber()); - Assert.False(x.StartsWith("abd"), LineNumber()); - Assert.False(x.StartsWith("abcd"), LineNumber()); - Assert.False(x.StartsWith(123), LineNumber()); - Assert.False(x.StartsWith(false), LineNumber()); - - // test binary - x = Encoding.ASCII.GetBytes("abc"); - Assert.True(x.StartsWith("a"), LineNumber()); - Assert.True(x.StartsWith("ab"), LineNumber()); - Assert.True(x.StartsWith("abc"), LineNumber()); - Assert.False(x.StartsWith("abd"), LineNumber()); - Assert.False(x.StartsWith("abcd"), LineNumber()); - Assert.False(x.StartsWith(123), LineNumber()); - Assert.False(x.StartsWith(false), LineNumber()); - - Assert.True(x.StartsWith(Encoding.ASCII.GetBytes("a")), LineNumber()); - Assert.True(x.StartsWith(Encoding.ASCII.GetBytes("ab")), LineNumber()); - Assert.True(x.StartsWith(Encoding.ASCII.GetBytes("abc")), LineNumber()); - Assert.False(x.StartsWith(Encoding.ASCII.GetBytes("abd")), LineNumber()); - Assert.False(x.StartsWith(Encoding.ASCII.GetBytes("abcd")), LineNumber()); - - x = 10; // integers are effectively strings in this context - Assert.True(x.StartsWith(1), LineNumber()); - Assert.True(x.StartsWith(10), LineNumber()); - Assert.False(x.StartsWith(100), LineNumber()); - } - - [Fact] - public void TryParseInt64() - { - Assert.True(((RedisValue)123).TryParse(out long l)); - Assert.Equal(123, l); - - Assert.True(((RedisValue)123.0).TryParse(out l)); - Assert.Equal(123, l); - - Assert.True(((RedisValue)(int.MaxValue + 123L)).TryParse(out l)); - Assert.Equal(int.MaxValue + 123L, l); - - Assert.True(((RedisValue)"123").TryParse(out l)); - Assert.Equal(123, l); - - 
Assert.True(((RedisValue)(-123)).TryParse(out l)); - Assert.Equal(-123, l); - - Assert.True(default(RedisValue).TryParse(out l)); - Assert.Equal(0, l); - - Assert.True(((RedisValue)123.0).TryParse(out l)); - Assert.Equal(123, l); - - Assert.False(((RedisValue)"abc").TryParse(out l)); - Assert.False(((RedisValue)"123.1").TryParse(out l)); - Assert.False(((RedisValue)123.1).TryParse(out l)); - } - - [Fact] - public void TryParseInt32() - { - Assert.True(((RedisValue)123).TryParse(out int i)); - Assert.Equal(123, i); - - Assert.True(((RedisValue)123.0).TryParse(out i)); - Assert.Equal(123, i); - - Assert.False(((RedisValue)(int.MaxValue + 123L)).TryParse(out i)); - - Assert.True(((RedisValue)"123").TryParse(out i)); - Assert.Equal(123, i); - - Assert.True(((RedisValue)(-123)).TryParse(out i)); - Assert.Equal(-123, i); - - Assert.True(default(RedisValue).TryParse(out i)); - Assert.Equal(0, i); - - Assert.True(((RedisValue)123.0).TryParse(out i)); - Assert.Equal(123, i); - - Assert.False(((RedisValue)"abc").TryParse(out i)); - Assert.False(((RedisValue)"123.1").TryParse(out i)); - Assert.False(((RedisValue)123.1).TryParse(out i)); - } - - [Fact] - public void TryParseDouble() - { - Assert.True(((RedisValue)123).TryParse(out double d)); - Assert.Equal(123, d); - - Assert.True(((RedisValue)123.0).TryParse(out d)); - Assert.Equal(123.0, d); - - Assert.True(((RedisValue)123.1).TryParse(out d)); - Assert.Equal(123.1, d); - - Assert.True(((RedisValue)(int.MaxValue + 123L)).TryParse(out d)); - Assert.Equal(int.MaxValue + 123L, d); - - Assert.True(((RedisValue)"123").TryParse(out d)); - Assert.Equal(123.0, d); - - Assert.True(((RedisValue)(-123)).TryParse(out d)); - Assert.Equal(-123.0, d); - - Assert.True(default(RedisValue).TryParse(out d)); - Assert.Equal(0.0, d); - - Assert.True(((RedisValue)123.0).TryParse(out d)); - Assert.Equal(123.0, d); - - Assert.True(((RedisValue)"123.1").TryParse(out d)); - Assert.Equal(123.1, d); - - Assert.False(((RedisValue)"abc").TryParse(out 
d)); - } - } -} diff --git a/tests/StackExchange.Redis.Tests/RedisValueEquivalencyTests.cs b/tests/StackExchange.Redis.Tests/RedisValueEquivalencyTests.cs new file mode 100644 index 000000000..391a0237a --- /dev/null +++ b/tests/StackExchange.Redis.Tests/RedisValueEquivalencyTests.cs @@ -0,0 +1,455 @@ +using System; +using System.Runtime.CompilerServices; +using System.Text; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class RedisValueEquivalency +{ + // internal storage types: null, integer, double, string, raw + // public perceived types: int, long, double, bool, memory / byte[] + [Fact] + public void Int32_Matrix() + { + static void Check(RedisValue known, RedisValue test) + { + KeyAndValueTests.CheckSame(known, test); + if (known.IsNull) + { + Assert.True(test.IsNull); + Assert.False(((int?)test).HasValue); + } + else + { + Assert.False(test.IsNull); + Assert.Equal((int)known, ((int?)test)!.Value); + Assert.Equal((int)known, (int)test); + } + Assert.Equal((int)known, (int)test); + } + Check(42, 42); + Check(42, 42.0); + Check(42, "42"); + Check(42, "42.0"); + Check(42, Bytes("42")); + Check(42, Bytes("42.0")); + CheckString(42, "42"); + + Check(-42, -42); + Check(-42, -42.0); + Check(-42, "-42"); + Check(-42, "-42.0"); + Check(-42, Bytes("-42")); + Check(-42, Bytes("-42.0")); + CheckString(-42, "-42"); + + Check(1, true); + Check(0, false); + } + + [Fact] + public void Int64_Matrix() + { + static void Check(RedisValue known, RedisValue test) + { + KeyAndValueTests.CheckSame(known, test); + if (known.IsNull) + { + Assert.True(test.IsNull); + Assert.False(((long?)test).HasValue); + } + else + { + Assert.False(test.IsNull); + Assert.Equal((long)known, ((long?)test!).Value); + Assert.Equal((long)known, (long)test); + } + Assert.Equal((long)known, (long)test); + } + Check(1099511627848, 1099511627848); + Check(1099511627848, 1099511627848.0); + Check(1099511627848, "1099511627848"); + Check(1099511627848, "1099511627848.0"); + Check(1099511627848, 
Bytes("1099511627848")); + Check(1099511627848, Bytes("1099511627848.0")); + CheckString(1099511627848, "1099511627848"); + + Check(-1099511627848, -1099511627848); + Check(-1099511627848, -1099511627848); + Check(-1099511627848, "-1099511627848"); + Check(-1099511627848, "-1099511627848.0"); + Check(-1099511627848, Bytes("-1099511627848")); + Check(-1099511627848, Bytes("-1099511627848.0")); + CheckString(-1099511627848, "-1099511627848"); + + Check(1L, true); + Check(0L, false); + } + + [Fact] + public void Double_Matrix() + { + static void Check(RedisValue known, RedisValue test) + { + KeyAndValueTests.CheckSame(known, test); + if (known.IsNull) + { + Assert.True(test.IsNull); + Assert.False(((double?)test).HasValue); + } + else + { + Assert.False(test.IsNull); + Assert.Equal((double)known, ((double?)test)!.Value); + Assert.Equal((double)known, (double)test); + } + Assert.Equal((double)known, (double)test); + } + Check(1099511627848.0, 1099511627848); + Check(1099511627848.0, 1099511627848.0); + Check(1099511627848.0, "1099511627848"); + Check(1099511627848.0, "1099511627848.0"); + Check(1099511627848.0, Bytes("1099511627848")); + Check(1099511627848.0, Bytes("1099511627848.0")); + CheckString(1099511627848.0, "1099511627848"); + + Check(-1099511627848.0, -1099511627848); + Check(-1099511627848.0, -1099511627848); + Check(-1099511627848.0, "-1099511627848"); + Check(-1099511627848.0, "-1099511627848.0"); + Check(-1099511627848.0, Bytes("-1099511627848")); + Check(-1099511627848.0, Bytes("-1099511627848.0")); + CheckString(-1099511627848.0, "-1099511627848"); + + Check(1.0, true); + Check(0.0, false); + + Check(1099511627848.6001, 1099511627848.6001); + Check(1099511627848.6001, "1099511627848.6001"); + Check(1099511627848.6001, Bytes("1099511627848.6001")); + CheckString(1099511627848.6001, "1099511627848.6001"); + + Check(-1099511627848.6001, -1099511627848.6001); + Check(-1099511627848.6001, "-1099511627848.6001"); + Check(-1099511627848.6001, 
Bytes("-1099511627848.6001")); + CheckString(-1099511627848.6001, "-1099511627848.6001"); + + Check(double.NegativeInfinity, double.NegativeInfinity); + CheckString(double.NegativeInfinity, "-inf"); + + Check(double.PositiveInfinity, double.PositiveInfinity); + CheckString(double.PositiveInfinity, "+inf"); + + Check(double.NaN, double.NaN); + CheckString(double.NaN, "NaN"); + } + + [Theory] + [InlineData("na")] + [InlineData("nan")] + [InlineData("nans")] + [InlineData("in")] + [InlineData("inf")] + [InlineData("info")] + public void SpecialCaseEqualityRules_String(string value) + { + RedisValue x = value, y = value; + Assert.Equal(x, y); + + Assert.True(x.Equals(y)); + Assert.True(y.Equals(x)); + Assert.True(x == y); + Assert.True(y == x); + Assert.False(x != y); + Assert.False(y != x); + Assert.Equal(x.GetHashCode(), y.GetHashCode()); + } + + [Theory] + [InlineData("na")] + [InlineData("nan")] + [InlineData("nans")] + [InlineData("in")] + [InlineData("inf")] + [InlineData("info")] + public void SpecialCaseEqualityRules_Bytes(string value) + { + byte[] bytes0 = Encoding.UTF8.GetBytes(value), + bytes1 = Encoding.UTF8.GetBytes(value); + Assert.NotSame(bytes0, bytes1); + RedisValue x = bytes0, y = bytes1; + + Assert.True(x.Equals(y)); + Assert.True(y.Equals(x)); + Assert.True(x == y); + Assert.True(y == x); + Assert.False(x != y); + Assert.False(y != x); + Assert.Equal(x.GetHashCode(), y.GetHashCode()); + } + + [Theory] + [InlineData("na")] + [InlineData("nan")] + [InlineData("nans")] + [InlineData("in")] + [InlineData("inf")] + [InlineData("info")] + public void SpecialCaseEqualityRules_Hybrid(string value) + { + byte[] bytes = Encoding.UTF8.GetBytes(value); + RedisValue x = bytes, y = value; + + Assert.True(x.Equals(y)); + Assert.True(y.Equals(x)); + Assert.True(x == y); + Assert.True(y == x); + Assert.False(x != y); + Assert.False(y != x); + Assert.Equal(x.GetHashCode(), y.GetHashCode()); + } + + [Theory] + [InlineData("na", "NA")] + [InlineData("nan", "NAN")] + 
[InlineData("nans", "NANS")] + [InlineData("in", "IN")] + [InlineData("inf", "INF")] + [InlineData("info", "INFO")] + public void SpecialCaseNonEqualityRules_String(string s, string t) + { + RedisValue x = s, y = t; + Assert.False(x.Equals(y)); + Assert.False(y.Equals(x)); + Assert.False(x == y); + Assert.False(y == x); + Assert.True(x != y); + Assert.True(y != x); + } + + [Theory] + [InlineData("na", "NA")] + [InlineData("nan", "NAN")] + [InlineData("nans", "NANS")] + [InlineData("in", "IN")] + [InlineData("inf", "INF")] + [InlineData("info", "INFO")] + public void SpecialCaseNonEqualityRules_Bytes(string s, string t) + { + RedisValue x = Encoding.UTF8.GetBytes(s), y = Encoding.UTF8.GetBytes(t); + Assert.False(x.Equals(y)); + Assert.False(y.Equals(x)); + Assert.False(x == y); + Assert.False(y == x); + Assert.True(x != y); + Assert.True(y != x); + } + + [Theory] + [InlineData("na", "NA")] + [InlineData("nan", "NAN")] + [InlineData("nans", "NANS")] + [InlineData("in", "IN")] + [InlineData("inf", "INF")] + [InlineData("info", "INFO")] + public void SpecialCaseNonEqualityRules_Hybrid(string s, string t) + { + RedisValue x = s, y = Encoding.UTF8.GetBytes(t); + Assert.False(x.Equals(y)); + Assert.False(y.Equals(x)); + Assert.False(x == y); + Assert.False(y == x); + Assert.True(x != y); + Assert.True(y != x); + } + + private static void CheckString(RedisValue value, string expected) + { + var s = value.ToString(); + Assert.True(s == expected, $"'{s}' vs '{expected}'"); + } + + private static byte[]? Bytes(string? s) => s == null ? 
null : Encoding.UTF8.GetBytes(s); + + private static string LineNumber([CallerLineNumber] int lineNumber = 0) => lineNumber.ToString(); + + [Fact] + public void RedisValueStartsWith() + { + // test strings + RedisValue x = "abc"; + Assert.True(x.StartsWith("a"), LineNumber()); + Assert.True(x.StartsWith("ab"), LineNumber()); + Assert.True(x.StartsWith("abc"), LineNumber()); + Assert.False(x.StartsWith("abd"), LineNumber()); + Assert.False(x.StartsWith("abcd"), LineNumber()); + Assert.False(x.StartsWith(123), LineNumber()); + Assert.False(x.StartsWith(false), LineNumber()); + + // test binary + x = Encoding.ASCII.GetBytes("abc"); + Assert.True(x.StartsWith("a"), LineNumber()); + Assert.True(x.StartsWith("ab"), LineNumber()); + Assert.True(x.StartsWith("abc"), LineNumber()); + Assert.False(x.StartsWith("abd"), LineNumber()); + Assert.False(x.StartsWith("abcd"), LineNumber()); + Assert.False(x.StartsWith(123), LineNumber()); + Assert.False(x.StartsWith(false), LineNumber()); + + Assert.True(x.StartsWith((RedisValue)Encoding.ASCII.GetBytes("a")), LineNumber()); + Assert.True(x.StartsWith((RedisValue)Encoding.ASCII.GetBytes("ab")), LineNumber()); + Assert.True(x.StartsWith((RedisValue)Encoding.ASCII.GetBytes("abc")), LineNumber()); + Assert.False(x.StartsWith((RedisValue)Encoding.ASCII.GetBytes("abd")), LineNumber()); + Assert.False(x.StartsWith((RedisValue)Encoding.ASCII.GetBytes("abcd")), LineNumber()); + + Assert.True(x.StartsWith("a"u8), LineNumber()); + Assert.True(x.StartsWith("ab"u8), LineNumber()); + Assert.True(x.StartsWith("abc"u8), LineNumber()); + Assert.False(x.StartsWith("abd"u8), LineNumber()); + Assert.False(x.StartsWith("abcd"u8), LineNumber()); + + x = 10; // integers are effectively strings in this context + Assert.True(x.StartsWith(1), LineNumber()); + Assert.True(x.StartsWith(10), LineNumber()); + Assert.False(x.StartsWith(100), LineNumber()); + } + + [Fact] + public void TryParseInt64() + { + Assert.True(((RedisValue)123).TryParse(out long l)); + 
Assert.Equal(123, l); + + Assert.True(((RedisValue)123.0).TryParse(out l)); + Assert.Equal(123, l); + + Assert.True(((RedisValue)(int.MaxValue + 123L)).TryParse(out l)); + Assert.Equal(int.MaxValue + 123L, l); + + Assert.True(((RedisValue)"123").TryParse(out l)); + Assert.Equal(123, l); + + Assert.True(((RedisValue)(-123)).TryParse(out l)); + Assert.Equal(-123, l); + + Assert.True(default(RedisValue).TryParse(out l)); + Assert.Equal(0, l); + + Assert.True(((RedisValue)123.0).TryParse(out l)); + Assert.Equal(123, l); + + Assert.False(((RedisValue)"abc").TryParse(out long _)); + Assert.False(((RedisValue)"123.1").TryParse(out long _)); + Assert.False(((RedisValue)123.1).TryParse(out long _)); + } + + [Fact] + public void TryParseInt32() + { + Assert.True(((RedisValue)123).TryParse(out int i)); + Assert.Equal(123, i); + + Assert.True(((RedisValue)123.0).TryParse(out i)); + Assert.Equal(123, i); + + Assert.False(((RedisValue)(int.MaxValue + 123L)).TryParse(out int _)); + + Assert.True(((RedisValue)"123").TryParse(out i)); + Assert.Equal(123, i); + + Assert.True(((RedisValue)(-123)).TryParse(out i)); + Assert.Equal(-123, i); + + Assert.True(default(RedisValue).TryParse(out i)); + Assert.Equal(0, i); + + Assert.True(((RedisValue)123.0).TryParse(out i)); + Assert.Equal(123, i); + + Assert.False(((RedisValue)"abc").TryParse(out int _)); + Assert.False(((RedisValue)"123.1").TryParse(out int _)); + Assert.False(((RedisValue)123.1).TryParse(out int _)); + } + + [Fact] + public void TryParseDouble() + { + Assert.True(((RedisValue)123).TryParse(out double d)); + Assert.Equal(123, d); + + Assert.True(((RedisValue)123.0).TryParse(out d)); + Assert.Equal(123.0, d); + + Assert.True(((RedisValue)123.1).TryParse(out d)); + Assert.Equal(123.1, d); + + Assert.True(((RedisValue)(int.MaxValue + 123L)).TryParse(out d)); + Assert.Equal(int.MaxValue + 123L, d); + + Assert.True(((RedisValue)"123").TryParse(out d)); + Assert.Equal(123.0, d); + + Assert.True(((RedisValue)(-123)).TryParse(out 
d)); + Assert.Equal(-123.0, d); + + Assert.True(default(RedisValue).TryParse(out d)); + Assert.Equal(0.0, d); + + Assert.True(((RedisValue)123.0).TryParse(out d)); + Assert.Equal(123.0, d); + + Assert.True(((RedisValue)"123.1").TryParse(out d)); + Assert.Equal(123.1, d); + + Assert.False(((RedisValue)"abc").TryParse(out double _)); + } + + [Fact] + public void RedisValueLengthString() + { + RedisValue value = "abc"; + Assert.Equal(RedisValue.StorageType.String, value.Type); + Assert.Equal(3, value.Length()); + } + + [Fact] + public void RedisValueLengthDouble() + { + RedisValue value = Math.PI; + Assert.Equal(RedisValue.StorageType.Double, value.Type); + Assert.Equal(18, value.Length()); + } + + [Fact] + public void RedisValueLengthInt64() + { + RedisValue value = 123; + Assert.Equal(RedisValue.StorageType.Int64, value.Type); + Assert.Equal(3, value.Length()); + } + + [Fact] + public void RedisValueLengthUInt64() + { + RedisValue value = ulong.MaxValue - 5; + Assert.Equal(RedisValue.StorageType.UInt64, value.Type); + Assert.Equal(20, value.Length()); + } + + [Fact] + public void RedisValueLengthRaw() + { + RedisValue value = new byte[] { 0, 1, 2 }; + Assert.Equal(RedisValue.StorageType.Raw, value.Type); + Assert.Equal(3, value.Length()); + } + + [Fact] + public void RedisValueLengthNull() + { + RedisValue value = RedisValue.Null; + Assert.Equal(RedisValue.StorageType.Null, value.Type); + Assert.Equal(0, value.Length()); + } +} diff --git a/tests/StackExchange.Redis.Tests/RespProtocolTests.cs b/tests/StackExchange.Redis.Tests/RespProtocolTests.cs new file mode 100644 index 000000000..855ec96d1 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/RespProtocolTests.cs @@ -0,0 +1,435 @@ +using System; +using System.Linq; +using System.Text.RegularExpressions; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public sealed class RespProtocolTests(ITestOutputHelper output, SharedConnectionFixture fixture) : TestBase(output, fixture) 
+{ + [Fact] + [RunPerProtocol] + public async Task ConnectWithTiming() + { + await using var conn = Create(shared: false, log: Writer); + await conn.GetDatabase().PingAsync(); + } + + [Theory] + // specify nothing + [InlineData("someserver", false)] + // specify *just* the protocol; sure, we'll believe you + [InlineData("someserver,protocol=resp3", true)] + [InlineData("someserver,protocol=resp3,$HELLO=", false)] + [InlineData("someserver,protocol=resp3,$HELLO=BONJOUR", true)] + [InlineData("someserver,protocol=3", true, "resp3")] + [InlineData("someserver,protocol=3,$HELLO=", false, "resp3")] + [InlineData("someserver,protocol=3,$HELLO=BONJOUR", true, "resp3")] + [InlineData("someserver,protocol=2", false, "resp2")] + [InlineData("someserver,protocol=2,$HELLO=", false, "resp2")] + [InlineData("someserver,protocol=2,$HELLO=BONJOUR", false, "resp2")] + // specify a pre-6 version - only used if protocol specified + [InlineData("someserver,version=5.9", false)] + [InlineData("someserver,version=5.9,$HELLO=", false)] + [InlineData("someserver,version=5.9,$HELLO=BONJOUR", false)] + [InlineData("someserver,version=5.9,protocol=resp3", true)] + [InlineData("someserver,version=5.9,protocol=resp3,$HELLO=", false)] + [InlineData("someserver,version=5.9,protocol=resp3,$HELLO=BONJOUR", true)] + [InlineData("someserver,version=5.9,protocol=3", true, "resp3")] + [InlineData("someserver,version=5.9,protocol=3,$HELLO=", false, "resp3")] + [InlineData("someserver,version=5.9,protocol=3,$HELLO=BONJOUR", true, "resp3")] + [InlineData("someserver,version=5.9,protocol=2", false, "resp2")] + [InlineData("someserver,version=5.9,protocol=2,$HELLO=", false, "resp2")] + [InlineData("someserver,version=5.9,protocol=2,$HELLO=BONJOUR", false, "resp2")] + // specify a post-6 version; attempt by default + [InlineData("someserver,version=6.0", false)] + [InlineData("someserver,version=6.0,$HELLO=", false)] + [InlineData("someserver,version=6.0,$HELLO=BONJOUR", false)] + 
[InlineData("someserver,version=6.0,protocol=resp3", true)] + [InlineData("someserver,version=6.0,protocol=resp3,$HELLO=", false)] + [InlineData("someserver,version=6.0,protocol=resp3,$HELLO=BONJOUR", true)] + [InlineData("someserver,version=6.0,protocol=3", true, "resp3")] + [InlineData("someserver,version=6.0,protocol=3,$HELLO=", false, "resp3")] + [InlineData("someserver,version=6.0,protocol=3,$HELLO=BONJOUR", true, "resp3")] + [InlineData("someserver,version=6.0,protocol=2", false, "resp2")] + [InlineData("someserver,version=6.0,protocol=2,$HELLO=", false, "resp2")] + [InlineData("someserver,version=6.0,protocol=2,$HELLO=BONJOUR", false, "resp2")] + [InlineData("someserver,version=7.2", false)] + [InlineData("someserver,version=7.2,$HELLO=", false)] + [InlineData("someserver,version=7.2,$HELLO=BONJOUR", false)] + public void ParseFormatConfigOptions(string configurationString, bool tryResp3, string? formatProtocol = null) + { + var config = ConfigurationOptions.Parse(configurationString); + + string expectedConfigurationString = formatProtocol is null ? 
configurationString : Regex.Replace(configurationString, "(?<=protocol=)[^,]+", formatProtocol); + + Assert.Equal(expectedConfigurationString, config.ToString(true)); // check round-trip + Assert.Equal(expectedConfigurationString, config.Clone().ToString(true)); // check clone + Assert.Equal(tryResp3, config.TryResp3()); + } + + [Fact] + [RunPerProtocol] + public async Task TryConnect() + { + var muxer = Create(shared: false); + await muxer.GetDatabase().PingAsync(); + + var server = muxer.GetServerEndPoint(muxer.GetEndPoints().Single()); + if (TestContext.Current.IsResp3() && !server.GetFeatures().Resp3) + { + Assert.Skip("server does not support RESP3"); + } + if (TestContext.Current.IsResp3()) + { + Assert.Equal(RedisProtocol.Resp3, server.Protocol); + } + else + { + Assert.Equal(RedisProtocol.Resp2, server.Protocol); + } + var cid = server.GetBridge(RedisCommand.GET)?.ConnectionId; + if (server.GetFeatures().ClientId) + { + Assert.NotNull(cid); + } + else + { + Assert.Null(cid); + } + } + + [Theory] + [InlineData("HELLO", true)] + [InlineData("BONJOUR", false)] + public async Task ConnectWithBrokenHello(string command, bool isResp3) + { + var config = ConfigurationOptions.Parse(TestConfig.Current.SecureServerAndPort); + config.Password = TestConfig.Current.SecurePassword; + config.Protocol = RedisProtocol.Resp3; + config.CommandMap = CommandMap.Create(new() { ["hello"] = command }); + + await using var muxer = await ConnectionMultiplexer.ConnectAsync(config, Writer); + await muxer.GetDatabase().PingAsync(); // is connected + var ep = muxer.GetServerEndPoint(muxer.GetEndPoints()[0]); + if (!ep.GetFeatures().Resp3) // this is just a v6 check + { + isResp3 = false; // then, no: it won't be + } + Assert.Equal(isResp3 ? RedisProtocol.Resp3 : RedisProtocol.Resp2, ep.Protocol); + var result = await muxer.GetDatabase().ExecuteAsync("latency", "doctor"); + Assert.Equal(isResp3 ? 
ResultType.VerbatimString : ResultType.BulkString, result.Resp3Type); + } + + [Theory] + [InlineData("return 42", RedisProtocol.Resp2, ResultType.Integer, ResultType.Integer, 42)] + [InlineData("return 'abc'", RedisProtocol.Resp2, ResultType.BulkString, ResultType.BulkString, "abc")] + [InlineData(@"return {1,2,3}", RedisProtocol.Resp2, ResultType.Array, ResultType.Array, ARR_123)] + [InlineData("return nil", RedisProtocol.Resp2, ResultType.BulkString, ResultType.Null, null)] + [InlineData(@"return redis.pcall('hgetall', '{key}')", RedisProtocol.Resp2, ResultType.Array, ResultType.Array, MAP_ABC)] + [InlineData(@"redis.setresp(3) return redis.pcall('hgetall', '{key}')", RedisProtocol.Resp2, ResultType.Array, ResultType.Array, MAP_ABC)] + [InlineData("return true", RedisProtocol.Resp2, ResultType.Integer, ResultType.Integer, 1)] + [InlineData("return false", RedisProtocol.Resp2, ResultType.BulkString, ResultType.Null, null)] + [InlineData("redis.setresp(3) return true", RedisProtocol.Resp2, ResultType.Integer, ResultType.Integer, 1)] + [InlineData("redis.setresp(3) return false", RedisProtocol.Resp2, ResultType.Integer, ResultType.Integer, 0)] + + [InlineData("return { map = { a = 1, b = 2, c = 3 } }", RedisProtocol.Resp2, ResultType.Array, ResultType.Array, MAP_ABC, 6)] + [InlineData("return { set = { a = 1, b = 2, c = 3 } }", RedisProtocol.Resp2, ResultType.Array, ResultType.Array, SET_ABC, 6)] + [InlineData("return { double = 42 }", RedisProtocol.Resp2, ResultType.BulkString, ResultType.BulkString, 42.0, 6)] + + [InlineData("return 42", RedisProtocol.Resp3, ResultType.Integer, ResultType.Integer, 42)] + [InlineData("return 'abc'", RedisProtocol.Resp3, ResultType.BulkString, ResultType.BulkString, "abc")] + [InlineData("return {1,2,3}", RedisProtocol.Resp3, ResultType.Array, ResultType.Array, ARR_123)] + [InlineData("return nil", RedisProtocol.Resp3, ResultType.BulkString, ResultType.Null, null)] + [InlineData(@"return redis.pcall('hgetall', '{key}')", 
RedisProtocol.Resp3, ResultType.Array, ResultType.Array, MAP_ABC)] + [InlineData(@"redis.setresp(3) return redis.pcall('hgetall', '{key}')", RedisProtocol.Resp3, ResultType.Array, ResultType.Map, MAP_ABC)] + [InlineData("return true", RedisProtocol.Resp3, ResultType.Integer, ResultType.Integer, 1)] + [InlineData("return false", RedisProtocol.Resp3, ResultType.BulkString, ResultType.Null, null)] + [InlineData("redis.setresp(3) return true", RedisProtocol.Resp3, ResultType.Integer, ResultType.Boolean, true)] + [InlineData("redis.setresp(3) return false", RedisProtocol.Resp3, ResultType.Integer, ResultType.Boolean, false)] + + [InlineData("return { map = { a = 1, b = 2, c = 3 } }", RedisProtocol.Resp3, ResultType.Array, ResultType.Map, MAP_ABC, 6)] + [InlineData("return { set = { a = 1, b = 2, c = 3 } }", RedisProtocol.Resp3, ResultType.Array, ResultType.Set, SET_ABC, 6)] + [InlineData("return { double = 42 }", RedisProtocol.Resp3, ResultType.SimpleString, ResultType.Double, 42.0, 6)] + public async Task CheckLuaResult(string script, RedisProtocol protocol, ResultType resp2, ResultType resp3, object? expected, int? serverMin = 1) + { + // note Lua does not appear to return RESP3 types in any scenarios + var muxer = Create(protocol: protocol); + var ep = muxer.GetServerEndPoint(muxer.GetEndPoints().Single()); + if (serverMin > ep.Version.Major) + { + Assert.Skip($"applies to v{serverMin} onwards - detected v{ep.Version.Major}"); + } + if (script.Contains("redis.setresp(3)") && !ep.GetFeatures().Resp3) /* v6 check */ + { + Assert.Skip("debug protocol not available"); + } + if (ep.Protocol is null) throw new InvalidOperationException($"No protocol! 
{ep.InteractiveConnectionState}"); + Assert.Equal(protocol, ep.Protocol); + var key = Me(); + script = script.Replace("{key}", key); + + var db = muxer.GetDatabase(); + if (expected is MAP_ABC) + { + db.KeyDelete(key); + db.HashSet(key, "a", 1); + db.HashSet(key, "b", 2); + db.HashSet(key, "c", 3); + } + var result = await db.ScriptEvaluateAsync(script: script, flags: CommandFlags.NoScriptCache); + Assert.Equal(resp2, result.Resp2Type); + Assert.Equal(resp3, result.Resp3Type); + + switch (expected) + { + case null: + Assert.True(result.IsNull); + break; + case ARR_123: + Assert.Equal(3, result.Length); + for (int i = 0; i < result.Length; i++) + { + Assert.Equal(i + 1, result[i].AsInt32()); + } + break; + case MAP_ABC: + var map = result.ToDictionary(); + Assert.Equal(3, map.Count); + Assert.True(map.TryGetValue("a", out var value)); + Assert.Equal(1, value.AsInt32()); + Assert.True(map.TryGetValue("b", out value)); + Assert.Equal(2, value.AsInt32()); + Assert.True(map.TryGetValue("c", out value)); + Assert.Equal(3, value.AsInt32()); + break; + case SET_ABC: + Assert.Equal(3, result.Length); + var arr = result.AsStringArray()!; + Assert.Contains("a", arr); + Assert.Contains("b", arr); + Assert.Contains("c", arr); + break; + case string s: + Assert.Equal(s, result.AsString()); + break; + case double d: + Assert.Equal(d, result.AsDouble()); + break; + case int i: + Assert.Equal(i, result.AsInt32()); + break; + case bool b: + Assert.Equal(b, result.AsBoolean()); + break; + } + } + + [Theory] + // [InlineData("return 42", false, ResultType.Integer, ResultType.Integer, 42)] + // [InlineData("return 'abc'", false, ResultType.BulkString, ResultType.BulkString, "abc")] + // [InlineData(@"return {1,2,3}", false, ResultType.Array, ResultType.Array, ARR_123)] + // [InlineData("return nil", false, ResultType.BulkString, ResultType.Null, null)] + // [InlineData(@"return redis.pcall('hgetall', 'key')", false, ResultType.Array, ResultType.Array, MAP_ABC)] + // [InlineData("return 
true", false, ResultType.Integer, ResultType.Integer, 1)] + + // [InlineData("return 42", true, ResultType.Integer, ResultType.Integer, 42)] + // [InlineData("return 'abc'", true, ResultType.BulkString, ResultType.BulkString, "abc")] + // [InlineData("return {1,2,3}", true, ResultType.Array, ResultType.Array, ARR_123)] + // [InlineData("return nil", true, ResultType.BulkString, ResultType.Null, null)] + // [InlineData(@"return redis.pcall('hgetall', 'key')", true, ResultType.Array, ResultType.Array, MAP_ABC)] + // [InlineData("return true", true, ResultType.Integer, ResultType.Integer, 1)] + [InlineData("incrby", RedisProtocol.Resp2, ResultType.Integer, ResultType.Integer, 42, "ikey", 2)] + [InlineData("incrby", RedisProtocol.Resp3, ResultType.Integer, ResultType.Integer, 42, "ikey", 2)] + [InlineData("incrby", RedisProtocol.Resp2, ResultType.Integer, ResultType.Integer, 2, "nkey", 2)] + [InlineData("incrby", RedisProtocol.Resp3, ResultType.Integer, ResultType.Integer, 2, "nkey", 2)] + + [InlineData("get", RedisProtocol.Resp2, ResultType.BulkString, ResultType.BulkString, "40", "ikey")] + [InlineData("get", RedisProtocol.Resp3, ResultType.BulkString, ResultType.BulkString, "40", "ikey")] + [InlineData("get", RedisProtocol.Resp2, ResultType.BulkString, ResultType.Null, null, "nkey")] + [InlineData("get", RedisProtocol.Resp3, ResultType.BulkString, ResultType.Null, null, "nkey")] + + [InlineData("smembers", RedisProtocol.Resp2, ResultType.Array, ResultType.Array, SET_ABC, "skey")] + [InlineData("smembers", RedisProtocol.Resp3, ResultType.Array, ResultType.Set, SET_ABC, "skey")] + [InlineData("smembers", RedisProtocol.Resp2, ResultType.Array, ResultType.Array, EMPTY_ARR, "nkey")] + [InlineData("smembers", RedisProtocol.Resp3, ResultType.Array, ResultType.Set, EMPTY_ARR, "nkey")] + + [InlineData("hgetall", RedisProtocol.Resp2, ResultType.Array, ResultType.Array, MAP_ABC, "hkey")] + [InlineData("hgetall", RedisProtocol.Resp3, ResultType.Array, ResultType.Map, MAP_ABC, 
"hkey")] + [InlineData("hgetall", RedisProtocol.Resp2, ResultType.Array, ResultType.Array, EMPTY_ARR, "nkey")] + [InlineData("hgetall", RedisProtocol.Resp3, ResultType.Array, ResultType.Map, EMPTY_ARR, "nkey")] + + [InlineData("sismember", RedisProtocol.Resp2, ResultType.Integer, ResultType.Integer, true, "skey", "b")] + [InlineData("sismember", RedisProtocol.Resp3, ResultType.Integer, ResultType.Integer, true, "skey", "b")] + [InlineData("sismember", RedisProtocol.Resp2, ResultType.Integer, ResultType.Integer, false, "nkey", "b")] + [InlineData("sismember", RedisProtocol.Resp3, ResultType.Integer, ResultType.Integer, false, "nkey", "b")] + [InlineData("sismember", RedisProtocol.Resp2, ResultType.Integer, ResultType.Integer, false, "skey", "d")] + [InlineData("sismember", RedisProtocol.Resp3, ResultType.Integer, ResultType.Integer, false, "skey", "d")] + + [InlineData("latency", RedisProtocol.Resp2, ResultType.BulkString, ResultType.BulkString, STR_DAVE, "doctor")] + [InlineData("latency", RedisProtocol.Resp3, ResultType.BulkString, ResultType.VerbatimString, STR_DAVE, "doctor")] + + [InlineData("incrbyfloat", RedisProtocol.Resp2, ResultType.BulkString, ResultType.BulkString, 41.5, "ikey", 1.5)] + [InlineData("incrbyfloat", RedisProtocol.Resp3, ResultType.BulkString, ResultType.BulkString, 41.5, "ikey", 1.5)] + + /* DEBUG PROTOCOL + * Reply with a test value of the specified type. 
can be: string, + * integer, double, bignum, null, array, set, map, attrib, push, verbatim, + * true, false., + * + * NOTE: "debug protocol" may be disabled in later default server configs; if this starts + * failing when we upgrade the test server: update the config to re-enable the command + */ + [InlineData("debug", RedisProtocol.Resp2, ResultType.BulkString, ResultType.BulkString, ANY, "protocol", "string")] + [InlineData("debug", RedisProtocol.Resp3, ResultType.BulkString, ResultType.BulkString, ANY, "protocol", "string")] + + [InlineData("debug", RedisProtocol.Resp2, ResultType.BulkString, ResultType.BulkString, ANY, "protocol", "double")] + [InlineData("debug", RedisProtocol.Resp3, ResultType.SimpleString, ResultType.Double, ANY, "protocol", "double")] + + [InlineData("debug", RedisProtocol.Resp2, ResultType.BulkString, ResultType.BulkString, ANY, "protocol", "bignum")] + [InlineData("debug", RedisProtocol.Resp3, ResultType.SimpleString, ResultType.BigInteger, ANY, "protocol", "bignum")] + + [InlineData("debug", RedisProtocol.Resp2, ResultType.BulkString, ResultType.Null, null, "protocol", "null")] + [InlineData("debug", RedisProtocol.Resp3, ResultType.BulkString, ResultType.Null, null, "protocol", "null")] + + [InlineData("debug", RedisProtocol.Resp2, ResultType.Array, ResultType.Array, ANY, "protocol", "array")] + [InlineData("debug", RedisProtocol.Resp3, ResultType.Array, ResultType.Array, ANY, "protocol", "array")] + + [InlineData("debug", RedisProtocol.Resp2, ResultType.Array, ResultType.Array, ANY, "protocol", "set")] + [InlineData("debug", RedisProtocol.Resp3, ResultType.Array, ResultType.Set, ANY, "protocol", "set")] + + [InlineData("debug", RedisProtocol.Resp2, ResultType.Array, ResultType.Array, ANY, "protocol", "map")] + [InlineData("debug", RedisProtocol.Resp3, ResultType.Array, ResultType.Map, ANY, "protocol", "map")] + + [InlineData("debug", RedisProtocol.Resp2, ResultType.BulkString, ResultType.BulkString, ANY, "protocol", "verbatim")] + 
[InlineData("debug", RedisProtocol.Resp3, ResultType.BulkString, ResultType.VerbatimString, ANY, "protocol", "verbatim")] + + [InlineData("debug", RedisProtocol.Resp2, ResultType.Integer, ResultType.Integer, true, "protocol", "true")] + [InlineData("debug", RedisProtocol.Resp3, ResultType.Integer, ResultType.Boolean, true, "protocol", "true")] + + [InlineData("debug", RedisProtocol.Resp2, ResultType.Integer, ResultType.Integer, false, "protocol", "false")] + [InlineData("debug", RedisProtocol.Resp3, ResultType.Integer, ResultType.Boolean, false, "protocol", "false")] + + public async Task CheckCommandResult(string command, RedisProtocol protocol, ResultType resp2, ResultType resp3, object? expected, params object[] args) + { + var muxer = Create(protocol: protocol); + var ep = muxer.GetServerEndPoint(muxer.GetEndPoints().Single()); + if (command == "debug" && args.Length > 0 && args[0] is "protocol" && !ep.GetFeatures().Resp3 /* v6 check */) + { + Assert.Skip("debug protocol not available"); + } + Assert.Equal(protocol, ep.Protocol); + + var db = muxer.GetDatabase(); + if (args.Length > 0) + { + var origKey = (string)args[0]; + switch (origKey) + { + case "ikey": + case "skey": + case "hkey": + case "nkey": + var newKey = Me() + "_" + origKey; // disambiguate + args[0] = newKey; + await db.KeyDeleteAsync(newKey); // remove + switch (origKey) // initialize + { + case "ikey": + await db.StringSetAsync(newKey, "40"); + break; + case "skey": + await db.SetAddAsync(newKey, ["a", "b", "c"]); + break; + case "hkey": + await db.HashSetAsync(newKey, [new("a", 1), new("b", 2), new("c", 3)]); + break; + } + break; + } + } + var result = await db.ExecuteAsync(command, args); + Assert.Equal(resp2, result.Resp2Type); + Assert.Equal(resp3, result.Resp3Type); + + switch (expected) + { + case null: + Assert.True(result.IsNull); + break; + case ANY: + // not checked beyond type + break; + case EMPTY_ARR: + Assert.Equal(0, result.Length); + break; + case ARR_123: + Assert.Equal(3, 
result.Length); + for (int i = 0; i < result.Length; i++) + { + Assert.Equal(i + 1, result[i].AsInt32()); + } + break; + case STR_DAVE: + var scontent = result.ToString(); + Log(scontent); + Assert.NotNull(scontent); + var isExpectedContent = scontent.StartsWith("Dave, ") || scontent.StartsWith("I'm sorry, Dave"); + Assert.True(isExpectedContent); + Log(scontent); + + scontent = result.ToString(out var type); + Assert.NotNull(scontent); + isExpectedContent = scontent.StartsWith("Dave, ") || scontent.StartsWith("I'm sorry, Dave"); + Assert.True(isExpectedContent); + Log(scontent); + if (protocol == RedisProtocol.Resp3) + { + Assert.Equal("txt", type); + } + else + { + Assert.Null(type); + } + break; + case SET_ABC: + Assert.Equal(3, result.Length); + var arr = result.AsStringArray()!; + Assert.Contains("a", arr); + Assert.Contains("b", arr); + Assert.Contains("c", arr); + break; + case MAP_ABC: + var map = result.ToDictionary(); + Assert.Equal(3, map.Count); + Assert.True(map.TryGetValue("a", out var value)); + Assert.Equal(1, value.AsInt32()); + Assert.True(map.TryGetValue("b", out value)); + Assert.Equal(2, value.AsInt32()); + Assert.True(map.TryGetValue("c", out value)); + Assert.Equal(3, value.AsInt32()); + break; + case string s: + Assert.Equal(s, result.AsString()); + break; + case int i: + Assert.Equal(i, result.AsInt32()); + break; + case bool b: + Assert.Equal(b, result.AsBoolean()); + Assert.Equal(b ? 1 : 0, result.AsInt32()); + Assert.Equal(b ? 
1 : 0, result.AsInt64()); + break; + } + } + +#pragma warning disable SA1310 // Field names should not contain underscore + private const string SET_ABC = nameof(SET_ABC); + private const string ARR_123 = nameof(ARR_123); + private const string MAP_ABC = nameof(MAP_ABC); + private const string EMPTY_ARR = nameof(EMPTY_ARR); + private const string STR_DAVE = nameof(STR_DAVE); + private const string ANY = nameof(ANY); +#pragma warning restore SA1310 // Field names should not contain underscore +} diff --git a/tests/StackExchange.Redis.Tests/ResultBoxTests.cs b/tests/StackExchange.Redis.Tests/ResultBoxTests.cs new file mode 100644 index 000000000..adb1b309f --- /dev/null +++ b/tests/StackExchange.Redis.Tests/ResultBoxTests.cs @@ -0,0 +1,95 @@ +using System; +using System.Threading; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class ResultBoxTests +{ + [Fact] + public void SyncResultBox() + { + var msg = Message.Create(-1, CommandFlags.None, RedisCommand.PING); + var box = SimpleResultBox.Get(); + Assert.False(box.IsAsync); + + int activated = 0; + lock (box) + { + Task.Run(() => + { + lock (box) + { + // release the worker to start work + Monitor.PulseAll(box); + + // wait for the completion signal + if (Monitor.Wait(box, TimeSpan.FromSeconds(10))) + { + Interlocked.Increment(ref activated); + } + } + }); + Assert.True(Monitor.Wait(box, TimeSpan.FromSeconds(10)), "failed to handover lock to worker"); + } + + // check that continuation was not already signalled + Thread.Sleep(100); + Assert.Equal(0, Volatile.Read(ref activated)); + + msg.SetSource(ResultProcessor.DemandOK, box); + Assert.True(msg.TrySetResult("abc")); + + // check that TrySetResult did not signal continuation + Thread.Sleep(100); + Assert.Equal(0, Volatile.Read(ref activated)); + + // check that complete signals continuation + msg.Complete(); + Thread.Sleep(100); + Assert.Equal(1, Volatile.Read(ref activated)); + + var s = box.GetResult(out var ex); + 
Assert.Null(ex); + Assert.NotNull(s); + Assert.Equal("abc", s); + } + + [Fact] + public void TaskResultBox() + { + // TaskResultBox currently uses a staging field for values before activations are + // signalled; High Integrity Mode *demands* this behaviour, so: validate that it + // works correctly + var msg = Message.Create(-1, CommandFlags.None, RedisCommand.PING); + var box = TaskResultBox.Create(out var tcs, null); + Assert.True(box.IsAsync); + + msg.SetSource(ResultProcessor.DemandOK, box); + Assert.True(msg.TrySetResult("abc")); + + // check that continuation was not already signalled + Thread.Sleep(100); + Assert.False(tcs.Task.IsCompleted); + + msg.SetSource(ResultProcessor.DemandOK, box); + Assert.True(msg.TrySetResult("abc")); + + // check that TrySetResult did not signal continuation + Thread.Sleep(100); + Assert.False(tcs.Task.IsCompleted); + + // check that complete signals continuation + msg.Complete(); + Thread.Sleep(100); + Assert.True(tcs.Task.IsCompleted); + + var s = box.GetResult(out var ex); + Assert.Null(ex); + Assert.NotNull(s); + Assert.Equal("abc", s); + + Assert.Equal("abc", tcs.Task.Result); // we already checked IsCompleted + } +} diff --git a/tests/StackExchange.Redis.Tests/RoleTests.cs b/tests/StackExchange.Redis.Tests/RoleTests.cs new file mode 100644 index 000000000..198ae6da7 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/RoleTests.cs @@ -0,0 +1,81 @@ +using System.Linq; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class Roles(ITestOutputHelper output, SharedConnectionFixture fixture) : TestBase(output, fixture) +{ + protected override string GetConfiguration() => TestConfig.Current.PrimaryServerAndPort + "," + TestConfig.Current.ReplicaServerAndPort; + + [Theory] + [InlineData(true)] + [InlineData(false)] + public async Task PrimaryRole(bool allowAdmin) // should work with or without admin now + { + await using var conn = Create(allowAdmin: allowAdmin); + var servers = 
conn.GetServers(); + Log("Server list:"); + foreach (var s in servers) + { + Log($" Server: {s.EndPoint} (isConnected: {s.IsConnected}, isReplica: {s.IsReplica})"); + } + var server = servers.First(conn => !conn.IsReplica); + var role = server.Role(); + Log($"Chosen primary: {server.EndPoint} (role: {role})"); + if (allowAdmin) + { + Log($"Info (Replication) dump for {server.EndPoint}:"); + Log(server.InfoRaw("Replication")); + Log(""); + + foreach (var s in servers) + { + if (s.IsReplica) + { + Log($"Info (Replication) dump for {s.EndPoint}:"); + Log(s.InfoRaw("Replication")); + Log(""); + } + } + } + Assert.NotNull(role); + Assert.Equal(role.Value, RedisLiterals.master); + var primary = role as Role.Master; + Assert.NotNull(primary); + Assert.NotNull(primary.Replicas); + + // Only do this check for Redis > 4 (to exclude Redis 3.x on Windows). + // Unrelated to this test, the replica isn't connecting and we'll revisit swapping the server out. + // TODO: MemuraiDeveloper check + if (server.Version > RedisFeatures.v4_0_0) + { + Log($"Searching for: {TestConfig.Current.ReplicaServer}:{TestConfig.Current.ReplicaPort}"); + Log($"Replica count: {primary.Replicas.Count}"); + + Assert.NotEmpty(primary.Replicas); + foreach (var replica in primary.Replicas) + { + Log($" Replica: {replica.Ip}:{replica.Port} (offset: {replica.ReplicationOffset})"); + Log(replica.ToString()); + } + Assert.Contains(primary.Replicas, r => + r.Ip == TestConfig.Current.ReplicaServer && + r.Port == TestConfig.Current.ReplicaPort); + } + } + + [Fact] + public async Task ReplicaRole() + { + await using var conn = await ConnectionMultiplexer.ConnectAsync($"{TestConfig.Current.ReplicaServerAndPort},allowAdmin=true"); + var server = conn.GetServers().First(conn => conn.IsReplica); + + var role = server.Role(); + Assert.NotNull(role); + var replica = role as Role.Replica; + Assert.NotNull(replica); + Assert.Equal(replica.MasterIp, TestConfig.Current.PrimaryServer); + Assert.Equal(replica.MasterPort, 
TestConfig.Current.PrimaryPort); + } +} diff --git a/tests/StackExchange.Redis.Tests/Roles.cs b/tests/StackExchange.Redis.Tests/Roles.cs deleted file mode 100644 index 686960ce9..000000000 --- a/tests/StackExchange.Redis.Tests/Roles.cs +++ /dev/null @@ -1,50 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Linq; -using System.Text; -using System.Threading.Tasks; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - [Collection(SharedConnectionFixture.Key)] - public class Roles : TestBase - { - public Roles(ITestOutputHelper output, SharedConnectionFixture fixture) : base(output, fixture) { } - - [Theory] - [InlineData(true)] - [InlineData(false)] - public void MasterRole(bool allowAdmin) // should work with or without admin now - { - using var muxer = Create(allowAdmin: allowAdmin); - var server = muxer.GetServer(TestConfig.Current.MasterServerAndPort); - - var role = server.Role(); - Assert.NotNull(role); - Assert.Equal(role.Value, RedisLiterals.master); - var master = role as Role.Master; - Assert.NotNull(master); - Assert.NotNull(master.Replicas); - Assert.Contains(master.Replicas, r => - r.Ip == TestConfig.Current.ReplicaServer && - r.Port == TestConfig.Current.ReplicaPort); - } - - [Fact] - public void ReplicaRole() - { - var connString = $"{TestConfig.Current.ReplicaServerAndPort},allowAdmin=true"; - using var muxer = ConnectionMultiplexer.Connect(connString); - var server = muxer.GetServer(TestConfig.Current.ReplicaServerAndPort); - - var role = server.Role(); - Assert.NotNull(role); - var replica = role as Role.Replica; - Assert.NotNull(replica); - Assert.Equal(replica.MasterIp, TestConfig.Current.MasterServer); - Assert.Equal(replica.MasterPort, TestConfig.Current.MasterPort); - } - } -} diff --git a/tests/StackExchange.Redis.Tests/SSDB.cs b/tests/StackExchange.Redis.Tests/SSDB.cs deleted file mode 100644 index 23bdd684d..000000000 --- a/tests/StackExchange.Redis.Tests/SSDB.cs +++ /dev/null @@ -1,31 
+0,0 @@ -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - public class SSDB : TestBase - { - public SSDB(ITestOutputHelper output) : base (output) { } - - [Fact] - public void ConnectToSSDB() - { - Skip.IfNoConfig(nameof(TestConfig.Config.SSDBServer), TestConfig.Current.SSDBServer); - - var config = new ConfigurationOptions - { - EndPoints = { { TestConfig.Current.SSDBServer, TestConfig.Current.SSDBPort } }, - CommandMap = CommandMap.SSDB - }; - RedisKey key = Me(); - using (var conn = ConnectionMultiplexer.Connect(config)) - { - var db = conn.GetDatabase(); - db.KeyDelete(key, CommandFlags.FireAndForget); - Assert.True(db.StringGet(key).IsNull); - db.StringSet(key, "abc", flags: CommandFlags.FireAndForget); - Assert.Equal("abc", db.StringGet(key)); - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/SSDBTests.cs b/tests/StackExchange.Redis.Tests/SSDBTests.cs new file mode 100644 index 000000000..a1f2f3d5e --- /dev/null +++ b/tests/StackExchange.Redis.Tests/SSDBTests.cs @@ -0,0 +1,26 @@ +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class SSDBTests(ITestOutputHelper output) : TestBase(output) +{ + [Fact] + public async Task ConnectToSSDB() + { + Skip.IfNoConfig(nameof(TestConfig.Config.SSDBServer), TestConfig.Current.SSDBServer); + + await using var conn = await ConnectionMultiplexer.ConnectAsync(new ConfigurationOptions + { + EndPoints = { { TestConfig.Current.SSDBServer, TestConfig.Current.SSDBPort } }, + CommandMap = CommandMap.SSDB, + }); + + RedisKey key = Me(); + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + Assert.True(db.StringGet(key).IsNull); + db.StringSet(key, "abc", flags: CommandFlags.FireAndForget); + Assert.Equal("abc", db.StringGet(key)); + } +} diff --git a/tests/StackExchange.Redis.Tests/SSL.cs b/tests/StackExchange.Redis.Tests/SSL.cs deleted file mode 100644 index 634e9b5cc..000000000 --- 
a/tests/StackExchange.Redis.Tests/SSL.cs +++ /dev/null @@ -1,485 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Diagnostics; -using System.Globalization; -using System.IO; -using System.Linq; -using System.Net; -using System.Net.Security; -using System.Reflection; -using System.Security.Authentication; -using System.Security.Cryptography.X509Certificates; -using System.Threading.Tasks; -using StackExchange.Redis.Tests.Helpers; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - public class SSL : TestBase - { - public SSL(ITestOutputHelper output) : base (output) { } - - [Theory] - [InlineData(null, true)] // auto-infer port (but specify 6380) - [InlineData(6380, true)] // all explicit - // (note the 6379 port is closed) - public void ConnectToAzure(int? port, bool ssl) - { - Skip.IfNoConfig(nameof(TestConfig.Config.AzureCacheServer), TestConfig.Current.AzureCacheServer); - Skip.IfNoConfig(nameof(TestConfig.Config.AzureCachePassword), TestConfig.Current.AzureCachePassword); - - var options = new ConfigurationOptions(); - options.CertificateValidation += ShowCertFailures(Writer); - if (port == null) - { - options.EndPoints.Add(TestConfig.Current.AzureCacheServer); - } - else - { - options.EndPoints.Add(TestConfig.Current.AzureCacheServer, port.Value); - } - options.Ssl = ssl; - options.Password = TestConfig.Current.AzureCachePassword; - Log(options.ToString()); - using (var connection = ConnectionMultiplexer.Connect(options)) - { - var ttl = connection.GetDatabase().Ping(); - Log(ttl.ToString()); - } - } - - [Theory] - [InlineData(false, false)] - [InlineData(true, false)] - [InlineData(true, true)] - public async Task ConnectToSSLServer(bool useSsl, bool specifyHost) - { - var server = TestConfig.Current.SslServer; - int? port = TestConfig.Current.SslPort; - string password = ""; - bool isAzure = false; - if (string.IsNullOrWhiteSpace(server) && useSsl) - { - // we can bounce it past azure instead? 
- server = TestConfig.Current.AzureCacheServer; - password = TestConfig.Current.AzureCachePassword; - port = null; - isAzure = true; - } - Skip.IfNoConfig(nameof(TestConfig.Config.SslServer), server); - - var config = new ConfigurationOptions - { - AllowAdmin = true, - SyncTimeout = Debugger.IsAttached ? int.MaxValue : 5000, - Password = password, - }; - var map = new Dictionary - { - ["config"] = null // don't rely on config working - }; - if (!isAzure) map["cluster"] = null; - config.CommandMap = CommandMap.Create(map); - if (port != null) config.EndPoints.Add(server, port.Value); - else config.EndPoints.Add(server); - - if (useSsl) - { - config.Ssl = useSsl; - if (specifyHost) - { - config.SslHost = server; - } - config.CertificateValidation += (sender, cert, chain, errors) => - { - Log("errors: " + errors); - Log("cert issued to: " + cert.Subject); - return true; // fingers in ears, pretend we don't know this is wrong - }; - } - - var configString = config.ToString(); - Log("config: " + configString); - var clone = ConfigurationOptions.Parse(configString); - Assert.Equal(configString, clone.ToString()); - - using (var log = new StringWriter()) - using (var muxer = ConnectionMultiplexer.Connect(config, log)) - { - Log("Connect log:"); - lock (log) - { - Log(log.ToString()); - } - Log("===="); - muxer.ConnectionFailed += OnConnectionFailed; - muxer.InternalError += OnInternalError; - var db = muxer.GetDatabase(); - await db.PingAsync().ForAwait(); - using (var file = File.Create("ssl-" + useSsl + "-" + specifyHost + ".zip")) - { - muxer.ExportConfiguration(file); - } - RedisKey key = "SE.Redis"; - - const int AsyncLoop = 2000; - // perf; async - await db.KeyDeleteAsync(key).ForAwait(); - var watch = Stopwatch.StartNew(); - for (int i = 0; i < AsyncLoop; i++) - { - try - { - await db.StringIncrementAsync(key, flags: CommandFlags.FireAndForget).ForAwait(); - } - catch (Exception ex) - { - Log($"Failure on i={i}: {ex.Message}"); - throw; - } - } - // need to do this 
inside the timer to measure the TTLB - long value = (long)await db.StringGetAsync(key).ForAwait(); - watch.Stop(); - Assert.Equal(AsyncLoop, value); - Log("F&F: {0} INCR, {1:###,##0}ms, {2} ops/s; final value: {3}", - AsyncLoop, - watch.ElapsedMilliseconds, - (long)(AsyncLoop / watch.Elapsed.TotalSeconds), - value); - - // perf: sync/multi-threaded - // TestConcurrent(db, key, 30, 10); - //TestConcurrent(db, key, 30, 20); - //TestConcurrent(db, key, 30, 30); - //TestConcurrent(db, key, 30, 40); - //TestConcurrent(db, key, 30, 50); - } - } - - //private void TestConcurrent(IDatabase db, RedisKey key, int SyncLoop, int Threads) - //{ - // long value; - // db.KeyDelete(key, CommandFlags.FireAndForget); - // var time = RunConcurrent(delegate - // { - // for (int i = 0; i < SyncLoop; i++) - // { - // db.StringIncrement(key); - // } - // }, Threads, timeout: 45000); - // value = (long)db.StringGet(key); - // Assert.Equal(SyncLoop * Threads, value); - // Log("Sync: {0} INCR using {1} threads, {2:###,##0}ms, {3} ops/s; final value: {4}", - // SyncLoop * Threads, Threads, - // (long)time.TotalMilliseconds, - // (long)((SyncLoop * Threads) / time.TotalSeconds), - // value); - //} - - [Fact] - public void RedisLabsSSL() - { - Skip.IfNoConfig(nameof(TestConfig.Config.RedisLabsSslServer), TestConfig.Current.RedisLabsSslServer); - Skip.IfNoConfig(nameof(TestConfig.Config.RedisLabsPfxPath), TestConfig.Current.RedisLabsPfxPath); - - var cert = new X509Certificate2(TestConfig.Current.RedisLabsPfxPath, ""); - Assert.NotNull(cert); - Writer.WriteLine("Thumbprint: " + cert.Thumbprint); - - int timeout = 5000; - if (Debugger.IsAttached) timeout *= 100; - var options = new ConfigurationOptions - { - EndPoints = { { TestConfig.Current.RedisLabsSslServer, TestConfig.Current.RedisLabsSslPort } }, - ConnectTimeout = timeout, - AllowAdmin = true, - CommandMap = CommandMap.Create(new HashSet { - "subscribe", "unsubscribe", "cluster" - }, false) - }; - - 
options.TrustIssuer("redislabs_ca.pem"); - - if (!Directory.Exists(Me())) Directory.CreateDirectory(Me()); -#if LOGOUTPUT - ConnectionMultiplexer.EchoPath = Me(); -#endif - options.Ssl = true; - options.CertificateSelection += delegate - { - return cert; - }; - RedisKey key = Me(); - using (var conn = ConnectionMultiplexer.Connect(options)) - { - var db = conn.GetDatabase(); - db.KeyDelete(key, CommandFlags.FireAndForget); - string s = db.StringGet(key); - Assert.Null(s); - db.StringSet(key, "abc", flags: CommandFlags.FireAndForget); - s = db.StringGet(key); - Assert.Equal("abc", s); - - var latency = db.Ping(); - Log("RedisLabs latency: {0:###,##0.##}ms", latency.TotalMilliseconds); - - using (var file = File.Create("RedisLabs.zip")) - { - conn.ExportConfiguration(file); - } - } - } - - [Theory] - [InlineData(false)] - [InlineData(true)] - public void RedisLabsEnvironmentVariableClientCertificate(bool setEnv) - { - try - { - Skip.IfNoConfig(nameof(TestConfig.Config.RedisLabsSslServer), TestConfig.Current.RedisLabsSslServer); - Skip.IfNoConfig(nameof(TestConfig.Config.RedisLabsPfxPath), TestConfig.Current.RedisLabsPfxPath); - - if (setEnv) - { - Environment.SetEnvironmentVariable("SERedis_ClientCertPfxPath", TestConfig.Current.RedisLabsPfxPath); - Environment.SetEnvironmentVariable("SERedis_IssuerCertPath", "redislabs_ca.pem"); - // check env worked - Assert.Equal(TestConfig.Current.RedisLabsPfxPath, Environment.GetEnvironmentVariable("SERedis_ClientCertPfxPath")); - Assert.Equal("redislabs_ca.pem", Environment.GetEnvironmentVariable("SERedis_IssuerCertPath")); - } - int timeout = 5000; - if (Debugger.IsAttached) timeout *= 100; - var options = new ConfigurationOptions - { - EndPoints = { { TestConfig.Current.RedisLabsSslServer, TestConfig.Current.RedisLabsSslPort } }, - ConnectTimeout = timeout, - AllowAdmin = true, - CommandMap = CommandMap.Create(new HashSet { - "subscribe", "unsubscribe", "cluster" - }, false) - }; - - if (!Directory.Exists(Me())) 
Directory.CreateDirectory(Me()); -#if LOGOUTPUT - ConnectionMultiplexer.EchoPath = Me(); -#endif - options.Ssl = true; - RedisKey key = Me(); - using (var conn = ConnectionMultiplexer.Connect(options)) - { - if (!setEnv) Assert.True(false, "Could not set environment"); - - var db = conn.GetDatabase(); - db.KeyDelete(key, CommandFlags.FireAndForget); - string s = db.StringGet(key); - Assert.Null(s); - db.StringSet(key, "abc"); - s = db.StringGet(key); - Assert.Equal("abc", s); - - var latency = db.Ping(); - Log("RedisLabs latency: {0:###,##0.##}ms", latency.TotalMilliseconds); - - using (var file = File.Create("RedisLabs.zip")) - { - conn.ExportConfiguration(file); - } - } - } - catch (RedisConnectionException ex) - { - if (setEnv || ex.FailureType != ConnectionFailureType.UnableToConnect) - { - throw; - } - } - finally - { - Environment.SetEnvironmentVariable("SERedis_ClientCertPfxPath", null); - } - } - - [Fact] - public void SSLHostInferredFromEndpoints() - { - var options = new ConfigurationOptions() - { - EndPoints = { - { "mycache.rediscache.windows.net", 15000}, - { "mycache.rediscache.windows.net", 15001 }, - { "mycache.rediscache.windows.net", 15002 }, - } - }; - options.Ssl = true; - Assert.True(options.SslHost == "mycache.rediscache.windows.net"); - options = new ConfigurationOptions() - { - EndPoints = { - { "121.23.23.45", 15000}, - } - }; - Assert.True(options.SslHost == null); - } - - private void Check(string name, object x, object y) - { - Writer.WriteLine($"{name}: {(x == null ? "(null)" : x.ToString())} vs {(y == null ? 
"(null)" : y.ToString())}"); - Assert.Equal(x, y); - } - - [Fact] - public void Issue883_Exhaustive() - { - var old = CultureInfo.CurrentCulture; - try - { - var all = CultureInfo.GetCultures(CultureTypes.AllCultures); - Writer.WriteLine($"Checking {all.Length} cultures..."); - foreach (var ci in all) - { - Writer.WriteLine("Tessting: " + ci.Name); - CultureInfo.CurrentCulture = ci; - - var a = ConnectionMultiplexer.PrepareConfig("myDNS:883,password=mypassword,connectRetry=3,connectTimeout=5000,syncTimeout=5000,defaultDatabase=0,ssl=true,abortConnect=false"); - var b = ConnectionMultiplexer.PrepareConfig(new ConfigurationOptions - { - EndPoints = { { "myDNS", 883 } }, - Password = "mypassword", - ConnectRetry = 3, - ConnectTimeout = 5000, - SyncTimeout = 5000, - DefaultDatabase = 0, - Ssl = true, - AbortOnConnectFail = false, - }); - Writer.WriteLine($"computed: {b.ToString(true)}"); - - Writer.WriteLine("Checking endpoints..."); - var c = a.EndPoints.Cast().Single(); - var d = b.EndPoints.Cast().Single(); - Check(nameof(c.Host), c.Host, d.Host); - Check(nameof(c.Port), c.Port, d.Port); - Check(nameof(c.AddressFamily), c.AddressFamily, d.AddressFamily); - - var fields = typeof(ConfigurationOptions).GetFields(BindingFlags.Instance | BindingFlags.Public | BindingFlags.NonPublic); - Writer.WriteLine($"Comparing {fields.Length} fields..."); - Array.Sort(fields, (x, y) => string.CompareOrdinal(x.Name, y.Name)); - foreach (var field in fields) - { - Check(field.Name, field.GetValue(a), field.GetValue(b)); - } - } - } - finally - { - CultureInfo.CurrentCulture = old; - } - } - - [Fact] - public void SSLParseViaConfig_Issue883_ConfigObject() - { - Skip.IfNoConfig(nameof(TestConfig.Config.AzureCacheServer), TestConfig.Current.AzureCacheServer); - Skip.IfNoConfig(nameof(TestConfig.Config.AzureCachePassword), TestConfig.Current.AzureCachePassword); - - var options = new ConfigurationOptions - { - AbortOnConnectFail = false, - Ssl = true, - ConnectRetry = 3, - ConnectTimeout = 
5000, - SyncTimeout = 5000, - DefaultDatabase = 0, - EndPoints = { { TestConfig.Current.AzureCacheServer, 6380 } }, - Password = TestConfig.Current.AzureCachePassword - }; - options.CertificateValidation += ShowCertFailures(Writer); - using (var conn = ConnectionMultiplexer.Connect(options)) - { - conn.GetDatabase().Ping(); - } - } - - public static RemoteCertificateValidationCallback ShowCertFailures(TextWriterOutputHelper output) { - if (output == null) return null; - - return (sender, certificate, chain, sslPolicyErrors) => - { - void WriteStatus(X509ChainStatus[] status) - { - if (status != null) - { - for (int i = 0; i < status.Length; i++) - { - var item = status[i]; - output.WriteLine($"\tstatus {i}: {item.Status}, {item.StatusInformation}"); - } - } - } - lock (output) - { - if (certificate != null) - { - output.WriteLine($"Subject: {certificate.Subject}"); - } - output.WriteLine($"Policy errors: {sslPolicyErrors}"); - if (chain != null) - { - WriteStatus(chain.ChainStatus); - - var elements = chain.ChainElements; - if (elements != null) - { - int index = 0; - foreach (var item in elements) - { - output.WriteLine($"{index++}: {item.Certificate.Subject}; {item.Information}"); - WriteStatus(item.ChainElementStatus); - } - } - } - } - return sslPolicyErrors == SslPolicyErrors.None; - }; - } - - [Fact] - public void SSLParseViaConfig_Issue883_ConfigString() - { - Skip.IfNoConfig(nameof(TestConfig.Config.AzureCacheServer), TestConfig.Current.AzureCacheServer); - Skip.IfNoConfig(nameof(TestConfig.Config.AzureCachePassword), TestConfig.Current.AzureCachePassword); - - var configString = $"{TestConfig.Current.AzureCacheServer}:6380,password={TestConfig.Current.AzureCachePassword},connectRetry=3,connectTimeout=5000,syncTimeout=5000,defaultDatabase=0,ssl=true,abortConnect=false"; - var options = ConfigurationOptions.Parse(configString); - options.CertificateValidation += ShowCertFailures(Writer); - using (var conn = ConnectionMultiplexer.Connect(options)) - { - 
conn.GetDatabase().Ping(); - } - } - - [Fact] - public void ConfigObject_Issue1407_ToStringIncludesSslProtocols() - { - var sslProtocols = SslProtocols.Tls12 | SslProtocols.Tls; - var sourceOptions = new ConfigurationOptions - { - AbortOnConnectFail = false, - Ssl = true, - SslProtocols = sslProtocols, - ConnectRetry = 3, - ConnectTimeout = 5000, - SyncTimeout = 5000, - DefaultDatabase = 0, - EndPoints = { { "endpoint.test", 6380 } }, - Password = "123456" - }; - - var targetOptions = ConfigurationOptions.Parse(sourceOptions.ToString()); - Assert.Equal(sourceOptions.SslProtocols, targetOptions.SslProtocols); - } - } -} diff --git a/tests/StackExchange.Redis.Tests/SSLTests.cs b/tests/StackExchange.Redis.Tests/SSLTests.cs new file mode 100644 index 000000000..96d964b23 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/SSLTests.cs @@ -0,0 +1,569 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Globalization; +using System.IO; +using System.Linq; +using System.Net; +using System.Net.Security; +using System.Reflection; +using System.Security.Authentication; +using System.Security.Cryptography.X509Certificates; +using System.Text; +using System.Threading.Tasks; +using StackExchange.Redis.Tests.Helpers; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class SSLTests(ITestOutputHelper output, SSLTests.SSLServerFixture fixture) : TestBase(output), IClassFixture +{ + private SSLServerFixture Fixture { get; } = fixture; + + [Theory] // (note the 6379 port is closed) + [InlineData(null, true)] // auto-infer port (but specify 6380) + [InlineData(6380, true)] // all explicit + public async Task ConnectToAzure(int? 
port, bool ssl) + { + Skip.IfNoConfig(nameof(TestConfig.Config.AzureCacheServer), TestConfig.Current.AzureCacheServer); + Skip.IfNoConfig(nameof(TestConfig.Config.AzureCachePassword), TestConfig.Current.AzureCachePassword); + + var options = new ConfigurationOptions(); + options.CertificateValidation += ShowCertFailures(Writer); + if (port == null) + { + options.EndPoints.Add(TestConfig.Current.AzureCacheServer); + } + else + { + options.EndPoints.Add(TestConfig.Current.AzureCacheServer, port.Value); + } + options.Ssl = ssl; + options.Password = TestConfig.Current.AzureCachePassword; + Log(options.ToString()); + using (var connection = ConnectionMultiplexer.Connect(options)) + { + var ttl = await connection.GetDatabase().PingAsync(); + Log(ttl.ToString()); + } + } + + [Theory] + [InlineData(false, false)] + [InlineData(true, false)] + [InlineData(true, true)] + public async Task ConnectToSSLServer(bool useSsl, bool specifyHost) + { + Fixture.SkipIfNoServer(); + + var server = TestConfig.Current.SslServer; + int? port = TestConfig.Current.SslPort; + string? password = ""; + bool isAzure = false; + if (string.IsNullOrWhiteSpace(server) && useSsl) + { + // we can bounce it past azure instead? + server = TestConfig.Current.AzureCacheServer; + password = TestConfig.Current.AzureCachePassword; + port = null; + isAzure = true; + } + Skip.IfNoConfig(nameof(TestConfig.Config.SslServer), server); + + var config = new ConfigurationOptions + { + AllowAdmin = true, + SyncTimeout = Debugger.IsAttached ? 
int.MaxValue : 2000, + Password = password, + }; + var map = new Dictionary + { + ["config"] = null, // don't rely on config working + }; + if (!isAzure) map["cluster"] = null; + config.CommandMap = CommandMap.Create(map); + if (port != null) config.EndPoints.Add(server, port.Value); + else config.EndPoints.Add(server); + + if (useSsl) + { + config.Ssl = useSsl; + if (specifyHost) + { + config.SslHost = server; + } + config.CertificateValidation += (sender, cert, chain, errors) => + { + Log("errors: " + errors); + Log("cert issued to: " + cert?.Subject); + return true; // fingers in ears, pretend we don't know this is wrong + }; + } + + var configString = config.ToString(); + Log("config: " + configString); + var clone = ConfigurationOptions.Parse(configString); + Assert.Equal(configString, clone.ToString()); + + var log = new StringBuilder(); + Writer.EchoTo(log); + + if (useSsl) + { + await using var conn = await ConnectionMultiplexer.ConnectAsync(config, Writer); + + Log("Connect log:"); + lock (log) + { + Log(log.ToString()); + } + Log("===="); + conn.ConnectionFailed += OnConnectionFailed; + conn.InternalError += OnInternalError; + var db = conn.GetDatabase(); + await db.PingAsync().ForAwait(); + using (var file = File.Create("ssl-" + useSsl + "-" + specifyHost + ".zip")) + { + conn.ExportConfiguration(file); + } + RedisKey key = "SE.Redis"; + + const int AsyncLoop = 2000; + // perf; async + await db.KeyDeleteAsync(key).ForAwait(); + var watch = Stopwatch.StartNew(); + for (int i = 0; i < AsyncLoop; i++) + { + try + { + await db.StringIncrementAsync(key, flags: CommandFlags.FireAndForget).ForAwait(); + } + catch (Exception ex) + { + Log($"Failure on i={i}: {ex.Message}"); + throw; + } + } + // need to do this inside the timer to measure the TTLB + long value = (long)await db.StringGetAsync(key).ForAwait(); + watch.Stop(); + Assert.Equal(AsyncLoop, value); + Log($"F&F: {AsyncLoop} INCR, {watch.ElapsedMilliseconds:###,##0}ms, {(long)(AsyncLoop / 
watch.Elapsed.TotalSeconds)} ops/s; final value: {value}"); + + // perf: sync/multi-threaded + // TestConcurrent(db, key, 30, 10); + // TestConcurrent(db, key, 30, 20); + // TestConcurrent(db, key, 30, 30); + // TestConcurrent(db, key, 30, 40); + // TestConcurrent(db, key, 30, 50); + } + else + { + Assert.Throws(() => ConnectionMultiplexer.Connect(config, Writer)); + } + } + +#if NET +#pragma warning disable CS0618 // Type or member is obsolete + // Docker configured with only TLS_AES_256_GCM_SHA384 for testing + [Theory] + [InlineData(SslProtocols.None, true, TlsCipherSuite.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TlsCipherSuite.TLS_AES_256_GCM_SHA384)] + [InlineData(SslProtocols.Tls12, true, TlsCipherSuite.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TlsCipherSuite.TLS_AES_256_GCM_SHA384)] + [InlineData(SslProtocols.Tls13, true, TlsCipherSuite.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TlsCipherSuite.TLS_AES_256_GCM_SHA384)] + [InlineData(SslProtocols.Tls12, false, TlsCipherSuite.TLS_AES_128_CCM_8_SHA256)] + [InlineData(SslProtocols.Tls12, true)] + [InlineData(SslProtocols.Tls13, true)] + [InlineData(SslProtocols.Ssl2, false)] + [InlineData(SslProtocols.Ssl3, false)] + [InlineData(SslProtocols.Tls12 | SslProtocols.Tls13, true)] + [InlineData(SslProtocols.Ssl3 | SslProtocols.Tls12 | SslProtocols.Tls13, true)] + [InlineData(SslProtocols.Ssl2, false, TlsCipherSuite.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TlsCipherSuite.TLS_AES_256_GCM_SHA384)] +#pragma warning restore CS0618 // Type or member is obsolete + [System.Diagnostics.CodeAnalysis.SuppressMessage("Interoperability", "CA1416:Validate platform compatibility", Justification = "Yes, we know.")] + public async Task ConnectSslClientAuthenticationOptions(SslProtocols protocols, bool expectSuccess, params TlsCipherSuite[] tlsCipherSuites) + { + Fixture.SkipIfNoServer(); + + try + { + var config = new ConfigurationOptions() + { + EndPoints = { TestConfig.Current.SslServerAndPort }, + AllowAdmin = true, + ConnectRetry = 1, + 
SyncTimeout = Debugger.IsAttached ? int.MaxValue : 5000, + Ssl = true, + SslClientAuthenticationOptions = host => new SslClientAuthenticationOptions() + { + TargetHost = host, + CertificateRevocationCheckMode = X509RevocationMode.NoCheck, + EnabledSslProtocols = protocols, + CipherSuitesPolicy = tlsCipherSuites?.Length > 0 ? new CipherSuitesPolicy(tlsCipherSuites) : null, + RemoteCertificateValidationCallback = (sender, cert, chain, errors) => + { + Log(" Errors: " + errors); + Log(" Cert issued to: " + cert?.Subject); + return true; + }, + }, + }; + + if (expectSuccess) + { + await using var conn = await ConnectionMultiplexer.ConnectAsync(config, Writer); + + var db = conn.GetDatabase(); + Log("Pinging..."); + var time = await db.PingAsync().ForAwait(); + Log($"Ping time: {time}"); + } + else + { + var ex = await Assert.ThrowsAsync(() => ConnectionMultiplexer.ConnectAsync(config, Writer)); + Log("(Expected) Failure connecting: " + ex.Message); + if (ex.InnerException is PlatformNotSupportedException pnse) + { + Assert.Skip("Expected failure, but also test not supported on this platform: " + pnse.Message); + } + } + } + catch (RedisException ex) when (ex.InnerException is PlatformNotSupportedException pnse) + { + Assert.Skip("Test not supported on this platform: " + pnse.Message); + } + } +#endif + + [Fact] + public async Task RedisLabsSSL() + { + Skip.IfNoConfig(nameof(TestConfig.Config.RedisLabsSslServer), TestConfig.Current.RedisLabsSslServer); + Skip.IfNoConfig(nameof(TestConfig.Config.RedisLabsPfxPath), TestConfig.Current.RedisLabsPfxPath); + +#pragma warning disable SYSLIB0057 + var cert = new X509Certificate2(TestConfig.Current.RedisLabsPfxPath, ""); +#pragma warning restore SYSLIB0057 + Assert.NotNull(cert); + Log("Thumbprint: " + cert.Thumbprint); + + int timeout = 5000; + if (Debugger.IsAttached) timeout *= 100; + var options = new ConfigurationOptions + { + EndPoints = { { TestConfig.Current.RedisLabsSslServer, TestConfig.Current.RedisLabsSslPort } }, + 
ConnectTimeout = timeout, + AllowAdmin = true, + CommandMap = CommandMap.Create( + new HashSet + { + "subscribe", + "unsubscribe", + "cluster", + }, + false), + }; + + options.TrustIssuer("redislabs_ca.pem"); + + if (!Directory.Exists(Me())) Directory.CreateDirectory(Me()); +#if LOGOUTPUT + ConnectionMultiplexer.EchoPath = Me(); +#endif + options.Ssl = true; + options.CertificateSelection += (sender, targetHost, localCertificates, remoteCertificate, acceptableIssuers) => cert; + + await using var conn = ConnectionMultiplexer.Connect(options); + + RedisKey key = Me(); + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + string? s = db.StringGet(key); + Assert.Null(s); + db.StringSet(key, "abc", flags: CommandFlags.FireAndForget); + s = db.StringGet(key); + Assert.Equal("abc", s); + + var latency = await db.PingAsync(); + Log("RedisLabs latency: {0:###,##0.##}ms", latency.TotalMilliseconds); + + using (var file = File.Create("RedisLabs.zip")) + { + conn.ExportConfiguration(file); + } + } + + [Theory] + [InlineData(false)] + [InlineData(true)] + public async Task RedisLabsEnvironmentVariableClientCertificate(bool setEnv) + { + try + { + Skip.IfNoConfig(nameof(TestConfig.Config.RedisLabsSslServer), TestConfig.Current.RedisLabsSslServer); + Skip.IfNoConfig(nameof(TestConfig.Config.RedisLabsPfxPath), TestConfig.Current.RedisLabsPfxPath); + + if (setEnv) + { + Environment.SetEnvironmentVariable("SERedis_ClientCertPfxPath", TestConfig.Current.RedisLabsPfxPath); + Environment.SetEnvironmentVariable("SERedis_IssuerCertPath", "redislabs_ca.pem"); + // check env worked + Assert.Equal(TestConfig.Current.RedisLabsPfxPath, Environment.GetEnvironmentVariable("SERedis_ClientCertPfxPath")); + Assert.Equal("redislabs_ca.pem", Environment.GetEnvironmentVariable("SERedis_IssuerCertPath")); + } + int timeout = 5000; + if (Debugger.IsAttached) timeout *= 100; + var options = new ConfigurationOptions + { + EndPoints = { { TestConfig.Current.RedisLabsSslServer, 
TestConfig.Current.RedisLabsSslPort } }, + ConnectTimeout = timeout, + AllowAdmin = true, + CommandMap = CommandMap.Create( + new HashSet + { + "subscribe", + "unsubscribe", + "cluster", + }, + false), + }; + + if (!Directory.Exists(Me())) Directory.CreateDirectory(Me()); +#if LOGOUTPUT + ConnectionMultiplexer.EchoPath = Me(); +#endif + options.Ssl = true; + + await using var conn = ConnectionMultiplexer.Connect(options); + + RedisKey key = Me(); + if (!setEnv) Assert.Fail("Could not set environment"); + + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + string? s = db.StringGet(key); + Assert.Null(s); + db.StringSet(key, "abc"); + s = db.StringGet(key); + Assert.Equal("abc", s); + + var latency = await db.PingAsync(); + Log("RedisLabs latency: {0:###,##0.##}ms", latency.TotalMilliseconds); + + using (var file = File.Create("RedisLabs.zip")) + { + conn.ExportConfiguration(file); + } + } + catch (RedisConnectionException ex) when (!setEnv && ex.FailureType == ConnectionFailureType.UnableToConnect) + { + } + finally + { + Environment.SetEnvironmentVariable("SERedis_ClientCertPfxPath", null); + } + } + + [Fact] + public void SSLHostInferredFromEndpoints() + { + var options = new ConfigurationOptions + { + EndPoints = + { + { "mycache.rediscache.windows.net", 15000 }, + { "mycache.rediscache.windows.net", 15001 }, + { "mycache.rediscache.windows.net", 15002 }, + }, + Ssl = true, + }; + Assert.Equal("mycache.rediscache.windows.net", options.SslHost); + options = new ConfigurationOptions() + { + EndPoints = { { "121.23.23.45", 15000 } }, + }; + Assert.Null(options.SslHost); + } + + private void Check(string name, object? x, object? y) + { + Log($"{name}: {(x == null ? "(null)" : x.ToString())} vs {(y == null ? 
"(null)" : y.ToString())}"); + Assert.Equal(x, y); + } + + [Fact] + public void Issue883_Exhaustive() + { + var old = CultureInfo.CurrentCulture; + try + { + var fields = typeof(ConfigurationOptions).GetFields(BindingFlags.Instance | BindingFlags.Public | BindingFlags.NonPublic); + var all = CultureInfo.GetCultures(CultureTypes.AllCultures); + Log($"Checking {all.Length} cultures..."); + foreach (var ci in all) + { + Log("Testing: " + ci.Name); + CultureInfo.CurrentCulture = ci; + + var a = ConfigurationOptions.Parse("myDNS:883,password=mypassword,connectRetry=3,connectTimeout=5000,syncTimeout=5000,defaultDatabase=0,ssl=true,abortConnect=false"); + var b = new ConfigurationOptions + { + EndPoints = { { "myDNS", 883 } }, + Password = "mypassword", + ConnectRetry = 3, + ConnectTimeout = 5000, + SyncTimeout = 5000, + DefaultDatabase = 0, + Ssl = true, + AbortOnConnectFail = false, + }; + Log($"computed: {b.ToString(true)}"); + + Log("Checking endpoints..."); + var c = a.EndPoints.Cast().Single(); + var d = b.EndPoints.Cast().Single(); + Check(nameof(c.Host), c.Host, d.Host); + Check(nameof(c.Port), c.Port, d.Port); + Check(nameof(c.AddressFamily), c.AddressFamily, d.AddressFamily); + + Log($"Comparing {fields.Length} fields..."); + Array.Sort(fields, (x, y) => string.CompareOrdinal(x.Name, y.Name)); + foreach (var field in fields) + { + Check(field.Name, field.GetValue(a), field.GetValue(b)); + } + } + } + finally + { + CultureInfo.CurrentCulture = old; + } + } + + [Fact] + public async Task SSLParseViaConfig_Issue883_ConfigObject() + { + Skip.IfNoConfig(nameof(TestConfig.Config.AzureCacheServer), TestConfig.Current.AzureCacheServer); + Skip.IfNoConfig(nameof(TestConfig.Config.AzureCachePassword), TestConfig.Current.AzureCachePassword); + + var options = new ConfigurationOptions + { + AbortOnConnectFail = false, + Ssl = true, + ConnectRetry = 3, + ConnectTimeout = 5000, + SyncTimeout = 5000, + DefaultDatabase = 0, + EndPoints = { { TestConfig.Current.AzureCacheServer, 
6380 } }, + Password = TestConfig.Current.AzureCachePassword, + }; + options.CertificateValidation += ShowCertFailures(Writer); + + await using var conn = ConnectionMultiplexer.Connect(options); + + await conn.GetDatabase().PingAsync(); + } + + public static RemoteCertificateValidationCallback? ShowCertFailures(TextWriterOutputHelper output) + { + if (output == null) + { + return null; + } + + return (sender, certificate, chain, sslPolicyErrors) => + { + void WriteStatus(X509ChainStatus[] status) + { + if (status != null) + { + for (int i = 0; i < status.Length; i++) + { + var item = status[i]; + Log(output, $"\tstatus {i}: {item.Status}, {item.StatusInformation}"); + } + } + } + lock (output) + { + if (certificate != null) + { + Log(output, $"Subject: {certificate.Subject}"); + } + Log(output, $"Policy errors: {sslPolicyErrors}"); + if (chain != null) + { + WriteStatus(chain.ChainStatus); + + var elements = chain.ChainElements; + if (elements != null) + { + int index = 0; + foreach (var item in elements) + { + Log(output, $"{index++}: {item.Certificate.Subject}; {item.Information}"); + WriteStatus(item.ChainElementStatus); + } + } + } + } + return sslPolicyErrors == SslPolicyErrors.None; + }; + } + + [Fact] + public async Task SSLParseViaConfig_Issue883_ConfigString() + { + Skip.IfNoConfig(nameof(TestConfig.Config.AzureCacheServer), TestConfig.Current.AzureCacheServer); + Skip.IfNoConfig(nameof(TestConfig.Config.AzureCachePassword), TestConfig.Current.AzureCachePassword); + + var configString = $"{TestConfig.Current.AzureCacheServer}:6380,password={TestConfig.Current.AzureCachePassword},connectRetry=3,connectTimeout=5000,syncTimeout=5000,defaultDatabase=0,ssl=true,abortConnect=false"; + var options = ConfigurationOptions.Parse(configString); + options.CertificateValidation += ShowCertFailures(Writer); + + await using var conn = ConnectionMultiplexer.Connect(options); + + await conn.GetDatabase().PingAsync(); + } + + [Fact] + public void 
ConfigObject_Issue1407_ToStringIncludesSslProtocols() + { + const SslProtocols sslProtocols = SslProtocols.Tls12 | SslProtocols.Tls13; + var sourceOptions = new ConfigurationOptions + { + AbortOnConnectFail = false, + Ssl = true, + SslProtocols = sslProtocols, + ConnectRetry = 3, + ConnectTimeout = 5000, + SyncTimeout = 5000, + DefaultDatabase = 0, + EndPoints = { { "endpoint.test", 6380 } }, + Password = "123456", + }; + + var targetOptions = ConfigurationOptions.Parse(sourceOptions.ToString()); + Assert.Equal(sourceOptions.SslProtocols, targetOptions.SslProtocols); + } + + public class SSLServerFixture : IDisposable + { + public bool ServerRunning { get; } + + public SSLServerFixture() + { + ServerRunning = TestConfig.IsServerRunning(TestConfig.Current.SslServer, TestConfig.Current.SslPort); + } + + public void SkipIfNoServer() + { + Skip.IfNoConfig(nameof(TestConfig.Config.SslServer), TestConfig.Current.SslServer); + if (!ServerRunning) + { + Assert.Skip($"SSL/TLS Server was not running at {TestConfig.Current.SslServer}:{TestConfig.Current.SslPort}"); + } + } + + public void Dispose() { } + } +} diff --git a/tests/StackExchange.Redis.Tests/SanityCheckTests.cs b/tests/StackExchange.Redis.Tests/SanityCheckTests.cs new file mode 100644 index 000000000..353098fd5 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/SanityCheckTests.cs @@ -0,0 +1,35 @@ +using System; +using System.IO; +using System.Reflection.Metadata; +using System.Reflection.PortableExecutable; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public sealed class SanityChecks +{ + /// + /// Ensure we don't reference System.ValueTuple as it causes issues with .NET Full Framework. + /// + /// + /// Modified from . + /// Thanks Lucas Trzesniewski!. 
+ /// + [Fact] + public void ValueTupleNotReferenced() + { + using var fileStream = File.OpenRead(typeof(RedisValue).Assembly.Location); + using var peReader = new PEReader(fileStream); + var metadataReader = peReader.GetMetadataReader(); + + foreach (var typeRefHandle in metadataReader.TypeReferences) + { + var typeRef = metadataReader.GetTypeReference(typeRefHandle); + if (metadataReader.GetString(typeRef.Namespace) == typeof(ValueTuple).Namespace) + { + var typeName = metadataReader.GetString(typeRef.Name); + Assert.DoesNotContain(nameof(ValueTuple), typeName); + } + } + } +} diff --git a/tests/StackExchange.Redis.Tests/ScanTests.cs b/tests/StackExchange.Redis.Tests/ScanTests.cs new file mode 100644 index 000000000..fe03cbf86 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/ScanTests.cs @@ -0,0 +1,464 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +[RunPerProtocol] +public class ScanTests(ITestOutputHelper output, SharedConnectionFixture fixture) : TestBase(output, fixture) +{ + [Theory] + [InlineData(true)] + [InlineData(false)] + public async Task KeysScan(bool supported) + { + string[]? disabledCommands = supported ? 
null : ["scan"]; + await using var conn = Create(disabledCommands: disabledCommands, allowAdmin: true); + + var dbId = TestConfig.GetDedicatedDB(conn); + var db = conn.GetDatabase(dbId); + var prefix = Me() + ":"; + var server = GetServer(conn); + Assert.Equal(TestContext.Current.GetProtocol(), server.Protocol); + server.FlushDatabase(dbId); + for (int i = 0; i < 100; i++) + { + db.StringSet(prefix + i, Guid.NewGuid().ToString(), flags: CommandFlags.FireAndForget); + } + var seq = server.Keys(dbId, pageSize: 50); + var cur = seq as IScanningCursor; + Assert.NotNull(cur); + Log($"Cursor: {cur.Cursor}, PageOffset: {cur.PageOffset}, PageSize: {cur.PageSize}"); + Assert.Equal(0, cur.PageOffset); + Assert.Equal(0, cur.Cursor); + if (supported) + { + Assert.Equal(50, cur.PageSize); + } + else + { + Assert.Equal(int.MaxValue, cur.PageSize); + } + Assert.Equal(100, seq.Distinct().Count()); + Assert.Equal(100, seq.Distinct().Count()); + Assert.Equal(100, server.Keys(dbId, prefix + "*").Distinct().Count()); + // 7, 70, 71, ..., 79 + Assert.Equal(11, server.Keys(dbId, prefix + "7*").Distinct().Count()); + } + + [Fact] + public async Task ScansIScanning() + { + await using var conn = Create(allowAdmin: true); + + var prefix = Me() + Guid.NewGuid(); + var dbId = TestConfig.GetDedicatedDB(conn); + var db = conn.GetDatabase(dbId); + var server = GetServer(conn); + server.FlushDatabase(dbId); + for (int i = 0; i < 100; i++) + { + db.StringSet(prefix + i, Guid.NewGuid().ToString(), flags: CommandFlags.FireAndForget); + } + var seq = server.Keys(dbId, prefix + "*", pageSize: 15); + using (var iter = seq.GetEnumerator()) + { + IScanningCursor s0 = (IScanningCursor)seq, s1 = (IScanningCursor)iter; + + Assert.Equal(15, s0.PageSize); + Assert.Equal(15, s1.PageSize); + + // start at zero + Assert.Equal(0, s0.Cursor); + Assert.Equal(s0.Cursor, s1.Cursor); + + for (int i = 0; i < 47; i++) + { + Assert.True(iter.MoveNext()); + } + + // non-zero in the middle + Assert.NotEqual(0, s0.Cursor); 
+ Assert.Equal(s0.Cursor, s1.Cursor); + + for (int i = 0; i < 53; i++) + { + Assert.True(iter.MoveNext()); + } + + // zero "next" at the end + Assert.False(iter.MoveNext()); + Assert.NotEqual(0, s0.Cursor); + Assert.NotEqual(0, s1.Cursor); + } + } + + [Fact] + public async Task ScanResume() + { + await using var conn = Create(allowAdmin: true, require: RedisFeatures.v2_8_0); + + var dbId = TestConfig.GetDedicatedDB(conn); + var db = conn.GetDatabase(dbId); + var prefix = Me(); + var server = GetServer(conn); + server.FlushDatabase(dbId); + int i; + for (i = 0; i < 100; i++) + { + db.StringSet(prefix + ":" + i, Guid.NewGuid().ToString()); + } + + var expected = new HashSet(); + long snapCursor = 0; + int snapOffset = 0, snapPageSize = 0; + + i = 0; + var seq = server.Keys(dbId, prefix + ":*", pageSize: 15); + foreach (var key in seq) + { + if (i == 57) + { + snapCursor = ((IScanningCursor)seq).Cursor; + snapOffset = ((IScanningCursor)seq).PageOffset; + snapPageSize = ((IScanningCursor)seq).PageSize; + Log($"i: {i}, Cursor: {snapCursor}, Offset: {snapOffset}, PageSize: {snapPageSize}"); + } + if (i >= 57) + { + expected.Add(key); + } + i++; + } + Log($"Expected: 43, Actual: {expected.Count}, Cursor: {snapCursor}, Offset: {snapOffset}, PageSize: {snapPageSize}"); + Assert.Equal(43, expected.Count); + Assert.NotEqual(0, snapCursor); + Assert.Equal(15, snapPageSize); + + // note: you might think that we can say "hmmm, 57 when using page-size 15 on an empty (flushed) db (so: no skipped keys); that'll be + // offset 12 in the 4th page; you'd be wrong, though; page size doesn't *actually* mean page size; it is a rough analogue for + // page size, with zero guarantees; in this particular test, the first page actually has 19 elements, for example. 
So: we cannot + // make the following assertion: + // Assert.Equal(12, snapOffset); + seq = server.Keys(dbId, prefix + ":*", pageSize: 15, cursor: snapCursor, pageOffset: snapOffset); + var seqCur = (IScanningCursor)seq; + Assert.Equal(snapCursor, seqCur.Cursor); + Assert.Equal(snapPageSize, seqCur.PageSize); + Assert.Equal(snapOffset, seqCur.PageOffset); + using (var iter = seq.GetEnumerator()) + { + var iterCur = (IScanningCursor)iter; + Assert.Equal(snapCursor, iterCur.Cursor); + Assert.Equal(snapOffset, iterCur.PageOffset); + Assert.Equal(snapCursor, seqCur.Cursor); + Assert.Equal(snapOffset, seqCur.PageOffset); + + Assert.True(iter.MoveNext()); + Assert.Equal(snapCursor, iterCur.Cursor); + Assert.Equal(snapOffset, iterCur.PageOffset); + Assert.Equal(snapCursor, seqCur.Cursor); + Assert.Equal(snapOffset, seqCur.PageOffset); + + Assert.True(iter.MoveNext()); + Assert.Equal(snapCursor, iterCur.Cursor); + Assert.Equal(snapOffset + 1, iterCur.PageOffset); + Assert.Equal(snapCursor, seqCur.Cursor); + Assert.Equal(snapOffset + 1, seqCur.PageOffset); + } + + int count = 0; + foreach (var key in seq) + { + expected.Remove(key); + count++; + } + Assert.Empty(expected); + Assert.Equal(43, count); + } + + [Theory] + [InlineData(true)] + [InlineData(false)] + public async Task SetScan(bool supported) + { + string[]? disabledCommands = supported ? 
null : ["sscan"]; + + await using var conn = Create(disabledCommands: disabledCommands); + + RedisKey key = Me(); + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + + db.SetAdd(key, "a", CommandFlags.FireAndForget); + db.SetAdd(key, "b", CommandFlags.FireAndForget); + db.SetAdd(key, "c", CommandFlags.FireAndForget); + var arr = db.SetScan(key).ToArray(); + Assert.Equal(3, arr.Length); + Assert.Contains((RedisValue)"a", arr); + Assert.Contains((RedisValue)"b", arr); + Assert.Contains((RedisValue)"c", arr); + } + + [Theory] + [InlineData(true)] + [InlineData(false)] + public async Task SortedSetScan(bool supported) + { + string[]? disabledCommands = supported ? null : ["zscan"]; + + await using var conn = Create(disabledCommands: disabledCommands); + + RedisKey key = Me() + supported; + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + + db.SortedSetAdd(key, "a", 1, CommandFlags.FireAndForget); + db.SortedSetAdd(key, "b", 2, CommandFlags.FireAndForget); + db.SortedSetAdd(key, "c", 3, CommandFlags.FireAndForget); + + var arr = db.SortedSetScan(key).ToArray(); + Assert.Equal(3, arr.Length); + Assert.True(arr.Any(x => x.Element == "a" && x.Score == 1), "a"); + Assert.True(arr.Any(x => x.Element == "b" && x.Score == 2), "b"); + Assert.True(arr.Any(x => x.Element == "c" && x.Score == 3), "c"); + + var dictionary = arr.ToDictionary(); + Assert.Equal(1, dictionary["a"]); + Assert.Equal(2, dictionary["b"]); + Assert.Equal(3, dictionary["c"]); + + var sDictionary = arr.ToStringDictionary(); + Assert.Equal(1, sDictionary["a"]); + Assert.Equal(2, sDictionary["b"]); + Assert.Equal(3, sDictionary["c"]); + + var basic = db.SortedSetRangeByRankWithScores(key, order: Order.Ascending).ToDictionary(); + Assert.Equal(3, basic.Count); + Assert.Equal(1, basic["a"]); + Assert.Equal(2, basic["b"]); + Assert.Equal(3, basic["c"]); + + basic = db.SortedSetRangeByRankWithScores(key, order: Order.Descending).ToDictionary(); + 
Assert.Equal(3, basic.Count); + Assert.Equal(1, basic["a"]); + Assert.Equal(2, basic["b"]); + Assert.Equal(3, basic["c"]); + + var basicArr = db.SortedSetRangeByScoreWithScores(key, order: Order.Ascending); + Assert.Equal(3, basicArr.Length); + Assert.Equal(1, basicArr[0].Score); + Assert.Equal(2, basicArr[1].Score); + Assert.Equal(3, basicArr[2].Score); + basic = basicArr.ToDictionary(); + Assert.Equal(3, basic.Count); // asc + Assert.Equal(1, basic["a"]); + Assert.Equal(2, basic["b"]); + Assert.Equal(3, basic["c"]); + + basicArr = db.SortedSetRangeByScoreWithScores(key, order: Order.Descending); + Assert.Equal(3, basicArr.Length); + Assert.Equal(3, basicArr[0].Score); + Assert.Equal(2, basicArr[1].Score); + Assert.Equal(1, basicArr[2].Score); + basic = basicArr.ToDictionary(); + Assert.Equal(3, basic.Count); // desc + Assert.Equal(1, basic["a"]); + Assert.Equal(2, basic["b"]); + Assert.Equal(3, basic["c"]); + } + + [Theory] + [InlineData(true)] + [InlineData(false)] + public async Task HashScan(bool supported) + { + string[]? disabledCommands = supported ? 
null : ["hscan"]; + + await using var conn = Create(disabledCommands: disabledCommands); + + RedisKey key = Me(); + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + + db.HashSet(key, "a", "1", flags: CommandFlags.FireAndForget); + db.HashSet(key, "b", "2", flags: CommandFlags.FireAndForget); + db.HashSet(key, "c", "3", flags: CommandFlags.FireAndForget); + + var arr = db.HashScan(key).ToArray(); + Assert.Equal(3, arr.Length); + Assert.True(arr.Any(x => x.Name == "a" && x.Value == "1"), "a"); + Assert.True(arr.Any(x => x.Name == "b" && x.Value == "2"), "b"); + Assert.True(arr.Any(x => x.Name == "c" && x.Value == "3"), "c"); + + var dictionary = arr.ToDictionary(); + Assert.Equal(1, (long)dictionary["a"]); + Assert.Equal(2, (long)dictionary["b"]); + Assert.Equal(3, (long)dictionary["c"]); + + var sDictionary = arr.ToStringDictionary(); + Assert.Equal("1", sDictionary["a"]); + Assert.Equal("2", sDictionary["b"]); + Assert.Equal("3", sDictionary["c"]); + + var basic = db.HashGetAll(key).ToDictionary(); + Assert.Equal(3, basic.Count); + Assert.Equal(1, (long)basic["a"]); + Assert.Equal(2, (long)basic["b"]); + Assert.Equal(3, (long)basic["c"]); + } + + [Theory] + [InlineData(10)] + [InlineData(100)] + [InlineData(1000)] + [InlineData(10000)] + public async Task HashScanLarge(int pageSize) + { + await using var conn = Create(); + + RedisKey key = Me() + pageSize; + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + + for (int i = 0; i < 2000; i++) + db.HashSet(key, "k" + i, "v" + i, flags: CommandFlags.FireAndForget); + + int count = db.HashScan(key, pageSize: pageSize).Count(); + Assert.Equal(2000, count); + } + + [Theory] + [InlineData(true)] + [InlineData(false)] + public async Task HashScanNoValues(bool supported) + { + string[]? disabledCommands = supported ? 
null : ["hscan"]; + + await using var conn = Create(require: RedisFeatures.v7_4_0_rc1, disabledCommands: disabledCommands); + + RedisKey key = Me(); + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + + db.HashSet(key, "a", "1", flags: CommandFlags.FireAndForget); + db.HashSet(key, "b", "2", flags: CommandFlags.FireAndForget); + db.HashSet(key, "c", "3", flags: CommandFlags.FireAndForget); + + var arr = db.HashScanNoValues(key).ToArray(); + Assert.Equal(3, arr.Length); + Assert.True(arr.Any(x => x == "a"), "a"); + Assert.True(arr.Any(x => x == "b"), "b"); + Assert.True(arr.Any(x => x == "c"), "c"); + + var basic = db.HashGetAll(key).ToDictionary(); + Assert.Equal(3, basic.Count); + Assert.Equal(1, (long)basic["a"]); + Assert.Equal(2, (long)basic["b"]); + Assert.Equal(3, (long)basic["c"]); + } + + [Theory] + [InlineData(10)] + [InlineData(100)] + [InlineData(1000)] + [InlineData(10000)] + public async Task HashScanNoValuesLarge(int pageSize) + { + await using var conn = Create(require: RedisFeatures.v7_4_0_rc1); + + RedisKey key = Me() + pageSize; + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + + for (int i = 0; i < 2000; i++) + { + db.HashSet(key, "k" + i, "v" + i, flags: CommandFlags.FireAndForget); + } + + int count = db.HashScanNoValues(key, pageSize: pageSize).Count(); + Assert.Equal(2000, count); + } + + /// + /// See . 
+ /// + [Fact] + public async Task HashScanThresholds() + { + await using var conn = Create(allowAdmin: true); + + var config = conn.GetServer(conn.GetEndPoints(true)[0]).ConfigGet("hash-max-ziplist-entries").First(); + var threshold = int.Parse(config.Value); + + RedisKey key = Me(); + Assert.False(GotCursors(conn, key, threshold - 1)); + Assert.True(GotCursors(conn, key, threshold + 1)); + } + + private static bool GotCursors(IConnectionMultiplexer conn, RedisKey key, int count) + { + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + + var entries = new HashEntry[count]; + for (var i = 0; i < count; i++) + { + entries[i] = new HashEntry("Item:" + i, i); + } + db.HashSet(key, entries, CommandFlags.FireAndForget); + + var found = false; + var response = db.HashScan(key); + var cursor = (IScanningCursor)response; + foreach (var _ in response) + { + if (cursor.Cursor > 0) + { + found = true; + } + } + return found; + } + + [Theory] + [InlineData(10)] + [InlineData(100)] + [InlineData(1000)] + [InlineData(10000)] + public async Task SetScanLarge(int pageSize) + { + await using var conn = Create(); + + RedisKey key = Me() + pageSize; + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + + for (int i = 0; i < 2000; i++) + db.SetAdd(key, "s" + i, flags: CommandFlags.FireAndForget); + + int count = db.SetScan(key, pageSize: pageSize).Count(); + Assert.Equal(2000, count); + } + + [Theory] + [InlineData(10)] + [InlineData(100)] + [InlineData(1000)] + [InlineData(10000)] + public async Task SortedSetScanLarge(int pageSize) + { + await using var conn = Create(); + + RedisKey key = Me() + pageSize; + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + + for (int i = 0; i < 2000; i++) + db.SortedSetAdd(key, "z" + i, i, flags: CommandFlags.FireAndForget); + + int count = db.SortedSetScan(key, pageSize: pageSize).Count(); + Assert.Equal(2000, count); + } +} diff --git 
a/tests/StackExchange.Redis.Tests/Scans.cs b/tests/StackExchange.Redis.Tests/Scans.cs deleted file mode 100644 index 6b6b1401f..000000000 --- a/tests/StackExchange.Redis.Tests/Scans.cs +++ /dev/null @@ -1,422 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Linq; -using Xunit; -using Xunit.Abstractions; -// ReSharper disable PossibleMultipleEnumeration - -namespace StackExchange.Redis.Tests -{ - [Collection(SharedConnectionFixture.Key)] - public class Scans : TestBase - { - public Scans(ITestOutputHelper output, SharedConnectionFixture fixture) : base (output, fixture) { } - - [Theory] - [InlineData(true)] - [InlineData(false)] - public void KeysScan(bool supported) - { - string[] disabledCommands = supported ? null : new[] { "scan" }; - using (var conn = Create(disabledCommands: disabledCommands, allowAdmin: true)) - { - var dbId = TestConfig.GetDedicatedDB(conn); - var db = conn.GetDatabase(dbId); - var prefix = Me() + ":"; - var server = GetServer(conn); - server.FlushDatabase(dbId); - for (int i = 0; i < 100; i++) - { - db.StringSet(prefix + i, Guid.NewGuid().ToString(), flags: CommandFlags.FireAndForget); - } - var seq = server.Keys(dbId, pageSize: 50); - var cur = seq as IScanningCursor; - Assert.NotNull(cur); - Log($"Cursor: {cur.Cursor}, PageOffset: {cur.PageOffset}, PageSize: {cur.PageSize}"); - Assert.Equal(0, cur.PageOffset); - Assert.Equal(0, cur.Cursor); - if (supported) - { - Assert.Equal(50, cur.PageSize); - } - else - { - Assert.Equal(int.MaxValue, cur.PageSize); - } - Assert.Equal(100, seq.Distinct().Count()); - Assert.Equal(100, seq.Distinct().Count()); - Assert.Equal(100, server.Keys(dbId, prefix + "*").Distinct().Count()); - // 7, 70, 71, ..., 79 - Assert.Equal(11, server.Keys(dbId, prefix + "7*").Distinct().Count()); - } - } - - [Fact] - public void ScansIScanning() - { - using (var conn = Create(allowAdmin: true)) - { - var prefix = Me() + Guid.NewGuid(); - var dbId = TestConfig.GetDedicatedDB(conn); - var db = 
conn.GetDatabase(dbId); - var server = GetServer(conn); - server.FlushDatabase(dbId); - for (int i = 0; i < 100; i++) - { - db.StringSet(prefix + i, Guid.NewGuid().ToString(), flags: CommandFlags.FireAndForget); - } - var seq = server.Keys(dbId, prefix + "*", pageSize: 15); - using (var iter = seq.GetEnumerator()) - { - IScanningCursor s0 = (IScanningCursor)seq, s1 = (IScanningCursor)iter; - - Assert.Equal(15, s0.PageSize); - Assert.Equal(15, s1.PageSize); - - // start at zero - Assert.Equal(0, s0.Cursor); - Assert.Equal(s0.Cursor, s1.Cursor); - - for (int i = 0; i < 47; i++) - { - Assert.True(iter.MoveNext()); - } - - // non-zero in the middle - Assert.NotEqual(0, s0.Cursor); - Assert.Equal(s0.Cursor, s1.Cursor); - - for (int i = 0; i < 53; i++) - { - Assert.True(iter.MoveNext()); - } - - // zero "next" at the end - Assert.False(iter.MoveNext()); - Assert.NotEqual(0, s0.Cursor); - Assert.NotEqual(0, s1.Cursor); - } - } - } - - [Fact] - public void ScanResume() - { - using (var conn = Create(allowAdmin: true)) - { - // only goes up to 3.*, so... 
- Skip.IfMissingFeature(conn, nameof(RedisFeatures.Scan), x => x.Scan); - var dbId = TestConfig.GetDedicatedDB(conn); - var db = conn.GetDatabase(dbId); - var prefix = Me(); - var server = GetServer(conn); - server.FlushDatabase(dbId); - int i; - for (i = 0; i < 100; i++) - { - db.StringSet(prefix + ":" + i, Guid.NewGuid().ToString()); - } - - var expected = new HashSet(); - long snapCursor = 0; - int snapOffset = 0, snapPageSize = 0; - - i = 0; - var seq = server.Keys(dbId, prefix + ":*", pageSize: 15); - foreach (var key in seq) - { - if (i == 57) - { - snapCursor = ((IScanningCursor)seq).Cursor; - snapOffset = ((IScanningCursor)seq).PageOffset; - snapPageSize = ((IScanningCursor)seq).PageSize; - Log($"i: {i}, Cursor: {snapCursor}, Offset: {snapOffset}, PageSize: {snapPageSize}"); - } - if (i >= 57) - { - expected.Add(key); - } - i++; - } - Log($"Expected: 43, Actual: {expected.Count}, Cursor: {snapCursor}, Offset: {snapOffset}, PageSize: {snapPageSize}"); - Assert.Equal(43, expected.Count); - Assert.NotEqual(0, snapCursor); - Assert.Equal(15, snapPageSize); - - // note: you might think that we can say "hmmm, 57 when using page-size 15 on an empty (flushed) db (so: no skipped keys); that'll be - // offset 12 in the 4th page; you'd be wrong, though; page size doesn't *actually* mean page size; it is a rough analogue for - // page size, with zero guarantees; in this particular test, the first page actually has 19 elements, for example. 
So: we cannot - // make the following assertion: - // Assert.Equal(12, snapOffset); - - seq = server.Keys(dbId, prefix + ":*", pageSize: 15, cursor: snapCursor, pageOffset: snapOffset); - var seqCur = (IScanningCursor)seq; - Assert.Equal(snapCursor, seqCur.Cursor); - Assert.Equal(snapPageSize, seqCur.PageSize); - Assert.Equal(snapOffset, seqCur.PageOffset); - using (var iter = seq.GetEnumerator()) - { - var iterCur = (IScanningCursor)iter; - Assert.Equal(snapCursor, iterCur.Cursor); - Assert.Equal(snapOffset, iterCur.PageOffset); - Assert.Equal(snapCursor, seqCur.Cursor); - Assert.Equal(snapOffset, seqCur.PageOffset); - - Assert.True(iter.MoveNext()); - Assert.Equal(snapCursor, iterCur.Cursor); - Assert.Equal(snapOffset, iterCur.PageOffset); - Assert.Equal(snapCursor, seqCur.Cursor); - Assert.Equal(snapOffset, seqCur.PageOffset); - - Assert.True(iter.MoveNext()); - Assert.Equal(snapCursor, iterCur.Cursor); - Assert.Equal(snapOffset + 1, iterCur.PageOffset); - Assert.Equal(snapCursor, seqCur.Cursor); - Assert.Equal(snapOffset + 1, seqCur.PageOffset); - } - - int count = 0; - foreach (var key in seq) - { - expected.Remove(key); - count++; - } - Assert.Empty(expected); - Assert.Equal(43, count); - } - } - - [Theory] - [InlineData(true)] - [InlineData(false)] - public void SetScan(bool supported) - { - string[] disabledCommands = supported ? 
null : new[] { "sscan" }; - using (var conn = Create(disabledCommands: disabledCommands)) - { - RedisKey key = Me(); - var db = conn.GetDatabase(); - db.KeyDelete(key, CommandFlags.FireAndForget); - - db.SetAdd(key, "a", CommandFlags.FireAndForget); - db.SetAdd(key, "b", CommandFlags.FireAndForget); - db.SetAdd(key, "c", CommandFlags.FireAndForget); - var arr = db.SetScan(key).ToArray(); - Assert.Equal(3, arr.Length); - Assert.Contains((RedisValue)"a", arr); - Assert.Contains((RedisValue)"b", arr); - Assert.Contains((RedisValue)"c", arr); - } - } - - [Theory] - [InlineData(true)] - [InlineData(false)] - public void SortedSetScan(bool supported) - { - string[] disabledCommands = supported ? null : new[] { "zscan" }; - using (var conn = Create(disabledCommands: disabledCommands)) - { - RedisKey key = Me() + supported; - var db = conn.GetDatabase(); - db.KeyDelete(key, CommandFlags.FireAndForget); - - db.SortedSetAdd(key, "a", 1, CommandFlags.FireAndForget); - db.SortedSetAdd(key, "b", 2, CommandFlags.FireAndForget); - db.SortedSetAdd(key, "c", 3, CommandFlags.FireAndForget); - - var arr = db.SortedSetScan(key).ToArray(); - Assert.Equal(3, arr.Length); - Assert.True(arr.Any(x => x.Element == "a" && x.Score == 1), "a"); - Assert.True(arr.Any(x => x.Element == "b" && x.Score == 2), "b"); - Assert.True(arr.Any(x => x.Element == "c" && x.Score == 3), "c"); - - var dictionary = arr.ToDictionary(); - Assert.Equal(1, dictionary["a"]); - Assert.Equal(2, dictionary["b"]); - Assert.Equal(3, dictionary["c"]); - - var sDictionary = arr.ToStringDictionary(); - Assert.Equal(1, sDictionary["a"]); - Assert.Equal(2, sDictionary["b"]); - Assert.Equal(3, sDictionary["c"]); - - var basic = db.SortedSetRangeByRankWithScores(key, order: Order.Ascending).ToDictionary(); - Assert.Equal(3, basic.Count); - Assert.Equal(1, basic["a"]); - Assert.Equal(2, basic["b"]); - Assert.Equal(3, basic["c"]); - - basic = db.SortedSetRangeByRankWithScores(key, order: Order.Descending).ToDictionary(); - 
Assert.Equal(3, basic.Count); - Assert.Equal(1, basic["a"]); - Assert.Equal(2, basic["b"]); - Assert.Equal(3, basic["c"]); - - var basicArr = db.SortedSetRangeByScoreWithScores(key, order: Order.Ascending); - Assert.Equal(3, basicArr.Length); - Assert.Equal(1, basicArr[0].Score); - Assert.Equal(2, basicArr[1].Score); - Assert.Equal(3, basicArr[2].Score); - basic = basicArr.ToDictionary(); - Assert.Equal(3, basic.Count); //asc - Assert.Equal(1, basic["a"]); - Assert.Equal(2, basic["b"]); - Assert.Equal(3, basic["c"]); - - basicArr = db.SortedSetRangeByScoreWithScores(key, order: Order.Descending); - Assert.Equal(3, basicArr.Length); - Assert.Equal(3, basicArr[0].Score); - Assert.Equal(2, basicArr[1].Score); - Assert.Equal(1, basicArr[2].Score); - basic = basicArr.ToDictionary(); - Assert.Equal(3, basic.Count); // desc - Assert.Equal(1, basic["a"]); - Assert.Equal(2, basic["b"]); - Assert.Equal(3, basic["c"]); - } - } - - [Theory] - [InlineData(true)] - [InlineData(false)] - public void HashScan(bool supported) - { - string[] disabledCommands = supported ? 
null : new[] { "hscan" }; - using (var conn = Create(disabledCommands: disabledCommands)) - { - RedisKey key = Me(); - var db = conn.GetDatabase(); - db.KeyDelete(key, CommandFlags.FireAndForget); - - db.HashSet(key, "a", "1", flags: CommandFlags.FireAndForget); - db.HashSet(key, "b", "2", flags: CommandFlags.FireAndForget); - db.HashSet(key, "c", "3", flags: CommandFlags.FireAndForget); - - var arr = db.HashScan(key).ToArray(); - Assert.Equal(3, arr.Length); - Assert.True(arr.Any(x => x.Name == "a" && x.Value == "1"), "a"); - Assert.True(arr.Any(x => x.Name == "b" && x.Value == "2"), "b"); - Assert.True(arr.Any(x => x.Name == "c" && x.Value == "3"), "c"); - - var dictionary = arr.ToDictionary(); - Assert.Equal(1, (long)dictionary["a"]); - Assert.Equal(2, (long)dictionary["b"]); - Assert.Equal(3, (long)dictionary["c"]); - - var sDictionary = arr.ToStringDictionary(); - Assert.Equal("1", sDictionary["a"]); - Assert.Equal("2", sDictionary["b"]); - Assert.Equal("3", sDictionary["c"]); - - var basic = db.HashGetAll(key).ToDictionary(); - Assert.Equal(3, basic.Count); - Assert.Equal(1, (long)basic["a"]); - Assert.Equal(2, (long)basic["b"]); - Assert.Equal(3, (long)basic["c"]); - } - } - - [Theory] - [InlineData(10)] - [InlineData(100)] - [InlineData(1000)] - [InlineData(10000)] - public void HashScanLarge(int pageSize) - { - using (var conn = Create()) - { - RedisKey key = Me() + pageSize; - var db = conn.GetDatabase(); - db.KeyDelete(key, CommandFlags.FireAndForget); - - for (int i = 0; i < 2000; i++) - db.HashSet(key, "k" + i, "v" + i, flags: CommandFlags.FireAndForget); - - int count = db.HashScan(key, pageSize: pageSize).Count(); - Assert.Equal(2000, count); - } - } - - [Fact] // See https://github.com/StackExchange/StackExchange.Redis/issues/729 - public void HashScanThresholds() - { - using (var conn = Create(allowAdmin: true)) - { - var config = conn.GetServer(conn.GetEndPoints(true)[0]).ConfigGet("hash-max-ziplist-entries").First(); - var threshold = 
int.Parse(config.Value); - - RedisKey key = Me(); - Assert.False(GotCursors(conn, key, threshold - 1)); - Assert.True(GotCursors(conn, key, threshold + 1)); - } - } - - private bool GotCursors(IConnectionMultiplexer conn, RedisKey key, int count) - { - var db = conn.GetDatabase(); - db.KeyDelete(key, CommandFlags.FireAndForget); - - var entries = new HashEntry[count]; - for (var i = 0; i < count; i++) - { - entries[i] = new HashEntry("Item:" + i, i); - } - db.HashSet(key, entries, CommandFlags.FireAndForget); - - var found = false; - var response = db.HashScan(key); - var cursor = ((IScanningCursor)response); - foreach (var _ in response) - { - if (cursor.Cursor > 0) - { - found = true; - } - } - return found; - } - - [Theory] - [InlineData(10)] - [InlineData(100)] - [InlineData(1000)] - [InlineData(10000)] - public void SetScanLarge(int pageSize) - { - using (var conn = Create()) - { - RedisKey key = Me() + pageSize; - var db = conn.GetDatabase(); - db.KeyDelete(key, CommandFlags.FireAndForget); - - for (int i = 0; i < 2000; i++) - db.SetAdd(key, "s" + i, flags: CommandFlags.FireAndForget); - - int count = db.SetScan(key, pageSize: pageSize).Count(); - Assert.Equal(2000, count); - } - } - - [Theory] - [InlineData(10)] - [InlineData(100)] - [InlineData(1000)] - [InlineData(10000)] - public void SortedSetScanLarge(int pageSize) - { - using (var conn = Create()) - { - RedisKey key = Me() + pageSize; - var db = conn.GetDatabase(); - db.KeyDelete(key, CommandFlags.FireAndForget); - - for (int i = 0; i < 2000; i++) - db.SortedSetAdd(key, "z" + i, i, flags: CommandFlags.FireAndForget); - - int count = db.SortedSetScan(key, pageSize: pageSize).Count(); - Assert.Equal(2000, count); - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/Scripting.cs b/tests/StackExchange.Redis.Tests/Scripting.cs deleted file mode 100644 index 84b911659..000000000 --- a/tests/StackExchange.Redis.Tests/Scripting.cs +++ /dev/null @@ -1,1098 +0,0 @@ -using System; -using 
System.Diagnostics; -using System.Linq; -using System.Text; -using System.Threading.Tasks; -using StackExchange.Redis.KeyspaceIsolation; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - [Collection(SharedConnectionFixture.Key)] - public class Scripting : TestBase - { - public Scripting(ITestOutputHelper output, SharedConnectionFixture fixture) : base (output, fixture) { } - - private IConnectionMultiplexer GetScriptConn(bool allowAdmin = false) - { - int syncTimeout = 5000; - if (Debugger.IsAttached) syncTimeout = 500000; - var muxer = Create(allowAdmin: allowAdmin, syncTimeout: syncTimeout); - - Skip.IfMissingFeature(muxer, nameof(RedisFeatures.Scripting), r => r.Scripting); - return muxer; - } - - [Fact] - public void ClientScripting() - { - using (var conn = GetScriptConn()) - { - _ = conn.GetDatabase().ScriptEvaluate("return redis.call('info','server')", null, null); - } - } - - [Fact] - public async Task BasicScripting() - { - using (var muxer = GetScriptConn()) - { - var conn = muxer.GetDatabase(); - var noCache = conn.ScriptEvaluateAsync("return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}", - new RedisKey[] { "key1", "key2" }, new RedisValue[] { "first", "second" }); - var cache = conn.ScriptEvaluateAsync("return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}", - new RedisKey[] { "key1", "key2" }, new RedisValue[] { "first", "second" }); - var results = (string[])await noCache; - Assert.Equal(4, results.Length); - Assert.Equal("key1", results[0]); - Assert.Equal("key2", results[1]); - Assert.Equal("first", results[2]); - Assert.Equal("second", results[3]); - - results = (string[])await cache; - Assert.Equal(4, results.Length); - Assert.Equal("key1", results[0]); - Assert.Equal("key2", results[1]); - Assert.Equal("first", results[2]); - Assert.Equal("second", results[3]); - } - } - - [Fact] - public void KeysScripting() - { - using (var muxer = GetScriptConn()) - { - var conn = muxer.GetDatabase(); - var key = Me(); - conn.StringSet(key, "bar", 
flags: CommandFlags.FireAndForget); - var result = (string)conn.ScriptEvaluate("return redis.call('get', KEYS[1])", new RedisKey[] { key }, null); - Assert.Equal("bar", result); - } - } - - [Fact] - public async Task TestRandomThingFromForum() - { - const string script = @"local currentVal = tonumber(redis.call('GET', KEYS[1])); - if (currentVal <= 0 ) then return 1 elseif (currentVal - (tonumber(ARGV[1])) < 0 ) then return 0 end; - return redis.call('INCRBY', KEYS[1], -tonumber(ARGV[1]));"; - - using (var muxer = GetScriptConn()) - { - var prefix = Me(); - var conn = muxer.GetDatabase(); - conn.StringSet(prefix + "A", "0", flags: CommandFlags.FireAndForget); - conn.StringSet(prefix + "B", "5", flags: CommandFlags.FireAndForget); - conn.StringSet(prefix + "C", "10", flags: CommandFlags.FireAndForget); - - var a = conn.ScriptEvaluateAsync(script, new RedisKey[] { prefix + "A" }, new RedisValue[] { 6 }).ForAwait(); - var b = conn.ScriptEvaluateAsync(script, new RedisKey[] { prefix + "B" }, new RedisValue[] { 6 }).ForAwait(); - var c = conn.ScriptEvaluateAsync(script, new RedisKey[] { prefix + "C" }, new RedisValue[] { 6 }).ForAwait(); - - var vals = await conn.StringGetAsync(new RedisKey[] { prefix + "A", prefix + "B", prefix + "C" }).ForAwait(); - - Assert.Equal(1, (long)await a); // exit code when current val is non-positive - Assert.Equal(0, (long)await b); // exit code when result would be negative - Assert.Equal(4, (long)await c); // 10 - 6 = 4 - Assert.Equal("0", vals[0]); - Assert.Equal("5", vals[1]); - Assert.Equal("4", vals[2]); - } - } - - [Fact] - public void HackyGetPerf() - { - using (var muxer = GetScriptConn()) - { - var key = Me(); - var conn = muxer.GetDatabase(); - conn.StringSet(key + "foo", "bar", flags: CommandFlags.FireAndForget); - var result = (long)conn.ScriptEvaluate(@" -redis.call('psetex', KEYS[1], 60000, 'timing') -for i = 1,5000 do - redis.call('set', 'ignore','abc') -end -local timeTaken = 60000 - redis.call('pttl', KEYS[1]) 
-redis.call('del', KEYS[1]) -return timeTaken -", new RedisKey[] { key }, null); - Log(result.ToString()); - Assert.True(result > 0); - } - } - - [Fact] - public async Task MultiIncrWithoutReplies() - { - using (var muxer = GetScriptConn()) - { - var conn = muxer.GetDatabase(); - var prefix = Me(); - // prime some initial values - conn.KeyDelete(new RedisKey[] { prefix + "a", prefix + "b", prefix + "c" }, CommandFlags.FireAndForget); - conn.StringIncrement(prefix + "b", flags: CommandFlags.FireAndForget); - conn.StringIncrement(prefix + "c", flags: CommandFlags.FireAndForget); - conn.StringIncrement(prefix + "c", flags: CommandFlags.FireAndForget); - - // run the script, passing "a", "b", "c", "c" to - // increment a & b by 1, c twice - var result = conn.ScriptEvaluateAsync( - "for i,key in ipairs(KEYS) do redis.call('incr', key) end", - new RedisKey[] { prefix + "a", prefix + "b", prefix + "c", prefix + "c" }, // <== aka "KEYS" in the script - null).ForAwait(); // <== aka "ARGV" in the script - - // check the incremented values - var a = conn.StringGetAsync(prefix + "a").ForAwait(); - var b = conn.StringGetAsync(prefix + "b").ForAwait(); - var c = conn.StringGetAsync(prefix + "c").ForAwait(); - - Assert.True((await result).IsNull, "result"); - Assert.Equal(1, (long)await a); - Assert.Equal(2, (long)await b); - Assert.Equal(4, (long)await c); - } - } - - [Fact] - public async Task MultiIncrByWithoutReplies() - { - using (var muxer = GetScriptConn()) - { - var conn = muxer.GetDatabase(); - var prefix = Me(); - // prime some initial values - conn.KeyDelete(new RedisKey[] { prefix + "a", prefix + "b", prefix + "c" }, CommandFlags.FireAndForget); - conn.StringIncrement(prefix + "b", flags: CommandFlags.FireAndForget); - conn.StringIncrement(prefix + "c", flags: CommandFlags.FireAndForget); - conn.StringIncrement(prefix + "c", flags: CommandFlags.FireAndForget); - - //run the script, passing "a", "b", "c" and 1,2,3 - // increment a &b by 1, c twice - var result = 
conn.ScriptEvaluateAsync( - "for i,key in ipairs(KEYS) do redis.call('incrby', key, ARGV[i]) end", - new RedisKey[] { prefix + "a", prefix + "b", prefix + "c" }, // <== aka "KEYS" in the script - new RedisValue[] { 1, 1, 2 }).ForAwait(); // <== aka "ARGV" in the script - - // check the incremented values - var a = conn.StringGetAsync(prefix + "a").ForAwait(); - var b = conn.StringGetAsync(prefix + "b").ForAwait(); - var c = conn.StringGetAsync(prefix + "c").ForAwait(); - - Assert.True((await result).IsNull, "result"); - Assert.Equal(1, (long)await a); - Assert.Equal(2, (long)await b); - Assert.Equal(4, (long)await c); - } - } - - [Fact] - public void DisableStringInference() - { - using (var muxer = GetScriptConn()) - { - var conn = muxer.GetDatabase(); - var key = Me(); - conn.StringSet(key, "bar", flags: CommandFlags.FireAndForget); - var result = (byte[])conn.ScriptEvaluate("return redis.call('get', KEYS[1])", new RedisKey[] { key }); - Assert.Equal("bar", Encoding.UTF8.GetString(result)); - } - } - - [Fact] - public void FlushDetection() - { // we don't expect this to handle everything; we just expect it to be predictable - using (var muxer = GetScriptConn(allowAdmin: true)) - { - var conn = muxer.GetDatabase(); - var key = Me(); - conn.StringSet(key, "bar", flags: CommandFlags.FireAndForget); - var result = (string)conn.ScriptEvaluate("return redis.call('get', KEYS[1])", new RedisKey[] { key }, null); - Assert.Equal("bar", result); - - // now cause all kinds of problems - GetServer(muxer).ScriptFlush(); - - //expect this one to fail just work fine (self-fix) - conn.ScriptEvaluate("return redis.call('get', KEYS[1])", new RedisKey[] { key }, null); - - result = (string)conn.ScriptEvaluate("return redis.call('get', KEYS[1])", new RedisKey[] { key }, null); - Assert.Equal("bar", result); - } - } - - [Fact] - public void PrepareScript() - { - string[] scripts = { "return redis.call('get', KEYS[1])", "return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}" }; - using (var muxer = 
GetScriptConn(allowAdmin: true)) - { - var server = GetServer(muxer); - server.ScriptFlush(); - - // when vanilla - server.ScriptLoad(scripts[0]); - server.ScriptLoad(scripts[1]); - - //when known to exist - server.ScriptLoad(scripts[0]); - server.ScriptLoad(scripts[1]); - } - using (var muxer = GetScriptConn()) - { - var server = GetServer(muxer); - - //when vanilla - server.ScriptLoad(scripts[0]); - server.ScriptLoad(scripts[1]); - - //when known to exist - server.ScriptLoad(scripts[0]); - server.ScriptLoad(scripts[1]); - - //when known to exist - server.ScriptLoad(scripts[0]); - server.ScriptLoad(scripts[1]); - } - } - - [Fact] - public void NonAsciiScripts() - { - using (var muxer = GetScriptConn()) - { - const string evil = "return '僕'"; - var conn = muxer.GetDatabase(); - GetServer(muxer).ScriptLoad(evil); - - var result = (string)conn.ScriptEvaluate(evil, null, null); - Assert.Equal("僕", result); - } - } - - [Fact] - public async Task ScriptThrowsError() - { - await Assert.ThrowsAsync(async () => - { - using (var muxer = GetScriptConn()) - { - var conn = muxer.GetDatabase(); - try - { - await conn.ScriptEvaluateAsync("return redis.error_reply('oops')", null, null).ForAwait(); - } - catch (AggregateException ex) - { - throw ex.InnerExceptions[0]; - } - } - }).ForAwait(); - } - - [Fact] - public void ScriptThrowsErrorInsideTransaction() - { - using (var muxer = GetScriptConn()) - { - var key = Me(); - var conn = muxer.GetDatabase(); - conn.KeyDelete(key, CommandFlags.FireAndForget); - var beforeTran = (string)conn.StringGet(key); - Assert.Null(beforeTran); - var tran = conn.CreateTransaction(); - { - var a = tran.StringIncrementAsync(key); - var b = tran.ScriptEvaluateAsync("return redis.error_reply('oops')", null, null); - var c = tran.StringIncrementAsync(key); - var complete = tran.ExecuteAsync(); - - Assert.True(muxer.Wait(complete)); - Assert.True(QuickWait(a).IsCompleted, a.Status.ToString()); - Assert.True(QuickWait(c).IsCompleted, "State: " + 
c.Status); - Assert.Equal(1L, a.Result); - Assert.Equal(2L, c.Result); - - Assert.True(QuickWait(b).IsFaulted, "should be faulted"); - Assert.Single(b.Exception.InnerExceptions); - var ex = b.Exception.InnerExceptions.Single(); - Assert.IsType(ex); - Assert.Equal("oops", ex.Message); - } - var afterTran = conn.StringGetAsync(key); - Assert.Equal(2L, (long)conn.Wait(afterTran)); - } - } - private static Task QuickWait(Task task) - { - if (!task.IsCompleted) - { - try { task.Wait(200); } catch { /* But don't error */ } - } - return task; - } - - [Fact] - public async Task ChangeDbInScript() - { - using (var muxer = GetScriptConn()) - { - var key = Me(); - muxer.GetDatabase(1).StringSet(key, "db 1", flags: CommandFlags.FireAndForget); - muxer.GetDatabase(2).StringSet(key, "db 2", flags: CommandFlags.FireAndForget); - - Log("Key: " + key); - var conn = muxer.GetDatabase(2); - var evalResult = conn.ScriptEvaluateAsync(@"redis.call('select', 1) - return redis.call('get','" + key + "')", null, null); - var getResult = conn.StringGetAsync(key); - - Assert.Equal("db 1", (string)await evalResult); - // now, our connection thought it was in db 2, but the script changed to db 1 - Assert.Equal("db 2", await getResult); - } - } - - [Fact] - public async Task ChangeDbInTranScript() - { - using (var muxer = GetScriptConn()) - { - var key = Me(); - muxer.GetDatabase(1).StringSet(key, "db 1", flags: CommandFlags.FireAndForget); - muxer.GetDatabase(2).StringSet(key, "db 2", flags: CommandFlags.FireAndForget); - - var conn = muxer.GetDatabase(2); - var tran = conn.CreateTransaction(); - var evalResult = tran.ScriptEvaluateAsync(@"redis.call('select', 1) - return redis.call('get','" + key + "')", null, null); - var getResult = tran.StringGetAsync(key); - Assert.True(tran.Execute()); - - Assert.Equal("db 1", (string)await evalResult); - // now, our connection thought it was in db 2, but the script changed to db 1 - Assert.Equal("db 2", await getResult); - } - } - - [Fact] - public void 
TestBasicScripting() - { - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Scripting), f => f.Scripting); - RedisValue newId = Guid.NewGuid().ToString(); - RedisKey key = Me(); - var db = conn.GetDatabase(); - db.KeyDelete(key, CommandFlags.FireAndForget); - db.HashSet(key, "id", 123, flags: CommandFlags.FireAndForget); - - var wasSet = (bool)db.ScriptEvaluate("if redis.call('hexists', KEYS[1], 'UniqueId') then return redis.call('hset', KEYS[1], 'UniqueId', ARGV[1]) else return 0 end", - new [] { key }, new [] { newId }); - - Assert.True(wasSet); - - wasSet = (bool)db.ScriptEvaluate("if redis.call('hexists', KEYS[1], 'UniqueId') then return redis.call('hset', KEYS[1], 'UniqueId', ARGV[1]) else return 0 end", - new [] { key }, new [] { newId }); - Assert.False(wasSet); - } - } - - [Theory] - [InlineData(true)] - [InlineData(false)] - public async Task CheckLoads(bool async) - { - using (var conn0 = Create(allowAdmin: true)) - using (var conn1 = Create(allowAdmin: true)) - { - Skip.IfMissingFeature(conn0, nameof(RedisFeatures.Scripting), f => f.Scripting); - // note that these are on different connections (so we wouldn't expect - // the flush to drop the local cache - assume it is a surprise!) 
- var server = conn0.GetServer(TestConfig.Current.MasterServerAndPort); - var db = conn1.GetDatabase(); - const string script = "return 1;"; - - // start empty - server.ScriptFlush(); - Assert.False(server.ScriptExists(script)); - - // run once, causes to be cached - Assert.True((bool)db.ScriptEvaluate(script)); - Assert.True(server.ScriptExists(script)); - - // can run again - Assert.True((bool)db.ScriptEvaluate(script)); - - // ditch the scripts; should no longer exist - db.Ping(); - server.ScriptFlush(); - Assert.False(server.ScriptExists(script)); - db.Ping(); - - if (async) - { - // now: fails the first time - var ex = await Assert.ThrowsAsync(async () => await db.ScriptEvaluateAsync(script).ForAwait()).ForAwait(); - Assert.Equal("NOSCRIPT No matching script. Please use EVAL.", ex.Message); - } - else - { - // just works; magic - Assert.True((bool)db.ScriptEvaluate(script)); - } - - // but gets marked as unloaded, so we can use it again... - Assert.True((bool)db.ScriptEvaluate(script)); - - // which will cause it to be cached - Assert.True(server.ScriptExists(script)); - } - } - - [Fact] - public void CompareScriptToDirect() - { - const string Script = "return redis.call('incr', KEYS[1])"; - - using (var conn = Create(allowAdmin: true)) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Scripting), f => f.Scripting); - var server = conn.GetServer(TestConfig.Current.MasterServerAndPort); - server.ScriptFlush(); - - server.ScriptLoad(Script); - var db = conn.GetDatabase(); - db.Ping(); // k, we're all up to date now; clean db, minimal script cache - - // we're using a pipeline here, so send 1000 messages, but for timing: only care about the last - const int LOOP = 5000; - RedisKey key = Me(); - RedisKey[] keys = new[] { key }; // script takes an array - - // run via script - db.KeyDelete(key, CommandFlags.FireAndForget); - var watch = Stopwatch.StartNew(); - for (int i = 1; i < LOOP; i++) // the i=1 is to do all-but-one - { - db.ScriptEvaluate(Script, keys, 
flags: CommandFlags.FireAndForget); - } - var scriptResult = db.ScriptEvaluate(Script, keys); // last one we wait for (no F+F) - watch.Stop(); - TimeSpan scriptTime = watch.Elapsed; - - // run via raw op - db.KeyDelete(key, CommandFlags.FireAndForget); - watch = Stopwatch.StartNew(); - for (int i = 1; i < LOOP; i++) // the i=1 is to do all-but-one - { - db.StringIncrement(key, flags: CommandFlags.FireAndForget); - } - var directResult = db.StringIncrement(key); // last one we wait for (no F+F) - watch.Stop(); - TimeSpan directTime = watch.Elapsed; - - Assert.Equal(LOOP, (long)scriptResult); - Assert.Equal(LOOP, directResult); - - Log("script: {0}ms; direct: {1}ms", - scriptTime.TotalMilliseconds, - directTime.TotalMilliseconds); - } - } - - [Fact] - public void TestCallByHash() - { - const string Script = "return redis.call('incr', KEYS[1])"; - - using (var conn = Create(allowAdmin: true)) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Scripting), f => f.Scripting); - var server = conn.GetServer(TestConfig.Current.MasterServerAndPort); - server.ScriptFlush(); - - byte[] hash = server.ScriptLoad(Script); - - var db = conn.GetDatabase(); - var key = Me(); - db.KeyDelete(key, CommandFlags.FireAndForget); - RedisKey[] keys = { key }; - - string hexHash = string.Concat(hash.Select(x => x.ToString("X2"))); - Assert.Equal("2BAB3B661081DB58BD2341920E0BA7CF5DC77B25", hexHash); - - db.ScriptEvaluate(hexHash, keys, flags: CommandFlags.FireAndForget); - db.ScriptEvaluate(hash, keys, flags: CommandFlags.FireAndForget); - - var count = (int)db.StringGet(keys)[0]; - Assert.Equal(2, count); - } - } - - [Fact] - public void SimpleLuaScript() - { - const string Script = "return @ident"; - - using (var conn = Create(allowAdmin: true)) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Scripting), f => f.Scripting); - var server = conn.GetServer(TestConfig.Current.MasterServerAndPort); - server.ScriptFlush(); - - var prepared = LuaScript.Prepare(Script); - - var db = 
conn.GetDatabase(); - - { - var val = prepared.Evaluate(db, new { ident = "hello" }); - Assert.Equal("hello", (string)val); - } - - { - var val = prepared.Evaluate(db, new { ident = 123 }); - Assert.Equal(123, (int)val); - } - - { - var val = prepared.Evaluate(db, new { ident = 123L }); - Assert.Equal(123L, (long)val); - } - - { - var val = prepared.Evaluate(db, new { ident = 1.1 }); - Assert.Equal(1.1, (double)val); - } - - { - var val = prepared.Evaluate(db, new { ident = true }); - Assert.True((bool)val); - } - - { - var val = prepared.Evaluate(db, new { ident = new byte[] { 4, 5, 6 } }); - Assert.True(new byte[] { 4, 5, 6 }.SequenceEqual((byte[])val)); - } - - { - var val = prepared.Evaluate(db, new { ident = new ReadOnlyMemory(new byte[] { 4, 5, 6 }) }); - Assert.True(new byte[] { 4, 5, 6 }.SequenceEqual((byte[])val)); - } - } - } - - [Fact] - public void SimpleRawScriptEvaluate() - { - const string Script = "return ARGV[1]"; - - using (var conn = Create(allowAdmin: true)) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Scripting), f => f.Scripting); - var server = conn.GetServer(TestConfig.Current.MasterServerAndPort); - server.ScriptFlush(); - - var db = conn.GetDatabase(); - - { - var val = db.ScriptEvaluate(Script, values: new RedisValue[] { "hello" }); - Assert.Equal("hello", (string)val); - } - - { - var val = db.ScriptEvaluate(Script, values: new RedisValue[] { 123 }); - Assert.Equal(123, (int)val); - } - - { - var val = db.ScriptEvaluate(Script, values: new RedisValue[] { 123L }); - Assert.Equal(123L, (long)val); - } - - { - var val = db.ScriptEvaluate(Script, values: new RedisValue[] { 1.1 }); - Assert.Equal(1.1, (double)val); - } - - { - var val = db.ScriptEvaluate(Script, values: new RedisValue[] { true }); - Assert.True((bool)val); - } - - { - var val = db.ScriptEvaluate(Script, values: new RedisValue[] { new byte[] { 4, 5, 6 } }); - Assert.True(new byte[] { 4, 5, 6 }.SequenceEqual((byte[])val)); - } - - { - var val = 
db.ScriptEvaluate(Script, values: new RedisValue[] { new ReadOnlyMemory(new byte[] { 4, 5, 6 }) }); - Assert.True(new byte[] { 4, 5, 6 }.SequenceEqual((byte[])val)); - } - } - } - - - [Fact] - public void LuaScriptWithKeys() - { - const string Script = "redis.call('set', @key, @value)"; - - using (var conn = Create(allowAdmin: true)) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Scripting), f => f.Scripting); - var server = conn.GetServer(TestConfig.Current.MasterServerAndPort); - server.ScriptFlush(); - - var script = LuaScript.Prepare(Script); - - var db = conn.GetDatabase(); - var key = Me(); - db.KeyDelete(key, CommandFlags.FireAndForget); - - var p = new { key = (RedisKey)key, value = 123 }; - - script.Evaluate(db, p); - var val = db.StringGet(key); - Assert.Equal(123, (int)val); - - // no super clean way to extract this; so just abuse InternalsVisibleTo - script.ExtractParameters(p, null, out RedisKey[] keys, out _); - Assert.NotNull(keys); - Assert.Single(keys); - Assert.Equal(key, keys[0]); - } - } - - [Fact] - public void NoInlineReplacement() - { - const string Script = "redis.call('set', @key, 'hello@example')"; - using (var conn = Create(allowAdmin: true)) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Scripting), f => f.Scripting); - var server = conn.GetServer(TestConfig.Current.MasterServerAndPort); - server.ScriptFlush(); - - var script = LuaScript.Prepare(Script); - - Assert.Equal("redis.call('set', ARGV[1], 'hello@example')", script.ExecutableScript); - - var db = conn.GetDatabase(); - var key = Me(); - db.KeyDelete(key, CommandFlags.FireAndForget); - - var p = new { key }; - - script.Evaluate(db, p, flags: CommandFlags.FireAndForget); - var val = db.StringGet(key); - Assert.Equal("hello@example", val); - } - } - - [Fact] - public void EscapeReplacement() - { - const string Script = "redis.call('set', @key, @@escapeMe)"; - var script = LuaScript.Prepare(Script); - - Assert.Equal("redis.call('set', ARGV[1], @escapeMe)", 
script.ExecutableScript); - } - - [Fact] - public void SimpleLoadedLuaScript() - { - const string Script = "return @ident"; - - using (var conn = Create(allowAdmin: true)) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Scripting), f => f.Scripting); - var server = conn.GetServer(TestConfig.Current.MasterServerAndPort); - server.ScriptFlush(); - - var prepared = LuaScript.Prepare(Script); - var loaded = prepared.Load(server); - - var db = conn.GetDatabase(); - - { - var val = loaded.Evaluate(db, new { ident = "hello" }); - Assert.Equal("hello", (string)val); - } - - { - var val = loaded.Evaluate(db, new { ident = 123 }); - Assert.Equal(123, (int)val); - } - - { - var val = loaded.Evaluate(db, new { ident = 123L }); - Assert.Equal(123L, (long)val); - } - - { - var val = loaded.Evaluate(db, new { ident = 1.1 }); - Assert.Equal(1.1, (double)val); - } - - { - var val = loaded.Evaluate(db, new { ident = true }); - Assert.True((bool)val); - } - - { - var val = loaded.Evaluate(db, new { ident = new byte[] { 4, 5, 6 } }); - Assert.True(new byte[] { 4, 5, 6 }.SequenceEqual((byte[])val)); - } - - { - var val = loaded.Evaluate(db, new { ident = new ReadOnlyMemory(new byte[] { 4, 5, 6 }) }); - Assert.True(new byte[] { 4, 5, 6 }.SequenceEqual((byte[])val)); - } - } - } - - [Fact] - public void LoadedLuaScriptWithKeys() - { - const string Script = "redis.call('set', @key, @value)"; - - using (var conn = Create(allowAdmin: true)) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Scripting), f => f.Scripting); - var server = conn.GetServer(TestConfig.Current.MasterServerAndPort); - server.ScriptFlush(); - - var script = LuaScript.Prepare(Script); - var prepared = script.Load(server); - - var db = conn.GetDatabase(); - var key = Me(); - db.KeyDelete(key, CommandFlags.FireAndForget); - - var p = new { key = (RedisKey)key, value = 123 }; - - prepared.Evaluate(db, p, flags: CommandFlags.FireAndForget); - var val = db.StringGet(key); - Assert.Equal(123, (int)val); - - // no 
super clean way to extract this; so just abuse InternalsVisibleTo - prepared.Original.ExtractParameters(p, null, out RedisKey[] keys, out _); - Assert.NotNull(keys); - Assert.Single(keys); - Assert.Equal(key, keys[0]); - } - } - - [Fact] - public void PurgeLuaScriptCache() - { - const string Script = "redis.call('set', @PurgeLuaScriptCacheKey, @PurgeLuaScriptCacheValue)"; - var first = LuaScript.Prepare(Script); - var fromCache = LuaScript.Prepare(Script); - - Assert.True(ReferenceEquals(first, fromCache)); - - LuaScript.PurgeCache(); - var shouldBeNew = LuaScript.Prepare(Script); - - Assert.False(ReferenceEquals(first, shouldBeNew)); - } - - private static void _PurgeLuaScriptOnFinalize(string script) - { - var first = LuaScript.Prepare(script); - var fromCache = LuaScript.Prepare(script); - Assert.True(ReferenceEquals(first, fromCache)); - Assert.Equal(1, LuaScript.GetCachedScriptCount()); - } - - [FactLongRunning] - public void PurgeLuaScriptOnFinalize() - { - const string Script = "redis.call('set', @PurgeLuaScriptOnFinalizeKey, @PurgeLuaScriptOnFinalizeValue)"; - LuaScript.PurgeCache(); - Assert.Equal(0, LuaScript.GetCachedScriptCount()); - - // This has to be a separate method to guarantee that the created LuaScript objects go out of scope, - // and are thus available to be GC'd - _PurgeLuaScriptOnFinalize(Script); - CollectGarbage(); - - Assert.Equal(0, LuaScript.GetCachedScriptCount()); - - var shouldBeNew = LuaScript.Prepare(Script); - Assert.Equal(1, LuaScript.GetCachedScriptCount()); - } - - [Fact] - public void IDatabaseLuaScriptConvenienceMethods() - { - const string Script = "redis.call('set', @key, @value)"; - - using (var conn = Create(allowAdmin: true)) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Scripting), f => f.Scripting); - var script = LuaScript.Prepare(Script); - var db = conn.GetDatabase(); - var key = Me(); - db.KeyDelete(key, CommandFlags.FireAndForget); - db.ScriptEvaluate(script, new { key = (RedisKey)key, value = "value" }, 
flags: CommandFlags.FireAndForget); - var val = db.StringGet(key); - Assert.Equal("value", val); - - var prepared = script.Load(conn.GetServer(conn.GetEndPoints()[0])); - - db.ScriptEvaluate(prepared, new { key = (RedisKey)(key + "2"), value = "value2" }, flags: CommandFlags.FireAndForget); - var val2 = db.StringGet(key + "2"); - Assert.Equal("value2", val2); - } - } - - [Fact] - public void IServerLuaScriptConvenienceMethods() - { - const string Script = "redis.call('set', @key, @value)"; - - using (var conn = Create(allowAdmin: true)) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Scripting), f => f.Scripting); - var script = LuaScript.Prepare(Script); - var server = conn.GetServer(conn.GetEndPoints()[0]); - var db = conn.GetDatabase(); - var key = Me(); - db.KeyDelete(key, CommandFlags.FireAndForget); - - var prepared = server.ScriptLoad(script); - - db.ScriptEvaluate(prepared, new { key = (RedisKey)key, value = "value3" }); - var val = db.StringGet(key); - Assert.Equal("value3", val); - } - } - - [Fact] - public void LuaScriptPrefixedKeys() - { - const string Script = "redis.call('set', @key, @value)"; - var prepared = LuaScript.Prepare(Script); - var key = Me(); - var p = new { key = (RedisKey)key, value = "hello" }; - - // no super clean way to extract this; so just abuse InternalsVisibleTo - prepared.ExtractParameters(p, "prefix-", out RedisKey[] keys, out RedisValue[] args); - Assert.NotNull(keys); - Assert.Single(keys); - Assert.Equal("prefix-" + key, keys[0]); - Assert.Equal(2, args.Length); - Assert.Equal("prefix-" + key, args[0]); - Assert.Equal("hello", args[1]); - } - - [Fact] - public void LuaScriptWithWrappedDatabase() - { - const string Script = "redis.call('set', @key, @value)"; - - using (var conn = Create(allowAdmin: true)) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Scripting), f => f.Scripting); - var db = conn.GetDatabase(); - var wrappedDb = DatabaseExtensions.WithKeyPrefix(db, "prefix-"); - var key = Me(); - 
db.KeyDelete(key, CommandFlags.FireAndForget); - - var prepared = LuaScript.Prepare(Script); - wrappedDb.ScriptEvaluate(prepared, new { key = (RedisKey)key, value = 123 }); - var val1 = wrappedDb.StringGet(key); - Assert.Equal(123, (int)val1); - - var val2 = db.StringGet("prefix-" + key); - Assert.Equal(123, (int)val2); - - var val3 = db.StringGet(key); - Assert.True(val3.IsNull); - } - } - - [Fact] - public async Task AsyncLuaScriptWithWrappedDatabase() - { - const string Script = "redis.call('set', @key, @value)"; - - using (var conn = Create(allowAdmin: true)) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Scripting), f => f.Scripting); - var db = conn.GetDatabase(); - var wrappedDb = DatabaseExtensions.WithKeyPrefix(db, "prefix-"); - var key = Me(); - await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); - - var prepared = LuaScript.Prepare(Script); - await wrappedDb.ScriptEvaluateAsync(prepared, new { key = (RedisKey)key, value = 123 }); - var val1 = await wrappedDb.StringGetAsync(key); - Assert.Equal(123, (int)val1); - - var val2 = await db.StringGetAsync("prefix-" + key); - Assert.Equal(123, (int)val2); - - var val3 = await db.StringGetAsync(key); - Assert.True(val3.IsNull); - } - } - - [Fact] - public void LoadedLuaScriptWithWrappedDatabase() - { - const string Script = "redis.call('set', @key, @value)"; - - using (var conn = Create(allowAdmin: true)) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Scripting), f => f.Scripting); - var db = conn.GetDatabase(); - var wrappedDb = DatabaseExtensions.WithKeyPrefix(db, "prefix2-"); - var key = Me(); - db.KeyDelete(key, CommandFlags.FireAndForget); - - var server = conn.GetServer(conn.GetEndPoints()[0]); - var prepared = LuaScript.Prepare(Script).Load(server); - wrappedDb.ScriptEvaluate(prepared, new { key = (RedisKey)key, value = 123 }, flags: CommandFlags.FireAndForget); - var val1 = wrappedDb.StringGet(key); - Assert.Equal(123, (int)val1); - - var val2 = db.StringGet("prefix2-" + key); - 
Assert.Equal(123, (int)val2); - - var val3 = db.StringGet(key); - Assert.True(val3.IsNull); - } - } - - [Fact] - public async Task AsyncLoadedLuaScriptWithWrappedDatabase() - { - const string Script = "redis.call('set', @key, @value)"; - - using (var conn = Create(allowAdmin: true)) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Scripting), f => f.Scripting); - var db = conn.GetDatabase(); - var wrappedDb = DatabaseExtensions.WithKeyPrefix(db, "prefix2-"); - var key = Me(); - await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); - - var server = conn.GetServer(conn.GetEndPoints()[0]); - var prepared = await LuaScript.Prepare(Script).LoadAsync(server); - await wrappedDb.ScriptEvaluateAsync(prepared, new { key = (RedisKey)key, value = 123 }, flags: CommandFlags.FireAndForget); - var val1 = await wrappedDb.StringGetAsync(key); - Assert.Equal(123, (int)val1); - - var val2 = await db.StringGetAsync("prefix2-" + key); - Assert.Equal(123, (int)val2); - - var val3 = await db.StringGetAsync(key); - Assert.True(val3.IsNull); - } - } - - [Fact] - public void ScriptWithKeyPrefixViaTokens() - { - using (var conn = Create()) - { - var p = conn.GetDatabase().WithKeyPrefix("prefix/"); - - var args = new { x = "abc", y = (RedisKey)"def", z = 123 }; - var script = LuaScript.Prepare(@" -local arr = {}; -arr[1] = @x; -arr[2] = @y; -arr[3] = @z; -return arr; -"); - var result = (RedisValue[])p.ScriptEvaluate(script, args); - Assert.Equal("abc", result[0]); - Assert.Equal("prefix/def", result[1]); - Assert.Equal("123", result[2]); - } - } - - [Fact] - public void ScriptWithKeyPrefixViaArrays() - { - using (var conn = Create()) - { - var p = conn.GetDatabase().WithKeyPrefix("prefix/"); - - const string script = @" -local arr = {}; -arr[1] = ARGV[1]; -arr[2] = KEYS[1]; -arr[3] = ARGV[2]; -return arr; -"; - var result = (RedisValue[])p.ScriptEvaluate(script, new RedisKey[] { "def" }, new RedisValue[] { "abc", 123 }); - Assert.Equal("abc", result[0]); - 
Assert.Equal("prefix/def", result[1]); - Assert.Equal("123", result[2]); - } - } - - [Fact] - public void ScriptWithKeyPrefixCompare() - { - using (var conn = Create()) - { - var p = conn.GetDatabase().WithKeyPrefix("prefix/"); - var args = new { k = (RedisKey)"key", s = "str", v = 123 }; - LuaScript lua = LuaScript.Prepare(@"return {@k, @s, @v}"); - var viaArgs = (RedisValue[])p.ScriptEvaluate(lua, args); - - var viaArr = (RedisValue[])p.ScriptEvaluate(@"return {KEYS[1], ARGV[1], ARGV[2]}", new[] { args.k }, new RedisValue[] { args.s, args.v }); - Assert.Equal(string.Join(",", viaArr), string.Join(",", viaArgs)); - } - } - - [Fact] - public void RedisResultUnderstandsNullArrayArray() => TestNullArray(RedisResult.NullArray); - [Fact] - public void RedisResultUnderstandsNullArrayNull() => TestNullArray(null); - - static void TestNullArray(RedisResult value) - { - Assert.True(value == null || value.IsNull); - - Assert.Null((RedisValue[])value); - Assert.Null((RedisKey[])value); - Assert.Null((bool[])value); - Assert.Null((long[])value); - Assert.Null((ulong[])value); - Assert.Null((string[])value); - Assert.Null((int[])value); - Assert.Null((double[])value); - Assert.Null((byte[][])value); - Assert.Null((RedisResult[])value); - } - - [Fact] - public void RedisResultUnderstandsNullNull() => TestNullValue(null); - [Fact] - public void RedisResultUnderstandsNullValue() => TestNullValue(RedisResult.Create(RedisValue.Null, ResultType.None)); - - static void TestNullValue(RedisResult value) - { - Assert.True(value == null || value.IsNull); - - Assert.True(((RedisValue)value).IsNull); - Assert.True(((RedisKey)value).IsNull); - Assert.Null((bool?)value); - Assert.Null((long?)value); - Assert.Null((ulong?)value); - Assert.Null((string)value); - Assert.Null((double?)value); - Assert.Null((byte[])value); - } - } -} diff --git a/tests/StackExchange.Redis.Tests/ScriptingTests.cs b/tests/StackExchange.Redis.Tests/ScriptingTests.cs new file mode 100644 index 000000000..15ea6adb1 
--- /dev/null +++ b/tests/StackExchange.Redis.Tests/ScriptingTests.cs @@ -0,0 +1,1156 @@ +#if NET // Since we're flushing and reloading scripts, only run this in once suite +using System; +using System.Diagnostics; +using System.Linq; +using System.Security.Cryptography; +using System.Text; +using System.Threading.Tasks; +using StackExchange.Redis.KeyspaceIsolation; +using Xunit; + +// ReSharper disable UseAwaitUsing # for consistency with existing tests +// ReSharper disable MethodHasAsyncOverload # grandfathered existing usage +// ReSharper disable StringLiteralTypo # because of Lua scripts +namespace StackExchange.Redis.Tests; + +[RunPerProtocol] +public class ScriptingTests(ITestOutputHelper output, SharedConnectionFixture fixture) : TestBase(output, fixture) +{ + private IConnectionMultiplexer GetScriptConn(bool allowAdmin = false) + { + int syncTimeout = 5000; + if (Debugger.IsAttached) syncTimeout = 500000; + return Create(allowAdmin: allowAdmin, syncTimeout: syncTimeout, require: RedisFeatures.v2_6_0); + } + + [Fact] + public async Task ClientScripting() + { + await using var conn = GetScriptConn(); + _ = conn.GetDatabase().ScriptEvaluate(script: "return redis.call('info','server')", keys: null, values: null); + } + + [Fact] + public async Task BasicScripting() + { + await using var conn = GetScriptConn(); + + var db = conn.GetDatabase(); + var noCache = db.ScriptEvaluateAsync( + script: "return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}", + keys: ["key1", "key2"], + values: ["first", "second"]); + var cache = db.ScriptEvaluateAsync( + script: "return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}", + keys: ["key1", "key2"], + values: ["first", "second"]); + var results = (string[]?)(await noCache)!; + Assert.NotNull(results); + Assert.Equal(4, results.Length); + Assert.Equal("key1", results[0]); + Assert.Equal("key2", results[1]); + Assert.Equal("first", results[2]); + Assert.Equal("second", results[3]); + + results = (string[]?)(await cache)!; + Assert.NotNull(results); + 
Assert.Equal(4, results.Length); + Assert.Equal("key1", results[0]); + Assert.Equal("key2", results[1]); + Assert.Equal("first", results[2]); + Assert.Equal("second", results[3]); + } + + [Fact] + public async Task KeysScripting() + { + await using var conn = GetScriptConn(); + + var db = conn.GetDatabase(); + var key = Me(); + db.StringSet(key, "bar", flags: CommandFlags.FireAndForget); + var result = (string?)db.ScriptEvaluate(script: "return redis.call('get', KEYS[1])", keys: [key], values: null); + Assert.Equal("bar", result); + } + + [Fact] + public async Task TestRandomThingFromForum() + { + const string Script = """ + local currentVal = tonumber(redis.call('GET', KEYS[1])); + if (currentVal <= 0 ) then return 1 elseif (currentVal - (tonumber(ARGV[1])) < 0 ) then return 0 end; + return redis.call('INCRBY', KEYS[1], -tonumber(ARGV[1])); + """; + + await using var conn = GetScriptConn(); + + var prefix = Me(); + var db = conn.GetDatabase(); + db.StringSet(prefix + "A", "0", flags: CommandFlags.FireAndForget); + db.StringSet(prefix + "B", "5", flags: CommandFlags.FireAndForget); + db.StringSet(prefix + "C", "10", flags: CommandFlags.FireAndForget); + + var a = db.ScriptEvaluateAsync(script: Script, keys: [prefix + "A"], values: [6]).ForAwait(); + var b = db.ScriptEvaluateAsync(script: Script, keys: [prefix + "B"], values: [6]).ForAwait(); + var c = db.ScriptEvaluateAsync(script: Script, keys: [prefix + "C"], values: [6]).ForAwait(); + + var values = await db.StringGetAsync([prefix + "A", prefix + "B", prefix + "C"]).ForAwait(); + + Assert.Equal(1, (long)await a); // exit code when current val is non-positive + Assert.Equal(0, (long)await b); // exit code when result would be negative + Assert.Equal(4, (long)await c); // 10 - 6 = 4 + Assert.Equal("0", values[0]); + Assert.Equal("5", values[1]); + Assert.Equal("4", values[2]); + } + + [Fact] + public async Task MultiIncrWithoutReplies() + { + await using var conn = GetScriptConn(); + + var db = conn.GetDatabase(); 
+ var prefix = Me(); + // prime some initial values + db.KeyDelete([prefix + "a", prefix + "b", prefix + "c"], CommandFlags.FireAndForget); + db.StringIncrement(prefix + "b", flags: CommandFlags.FireAndForget); + db.StringIncrement(prefix + "c", flags: CommandFlags.FireAndForget); + db.StringIncrement(prefix + "c", flags: CommandFlags.FireAndForget); + + // run the script, passing "a", "b", "c", "c" to + // increment a & b by 1, c twice + var result = db.ScriptEvaluateAsync( + script: "for i,key in ipairs(KEYS) do redis.call('incr', key) end", + keys: [prefix + "a", prefix + "b", prefix + "c", prefix + "c"], // <== aka "KEYS" in the script + values: null).ForAwait(); // <== aka "ARGV" in the script + + // check the incremented values + var a = db.StringGetAsync(prefix + "a").ForAwait(); + var b = db.StringGetAsync(prefix + "b").ForAwait(); + var c = db.StringGetAsync(prefix + "c").ForAwait(); + + var r = await result; + Assert.NotNull(r); + Assert.True(r.IsNull, "result"); + Assert.Equal(1, (long)await a); + Assert.Equal(2, (long)await b); + Assert.Equal(4, (long)await c); + } + + [Fact] + public async Task MultiIncrByWithoutReplies() + { + await using var conn = GetScriptConn(); + + var db = conn.GetDatabase(); + var prefix = Me(); + // prime some initial values + db.KeyDelete([prefix + "a", prefix + "b", prefix + "c"], CommandFlags.FireAndForget); + db.StringIncrement(prefix + "b", flags: CommandFlags.FireAndForget); + db.StringIncrement(prefix + "c", flags: CommandFlags.FireAndForget); + db.StringIncrement(prefix + "c", flags: CommandFlags.FireAndForget); + + // run the script, passing "a", "b", "c" and 1,2,3 + // increment a & b by 1, c twice + var result = db.ScriptEvaluateAsync( + script: "for i,key in ipairs(KEYS) do redis.call('incrby', key, ARGV[i]) end", + keys: [prefix + "a", prefix + "b", prefix + "c"], // <== aka "KEYS" in the script + values: [1, 1, 2]).ForAwait(); // <== aka "ARGV" in the script + + // check the incremented values + var a = 
db.StringGetAsync(prefix + "a").ForAwait(); + var b = db.StringGetAsync(prefix + "b").ForAwait(); + var c = db.StringGetAsync(prefix + "c").ForAwait(); + + Assert.True((await result).IsNull, "result"); + Assert.Equal(1, (long)await a); + Assert.Equal(2, (long)await b); + Assert.Equal(4, (long)await c); + } + + [Fact] + public async Task DisableStringInference() + { + await using var conn = GetScriptConn(); + + var db = conn.GetDatabase(); + var key = Me(); + db.StringSet(key, "bar", flags: CommandFlags.FireAndForget); + var result = (byte[]?)db.ScriptEvaluate(script: "return redis.call('get', KEYS[1])", keys: [key]); + Assert.NotNull(result); + Assert.Equal("bar", Encoding.UTF8.GetString(result)); + } + + [Fact] + public async Task FlushDetection() + { + // we don't expect this to handle everything; we just expect it to be predictable + await using var conn = GetScriptConn(allowAdmin: true); + + var db = conn.GetDatabase(); + var key = Me(); + db.StringSet(key, "bar", flags: CommandFlags.FireAndForget); + var result = (string?)db.ScriptEvaluate(script: "return redis.call('get', KEYS[1])", keys: [key], values: null); + Assert.Equal("bar", result); + + // now cause all kinds of problems + GetServer(conn).ScriptFlush(); + + // expect this one to fail just work fine (self-fix) + db.ScriptEvaluate(script: "return redis.call('get', KEYS[1])", keys: [key], values: null); + + result = (string?)db.ScriptEvaluate(script: "return redis.call('get', KEYS[1])", keys: [key], values: null); + Assert.Equal("bar", result); + } + + [Fact] + public async Task PrepareScript() + { + string[] scripts = ["return redis.call('get', KEYS[1])", "return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}"]; + await using (var conn = GetScriptConn(allowAdmin: true)) + { + var server = GetServer(conn); + server.ScriptFlush(); + + // when vanilla + server.ScriptLoad(scripts[0]); + server.ScriptLoad(scripts[1]); + + // when known to exist + server.ScriptLoad(scripts[0]); + server.ScriptLoad(scripts[1]); + } + await 
using (var conn = GetScriptConn()) + { + var server = GetServer(conn); + + // when vanilla + server.ScriptLoad(scripts[0]); + server.ScriptLoad(scripts[1]); + + // when known to exist + server.ScriptLoad(scripts[0]); + server.ScriptLoad(scripts[1]); + + // when known to exist + server.ScriptLoad(scripts[0]); + server.ScriptLoad(scripts[1]); + } + } + + [Fact] + public async Task NonAsciiScripts() + { + await using var conn = GetScriptConn(); + + const string Evil = "return '僕'"; + var db = conn.GetDatabase(); + GetServer(conn).ScriptLoad(Evil); + + var result = (string?)db.ScriptEvaluate(script: Evil, keys: null, values: null); + Assert.Equal("僕", result); + } + + [Fact] + public async Task ScriptThrowsError() + { + await using var conn = GetScriptConn(); + await Assert.ThrowsAsync(async () => + { + var db = conn.GetDatabase(); + try + { + await db.ScriptEvaluateAsync(script: "return redis.error_reply('oops')", keys: null, values: null).ForAwait(); + } + catch (AggregateException ex) + { + throw ex.InnerExceptions[0]; + } + }).ForAwait(); + } + + [Fact] + public async Task ScriptThrowsErrorInsideTransaction() + { + await using var conn = GetScriptConn(); + + var key = Me(); + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + var beforeTran = (string?)db.StringGet(key); + Assert.Null(beforeTran); + var tran = db.CreateTransaction(); + { + var a = tran.StringIncrementAsync(key); + var b = tran.ScriptEvaluateAsync(script: "return redis.error_reply('oops')", keys: null, values: null); + var c = tran.StringIncrementAsync(key); + var complete = tran.ExecuteAsync(); + + Assert.True(conn.Wait(complete)); + Assert.True(QuickWait(a).IsCompleted, a.Status.ToString()); + Assert.True(QuickWait(c).IsCompleted, "State: " + c.Status); + Assert.Equal(1L, a.Result); + Assert.Equal(2L, c.Result); + + Assert.True(QuickWait(b).IsFaulted, "should be faulted"); + Assert.NotNull(b.Exception); + Assert.Single(b.Exception.InnerExceptions); + var ex = 
b.Exception.InnerExceptions.Single(); + Assert.IsType(ex); + // 7.0 slightly changes the error format, accept either. + Assert.Contains(ex.Message, new[] { "ERR oops", "oops" }); + } + var afterTran = db.StringGetAsync(key); + Assert.Equal(2L, (long)db.Wait(afterTran)); + } + private static Task QuickWait(Task task) + { + if (!task.IsCompleted) + { + try { task.Wait(200); } catch { /* But don't error */ } + } + return task; + } + + [Fact] + public async Task ChangeDbInScript() + { + await using var conn = GetScriptConn(); + + var key = Me(); + conn.GetDatabase(1).StringSet(key, "db 1", flags: CommandFlags.FireAndForget); + conn.GetDatabase(2).StringSet(key, "db 2", flags: CommandFlags.FireAndForget); + + Log("Key: " + key); + var db = conn.GetDatabase(2); + var evalResult = db.ScriptEvaluateAsync( + script: @"redis.call('select', 1) + return redis.call('get','" + key + "')", + keys: null, + values: null); + var getResult = db.StringGetAsync(key); + + Assert.Equal("db 1", (string?)await evalResult); + // now, our connection thought it was in db 2, but the script changed to db 1 + Assert.Equal("db 2", await getResult); + } + + [Fact] + public async Task ChangeDbInTranScript() + { + await using var conn = GetScriptConn(); + + var key = Me(); + conn.GetDatabase(1).StringSet(key, "db 1", flags: CommandFlags.FireAndForget); + conn.GetDatabase(2).StringSet(key, "db 2", flags: CommandFlags.FireAndForget); + + var db = conn.GetDatabase(2); + var tran = db.CreateTransaction(); + var evalResult = tran.ScriptEvaluateAsync( + script: @"redis.call('select', 1) + return redis.call('get','" + key + "')", + keys: null, + values: null); + var getResult = tran.StringGetAsync(key); + Assert.True(tran.Execute()); + + Assert.Equal("db 1", (string?)await evalResult); + // now, our connection thought it was in db 2, but the script changed to db 1 + Assert.Equal("db 2", await getResult); + } + + [Fact] + public async Task TestBasicScripting() + { + await using var conn = Create(require: 
RedisFeatures.v2_6_0); + + RedisValue newId = Guid.NewGuid().ToString(); + RedisKey key = Me(); + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + db.HashSet(key, "id", 123, flags: CommandFlags.FireAndForget); + + var wasSet = (bool)db.ScriptEvaluate( + script: "if redis.call('hexists', KEYS[1], 'UniqueId') then return redis.call('hset', KEYS[1], 'UniqueId', ARGV[1]) else return 0 end", + keys: [key], + values: [newId]); + + Assert.True(wasSet); + + wasSet = (bool)db.ScriptEvaluate( + script: "if redis.call('hexists', KEYS[1], 'UniqueId') then return redis.call('hset', KEYS[1], 'UniqueId', ARGV[1]) else return 0 end", + keys: [key], + values: [newId]); + Assert.False(wasSet); + } + + [Theory] + [InlineData(true)] + [InlineData(false)] + public async Task CheckLoads(bool async) + { + await using var conn0 = Create(allowAdmin: true, require: RedisFeatures.v2_6_0); + await using var conn1 = Create(allowAdmin: true, require: RedisFeatures.v2_6_0); + + // note that these are on different connections (so we wouldn't expect + // the flush to drop the local cache - assume it is a surprise!) + var server = conn0.GetServer(TestConfig.Current.PrimaryServerAndPort); + var db = conn1.GetDatabase(); + var key = Me(); + var Script = $"return '{key}';"; + + // start empty + server.ScriptFlush(); + Assert.False(server.ScriptExists(Script)); + + // run once, causes to be cached + Assert.Equal(key, await EvaluateScript()); + + Assert.True(server.ScriptExists(Script)); + + // can run again + Assert.Equal(key, await EvaluateScript()); + + // ditch the scripts; should no longer exist + await db.PingAsync(); + server.ScriptFlush(); + Assert.False(server.ScriptExists(Script)); + await db.PingAsync(); + + // just works; magic + Assert.Equal(key, await EvaluateScript()); + + // but gets marked as unloaded, so we can use it again... 
+ Assert.Equal(key, await EvaluateScript()); + + // which will cause it to be cached + Assert.True(server.ScriptExists(Script)); + + async Task EvaluateScript() + { + return async ? + (string?)await db.ScriptEvaluateAsync(script: Script) : + (string?)db.ScriptEvaluate(script: Script); + } + } + + [Fact] + public async Task CompareScriptToDirect() + { + Skip.UnlessLongRunning(); + await using var conn = Create(allowAdmin: true, require: RedisFeatures.v2_6_0); + + const string Script = "return redis.call('incr', KEYS[1])"; + var server = conn.GetServer(TestConfig.Current.PrimaryServerAndPort); + server.ScriptFlush(); + + server.ScriptLoad(Script); + var db = conn.GetDatabase(); + await db.PingAsync(); // k, we're all up to date now; clean db, minimal script cache + + // we're using a pipeline here, so send 1000 messages, but for timing: only care about the last + const int Loop = 5000; + RedisKey key = Me(); + RedisKey[] keys = [key]; // script takes an array + + // run via script + db.KeyDelete(key, CommandFlags.FireAndForget); + var watch = Stopwatch.StartNew(); + for (int i = 1; i < Loop; i++) // the i=1 is to do all-but-one + { + db.ScriptEvaluate(script: Script, keys: keys, flags: CommandFlags.FireAndForget); + } + var scriptResult = db.ScriptEvaluate(script: Script, keys: keys); // last one we wait for (no F+F) + watch.Stop(); + TimeSpan scriptTime = watch.Elapsed; + + // run via raw op + db.KeyDelete(key, CommandFlags.FireAndForget); + watch = Stopwatch.StartNew(); + for (int i = 1; i < Loop; i++) // the i=1 is to do all-but-one + { + db.StringIncrement(key, flags: CommandFlags.FireAndForget); + } + var directResult = db.StringIncrement(key); // last one we wait for (no F+F) + watch.Stop(); + TimeSpan directTime = watch.Elapsed; + + Assert.Equal(Loop, (long)scriptResult); + Assert.Equal(Loop, directResult); + + Log("script: {0}ms; direct: {1}ms", scriptTime.TotalMilliseconds, directTime.TotalMilliseconds); + } + + [Fact] + public async Task TestCallByHash() + 
{ + await using var conn = Create(allowAdmin: true, require: RedisFeatures.v2_6_0); + + const string Script = "return redis.call('incr', KEYS[1])"; + var server = conn.GetServer(TestConfig.Current.PrimaryServerAndPort); + server.ScriptFlush(); + + byte[] hash = server.ScriptLoad(Script); + Assert.NotNull(hash); + + var db = conn.GetDatabase(); + var key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + RedisKey[] keys = [key]; + + string hexHash = string.Concat(hash.Select(x => x.ToString("X2"))); + Assert.Equal("2BAB3B661081DB58BD2341920E0BA7CF5DC77B25", hexHash); + + db.ScriptEvaluate(script: hexHash, keys: keys, flags: CommandFlags.FireAndForget); + db.ScriptEvaluate(hash, keys, flags: CommandFlags.FireAndForget); + + var count = (int)db.StringGet(keys)[0]; + Assert.Equal(2, count); + } + + [Fact] + public async Task SimpleLuaScript() + { + await using var conn = Create(allowAdmin: true, require: RedisFeatures.v2_6_0); + + const string Script = "return @ident"; + var server = conn.GetServer(TestConfig.Current.PrimaryServerAndPort); + server.ScriptFlush(); + + var prepared = LuaScript.Prepare(Script); + + var db = conn.GetDatabase(); + + // Scopes for repeated use + { + var val = prepared.Evaluate(db, new { ident = "hello" }); + Assert.Equal("hello", (string?)val); + } + + { + var val = prepared.Evaluate(db, new { ident = 123 }); + Assert.Equal(123, (int)val); + } + + { + var val = prepared.Evaluate(db, new { ident = 123L }); + Assert.Equal(123L, (long)val); + } + + { + var val = prepared.Evaluate(db, new { ident = 1.1 }); + Assert.Equal(1.1, (double)val); + } + + { + var val = prepared.Evaluate(db, new { ident = true }); + Assert.True((bool)val); + } + + { + var val = prepared.Evaluate(db, new { ident = new byte[] { 4, 5, 6 } }); + var valArray = (byte[]?)val; + Assert.NotNull(valArray); + Assert.True(new byte[] { 4, 5, 6 }.SequenceEqual(valArray)); + } + + { + var val = prepared.Evaluate(db, new { ident = new ReadOnlyMemory([4, 5, 6]) }); + var 
valArray = (byte[]?)val; + Assert.NotNull(valArray); + Assert.True(new byte[] { 4, 5, 6 }.SequenceEqual(valArray)); + } + } + + [Fact] + public async Task SimpleRawScriptEvaluate() + { + await using var conn = Create(allowAdmin: true, require: RedisFeatures.v2_6_0); + + const string Script = "return ARGV[1]"; + var server = conn.GetServer(TestConfig.Current.PrimaryServerAndPort); + server.ScriptFlush(); + + var db = conn.GetDatabase(); + + // Scopes for repeated use + { + var val = db.ScriptEvaluate(script: Script, values: ["hello"]); + Assert.Equal("hello", (string?)val); + } + + { + var val = db.ScriptEvaluate(script: Script, values: [123]); + Assert.Equal(123, (int)val); + } + + { + var val = db.ScriptEvaluate(script: Script, values: [123L]); + Assert.Equal(123L, (long)val); + } + + { + var val = db.ScriptEvaluate(script: Script, values: [1.1]); + Assert.Equal(1.1, (double)val); + } + + { + var val = db.ScriptEvaluate(script: Script, values: [true]); + Assert.True((bool)val); + } + + { + var val = db.ScriptEvaluate(script: Script, values: [new byte[] { 4, 5, 6 }]); + var valArray = (byte[]?)val; + Assert.NotNull(valArray); + Assert.True(new byte[] { 4, 5, 6 }.SequenceEqual(valArray)); + } + + { + var val = db.ScriptEvaluate(script: Script, values: [new ReadOnlyMemory([4, 5, 6])]); + var valArray = (byte[]?)val; + Assert.NotNull(valArray); + Assert.True(new byte[] { 4, 5, 6 }.SequenceEqual(valArray)); + } + } + + [Fact] + public async Task LuaScriptWithKeys() + { + await using var conn = Create(allowAdmin: true, require: RedisFeatures.v2_6_0); + + const string Script = "redis.call('set', @key, @value)"; + var server = conn.GetServer(TestConfig.Current.PrimaryServerAndPort); + server.ScriptFlush(); + + var script = LuaScript.Prepare(Script); + + var db = conn.GetDatabase(); + var key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + + var p = new { key = (RedisKey)key, value = 123 }; + + script.Evaluate(db, p); + var val = db.StringGet(key); + 
Assert.Equal(123, (int)val); + + // no super clean way to extract this; so just abuse InternalsVisibleTo + script.ExtractParameters(p, null, out RedisKey[]? keys, out _); + Assert.NotNull(keys); + Assert.Single(keys); + Assert.Equal(key, keys[0]); + } + + [Fact] + public async Task NoInlineReplacement() + { + await using var conn = Create(allowAdmin: true, require: RedisFeatures.v2_6_0); + + const string Script = "redis.call('set', @key, 'hello@example')"; + var server = conn.GetServer(TestConfig.Current.PrimaryServerAndPort); + server.ScriptFlush(); + + var script = LuaScript.Prepare(Script); + + Assert.Equal("redis.call('set', ARGV[1], 'hello@example')", script.ExecutableScript); + + var db = conn.GetDatabase(); + var key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + + var p = new { key }; + + script.Evaluate(db, p, flags: CommandFlags.FireAndForget); + var val = db.StringGet(key); + Assert.Equal("hello@example", val); + } + + [Fact] + public void EscapeReplacement() + { + const string Script = "redis.call('set', @key, @@escapeMe)"; + var script = LuaScript.Prepare(Script); + + Assert.Equal("redis.call('set', ARGV[1], @escapeMe)", script.ExecutableScript); + } + + [Fact] + public async Task SimpleLoadedLuaScript() + { + await using var conn = Create(allowAdmin: true, require: RedisFeatures.v2_6_0); + + const string Script = "return @ident"; + var server = conn.GetServer(TestConfig.Current.PrimaryServerAndPort); + server.ScriptFlush(); + + var prepared = LuaScript.Prepare(Script); + var loaded = prepared.Load(server); + + var db = conn.GetDatabase(); + + // Scopes for repeated use + { + var val = loaded.Evaluate(db, new { ident = "hello" }); + Assert.Equal("hello", (string?)val); + } + + { + var val = loaded.Evaluate(db, new { ident = 123 }); + Assert.Equal(123, (int)val); + } + + { + var val = loaded.Evaluate(db, new { ident = 123L }); + Assert.Equal(123L, (long)val); + } + + { + var val = loaded.Evaluate(db, new { ident = 1.1 }); + Assert.Equal(1.1, 
(double)val); + } + + { + var val = loaded.Evaluate(db, new { ident = true }); + Assert.True((bool)val); + } + + { + var val = loaded.Evaluate(db, new { ident = new byte[] { 4, 5, 6 } }); + var valArray = (byte[]?)val; + Assert.NotNull(valArray); + Assert.True(new byte[] { 4, 5, 6 }.SequenceEqual(valArray)); + } + + { + var val = loaded.Evaluate(db, new { ident = new ReadOnlyMemory([4, 5, 6]) }); + var valArray = (byte[]?)val; + Assert.NotNull(valArray); + Assert.True(new byte[] { 4, 5, 6 }.SequenceEqual(valArray)); + } + } + + [Fact] + public async Task LoadedLuaScriptWithKeys() + { + await using var conn = Create(allowAdmin: true, require: RedisFeatures.v2_6_0); + + const string Script = "redis.call('set', @key, @value)"; + var server = conn.GetServer(TestConfig.Current.PrimaryServerAndPort); + server.ScriptFlush(); + + var script = LuaScript.Prepare(Script); + var prepared = script.Load(server); + + var db = conn.GetDatabase(); + var key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + + var p = new { key = (RedisKey)key, value = 123 }; + + prepared.Evaluate(db, p, flags: CommandFlags.FireAndForget); + var val = db.StringGet(key); + Assert.Equal(123, (int)val); + + // no super clean way to extract this; so just abuse InternalsVisibleTo + prepared.Original.ExtractParameters(p, null, out RedisKey[]? 
keys, out _); + Assert.NotNull(keys); + Assert.Single(keys); + Assert.Equal(key, keys[0]); + } + + [Fact] + public void PurgeLuaScriptCache() + { + const string Script = "redis.call('set', @PurgeLuaScriptCacheKey, @PurgeLuaScriptCacheValue)"; + var first = LuaScript.Prepare(Script); + var fromCache = LuaScript.Prepare(Script); + + Assert.True(ReferenceEquals(first, fromCache)); + + LuaScript.PurgeCache(); + var shouldBeNew = LuaScript.Prepare(Script); + + Assert.False(ReferenceEquals(first, shouldBeNew)); + } + + private static void PurgeLuaScriptOnFinalizeImpl(string script) + { + var first = LuaScript.Prepare(script); + var fromCache = LuaScript.Prepare(script); + Assert.True(ReferenceEquals(first, fromCache)); + Assert.Equal(1, LuaScript.GetCachedScriptCount()); + } + + [Fact] + public void PurgeLuaScriptOnFinalize() + { + Skip.UnlessLongRunning(); + const string Script = "redis.call('set', @PurgeLuaScriptOnFinalizeKey, @PurgeLuaScriptOnFinalizeValue)"; + LuaScript.PurgeCache(); + Assert.Equal(0, LuaScript.GetCachedScriptCount()); + + // This has to be a separate method to guarantee that the created LuaScript objects go out of scope, + // and are thus available to be garbage collected. 
+ PurgeLuaScriptOnFinalizeImpl(Script); + CollectGarbage(); + + Assert.Equal(0, LuaScript.GetCachedScriptCount()); + + LuaScript.Prepare(Script); + Assert.Equal(1, LuaScript.GetCachedScriptCount()); + } + + [Fact] + public async Task DatabaseLuaScriptConvenienceMethods() + { + await using var conn = Create(allowAdmin: true, require: RedisFeatures.v2_6_0); + + const string Script = "redis.call('set', @key, @value)"; + var script = LuaScript.Prepare(Script); + var db = conn.GetDatabase(); + var key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + db.ScriptEvaluate(script, new { key = (RedisKey)key, value = "value" }); + var val = db.StringGet(key); + Assert.Equal("value", val); + + var prepared = script.Load(conn.GetServer(conn.GetEndPoints()[0])); + + db.ScriptEvaluate(prepared, new { key = (RedisKey)(key + "2"), value = "value2" }); + var val2 = db.StringGet(key + "2"); + Assert.Equal("value2", val2); + } + + [Fact] + public async Task ServerLuaScriptConvenienceMethods() + { + await using var conn = Create(allowAdmin: true, require: RedisFeatures.v2_6_0); + + const string Script = "redis.call('set', @key, @value)"; + var script = LuaScript.Prepare(Script); + var server = conn.GetServer(conn.GetEndPoints()[0]); + var db = conn.GetDatabase(); + var key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + + var prepared = server.ScriptLoad(script); + + db.ScriptEvaluate(prepared, new { key = (RedisKey)key, value = "value3" }); + var val = db.StringGet(key); + Assert.Equal("value3", val); + } + + [Fact] + public void LuaScriptPrefixedKeys() + { + const string Script = "redis.call('set', @key, @value)"; + var prepared = LuaScript.Prepare(Script); + var key = Me(); + var p = new { key = (RedisKey)key, value = "hello" }; + + // no super clean way to extract this; so just abuse InternalsVisibleTo + prepared.ExtractParameters(p, "prefix-", out RedisKey[]? keys, out RedisValue[]? 
args); + Assert.NotNull(keys); + Assert.Single(keys); + Assert.Equal("prefix-" + key, keys[0]); + Assert.NotNull(args); + Assert.Equal(2, args.Length); + Assert.Equal("prefix-" + key, args[0]); + Assert.Equal("hello", args[1]); + } + + [Fact] + public async Task LuaScriptWithWrappedDatabase() + { + await using var conn = Create(allowAdmin: true, require: RedisFeatures.v2_6_0); + + const string Script = "redis.call('set', @key, @value)"; + var db = conn.GetDatabase(); + var wrappedDb = db.WithKeyPrefix("prefix-"); + var key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + + var prepared = LuaScript.Prepare(Script); + wrappedDb.ScriptEvaluate(prepared, new { key = (RedisKey)key, value = 123 }); + var val1 = wrappedDb.StringGet(key); + Assert.Equal(123, (int)val1); + + var val2 = db.StringGet("prefix-" + key); + Assert.Equal(123, (int)val2); + + var val3 = db.StringGet(key); + Assert.True(val3.IsNull); + } + + [Fact] + public async Task AsyncLuaScriptWithWrappedDatabase() + { + await using var conn = Create(allowAdmin: true, require: RedisFeatures.v2_6_0); + + const string Script = "redis.call('set', @key, @value)"; + var db = conn.GetDatabase(); + var wrappedDb = db.WithKeyPrefix("prefix-"); + var key = Me(); + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + + var prepared = LuaScript.Prepare(Script); + await wrappedDb.ScriptEvaluateAsync(prepared, new { key = (RedisKey)key, value = 123 }); + var val1 = await wrappedDb.StringGetAsync(key); + Assert.Equal(123, (int)val1); + + var val2 = await db.StringGetAsync("prefix-" + key); + Assert.Equal(123, (int)val2); + + var val3 = await db.StringGetAsync(key); + Assert.True(val3.IsNull); + } + + [Fact] + public async Task LoadedLuaScriptWithWrappedDatabase() + { + await using var conn = Create(allowAdmin: true, require: RedisFeatures.v2_6_0); + + const string Script = "redis.call('set', @key, @value)"; + var db = conn.GetDatabase(); + var wrappedDb = db.WithKeyPrefix("prefix2-"); + var key = Me(); + 
db.KeyDelete(key, CommandFlags.FireAndForget); + + var server = conn.GetServer(conn.GetEndPoints()[0]); + var prepared = LuaScript.Prepare(Script).Load(server); + wrappedDb.ScriptEvaluate(prepared, new { key = (RedisKey)key, value = 123 }, flags: CommandFlags.FireAndForget); + var val1 = wrappedDb.StringGet(key); + Assert.Equal(123, (int)val1); + + var val2 = db.StringGet("prefix2-" + key); + Assert.Equal(123, (int)val2); + + var val3 = db.StringGet(key); + Assert.True(val3.IsNull); + } + + [Fact] + public async Task AsyncLoadedLuaScriptWithWrappedDatabase() + { + await using var conn = Create(allowAdmin: true, require: RedisFeatures.v2_6_0); + + const string Script = "redis.call('set', @key, @value)"; + var db = conn.GetDatabase(); + var wrappedDb = db.WithKeyPrefix("prefix2-"); + var key = Me(); + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + + var server = conn.GetServer(conn.GetEndPoints()[0]); + var prepared = await LuaScript.Prepare(Script).LoadAsync(server); + await wrappedDb.ScriptEvaluateAsync(prepared, new { key = (RedisKey)key, value = 123 }, flags: CommandFlags.FireAndForget); + var val1 = await wrappedDb.StringGetAsync(key); + Assert.Equal(123, (int)val1); + + var val2 = await db.StringGetAsync("prefix2-" + key); + Assert.Equal(123, (int)val2); + + var val3 = await db.StringGetAsync(key); + Assert.True(val3.IsNull); + } + + [Fact] + public async Task ScriptWithKeyPrefixViaTokens() + { + await using var conn = Create(); + + var p = conn.GetDatabase().WithKeyPrefix("prefix/"); + + var args = new { x = "abc", y = (RedisKey)"def", z = 123 }; + var script = LuaScript.Prepare(@" +local arr = {}; +arr[1] = @x; +arr[2] = @y; +arr[3] = @z; +return arr; +"); + var result = (RedisValue[]?)p.ScriptEvaluate(script, args); + Assert.NotNull(result); + Assert.Equal("abc", result[0]); + Assert.Equal("prefix/def", result[1]); + Assert.Equal("123", result[2]); + } + + [Fact] + public async Task ScriptWithKeyPrefixViaArrays() + { + await using var conn = 
Create(); + + var p = conn.GetDatabase().WithKeyPrefix("prefix/"); + + const string Script = @" +local arr = {}; +arr[1] = ARGV[1]; +arr[2] = KEYS[1]; +arr[3] = ARGV[2]; +return arr; +"; + var result = (RedisValue[]?)p.ScriptEvaluate(script: Script, keys: ["def"], values: ["abc", 123]); + Assert.NotNull(result); + Assert.Equal("abc", result[0]); + Assert.Equal("prefix/def", result[1]); + Assert.Equal("123", result[2]); + } + + [Fact] + public async Task ScriptWithKeyPrefixCompare() + { + await using var conn = Create(); + + var p = conn.GetDatabase().WithKeyPrefix("prefix/"); + var args = new { k = (RedisKey)"key", s = "str", v = 123 }; + LuaScript lua = LuaScript.Prepare("return {@k, @s, @v}"); + var viaArgs = (RedisValue[]?)p.ScriptEvaluate(lua, args); + + var viaArr = (RedisValue[]?)p.ScriptEvaluate(script: "return {KEYS[1], ARGV[1], ARGV[2]}", keys: [args.k], values: [args.s, args.v]); + Assert.NotNull(viaArr); + Assert.NotNull(viaArgs); + Assert.Equal(string.Join(",", viaArr), string.Join(",", viaArgs)); + } + + [Fact] + public void RedisResultUnderstandsNullArrayArray() => TestNullArray(RedisResult.NullArray); + + [Fact] + public void RedisResultUnderstandsNullArrayNull() => TestNullArray(null); + + [Theory] + [InlineData(null, false)] + [InlineData("", false)] + [InlineData("829c3804401b0727f70f73d4415e162400cbe57b", true)] + [InlineData("$29c3804401b0727f70f73d4415e162400cbe57b", false)] + [InlineData("829c3804401b0727f70f73d4415e162400cbe57", false)] + [InlineData("829c3804401b0727f70f73d4415e162400cbe57bb", false)] + public void Sha1Detection(string? candidate, bool isSha) + { + Assert.Equal(isSha, ResultProcessor.ScriptLoadProcessor.IsSHA1(candidate)); + } + + private static void TestNullArray(RedisResult? 
value) + { + Assert.True(value == null || value.IsNull); + + Assert.Null((RedisValue[]?)value); + Assert.Null((RedisKey[]?)value); + Assert.Null((bool[]?)value); + Assert.Null((long[]?)value); + Assert.Null((ulong[]?)value); + Assert.Null((string[]?)value!); + Assert.Null((int[]?)value); + Assert.Null((double[]?)value); + Assert.Null((byte[][]?)value!); + Assert.Null((RedisResult[]?)value); + } + + [Fact] + public void RedisResultUnderstandsNullNull() => TestNullValue(null); + [Fact] + public void RedisResultUnderstandsNullValue() => TestNullValue(RedisResult.Create(RedisValue.Null, ResultType.None)); + + [Fact] + public async Task TestEvalReadonly() + { + await using var conn = GetScriptConn(); + var db = conn.GetDatabase(); + + string script = "return KEYS[1]"; + RedisKey[] keys = ["key1"]; + RedisValue[] values = ["first"]; + + var result = db.ScriptEvaluateReadOnly(script, keys, values); + Assert.Equal("key1", result.ToString()); + } + + [Fact] + public async Task TestEvalReadonlyAsync() + { + await using var conn = GetScriptConn(); + var db = conn.GetDatabase(); + + string script = "return KEYS[1]"; + RedisKey[] keys = ["key1"]; + RedisValue[] values = ["first"]; + + var result = await db.ScriptEvaluateReadOnlyAsync(script, keys, values); + Assert.Equal("key1", result.ToString()); + } + + [Fact] + public async Task TestEvalShaReadOnly() + { + await using var conn = GetScriptConn(); + var db = conn.GetDatabase(); + var key = Me(); + var script = $"return redis.call('get','{key}')"; + db.StringSet(key, "bar"); + db.ScriptEvaluate(script: script); + + SHA1 sha1Hash = SHA1.Create(); + byte[] hash = sha1Hash.ComputeHash(Encoding.UTF8.GetBytes(script)); + Log("Hash: " + Convert.ToBase64String(hash)); + var result = db.ScriptEvaluateReadOnly(hash); + + Assert.Equal("bar", result.ToString()); + } + + [Fact] + public async Task TestEvalShaReadOnlyAsync() + { + await using var conn = GetScriptConn(); + var db = conn.GetDatabase(); + var key = Me(); + var script = 
$"return redis.call('get','{key}')"; + db.StringSet(key, "bar"); + db.ScriptEvaluate(script: script); + + SHA1 sha1Hash = SHA1.Create(); + byte[] hash = sha1Hash.ComputeHash(Encoding.UTF8.GetBytes(script)); + Log("Hash: " + Convert.ToBase64String(hash)); + var result = await db.ScriptEvaluateReadOnlyAsync(hash); + + Assert.Equal("bar", result.ToString()); + } + + [Fact, TestCulture("en-US")] + public void LuaScriptEnglishParameters() => LuaScriptParameterShared(); + + [Fact, TestCulture("tr-TR")] + public void LuaScriptTurkishParameters() => LuaScriptParameterShared(); + + private void LuaScriptParameterShared() + { + const string Script = "redis.call('set', @key, @testIId)"; + var prepared = LuaScript.Prepare(Script); + var key = Me(); + var p = new { key = (RedisKey)key, testIId = "hello" }; + + prepared.ExtractParameters(p, null, out RedisKey[]? keys, out RedisValue[]? args); + Assert.NotNull(keys); + Assert.Single(keys); + Assert.Equal(key, keys[0]); + Assert.NotNull(args); + Assert.Equal(2, args.Length); + Assert.Equal(key, args[0]); + Assert.Equal("hello", args[1]); + } + + private static void TestNullValue(RedisResult? 
value) + { + Assert.True(value == null || value.IsNull); + + Assert.True(((RedisValue)value).IsNull); + Assert.True(((RedisKey)value).IsNull); + Assert.Null((bool?)value); + Assert.Null((long?)value); + Assert.Null((ulong?)value); + Assert.Null((string?)value); + Assert.Null((double?)value); + Assert.Null((byte[]?)value); + } +} +#endif diff --git a/tests/StackExchange.Redis.Tests/Secure.cs b/tests/StackExchange.Redis.Tests/Secure.cs deleted file mode 100644 index 2e7d70929..000000000 --- a/tests/StackExchange.Redis.Tests/Secure.cs +++ /dev/null @@ -1,81 +0,0 @@ -using System.Diagnostics; -using System.Threading.Tasks; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - [Collection(NonParallelCollection.Name)] - public class Secure : TestBase - { - protected override string GetConfiguration() => - TestConfig.Current.SecureServerAndPort + ",password=" + TestConfig.Current.SecurePassword + ",name=MyClient"; - - public Secure(ITestOutputHelper output) : base (output) { } - - [Fact] - public void MassiveBulkOpsFireAndForgetSecure() - { - using (var muxer = Create()) - { - RedisKey key = Me(); - var conn = muxer.GetDatabase(); - conn.Ping(); - - var watch = Stopwatch.StartNew(); - - for (int i = 0; i <= AsyncOpsQty; i++) - { - conn.StringSet(key, i, flags: CommandFlags.FireAndForget); - } - int val = (int)conn.StringGet(key); - Assert.Equal(AsyncOpsQty, val); - watch.Stop(); - Log("{2}: Time for {0} ops: {1}ms (any order); ops/s: {3}", AsyncOpsQty, watch.ElapsedMilliseconds, Me(), - AsyncOpsQty / watch.Elapsed.TotalSeconds); - } - } - - [Fact] - public void CheckConfig() - { - var config = ConfigurationOptions.Parse(GetConfiguration()); - foreach (var ep in config.EndPoints) - { - Log(ep.ToString()); - } - Assert.Single(config.EndPoints); - Assert.Equal("changeme", config.Password); - } - - [Fact] - public void Connect() - { - using (var server = Create()) - { - server.GetDatabase().Ping(); - } - } - - [Theory] - [InlineData("wrong")] - 
[InlineData("")] - public async Task ConnectWithWrongPassword(string password) - { - var config = ConfigurationOptions.Parse(GetConfiguration()); - config.Password = password; - config.ConnectRetry = 0; // we don't want to retry on closed sockets in this case. - - var ex = await Assert.ThrowsAsync(async () => - { - SetExpectedAmbientFailureCount(-1); - using (var conn = await ConnectionMultiplexer.ConnectAsync(config, Writer).ConfigureAwait(false)) - { - conn.GetDatabase().Ping(); - } - }).ConfigureAwait(false); - Log("Exception: " + ex.Message); - Assert.StartsWith("It was not possible to connect to the redis server(s). There was an authentication failure; check that passwords (or client certificates) are configured correctly.", ex.Message); - } - } -} diff --git a/tests/StackExchange.Redis.Tests/SecureTests.cs b/tests/StackExchange.Redis.Tests/SecureTests.cs new file mode 100644 index 000000000..8f90e04ba --- /dev/null +++ b/tests/StackExchange.Redis.Tests/SecureTests.cs @@ -0,0 +1,89 @@ +using System.Diagnostics; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +[Collection(NonParallelCollection.Name)] +public class SecureTests(ITestOutputHelper output) : TestBase(output) +{ + protected override string GetConfiguration() => + TestConfig.Current.SecureServerAndPort + ",password=" + TestConfig.Current.SecurePassword + ",name=MyClient"; + + [Fact] + public async Task MassiveBulkOpsFireAndForgetSecure() + { + await using var conn = Create(); + + RedisKey key = Me(); + var db = conn.GetDatabase(); + await db.PingAsync(); + + var watch = Stopwatch.StartNew(); + + for (int i = 0; i <= AsyncOpsQty; i++) + { + db.StringSet(key, i, flags: CommandFlags.FireAndForget); + } + int val = (int)db.StringGet(key); + Assert.Equal(AsyncOpsQty, val); + watch.Stop(); + Log("{2}: Time for {0} ops: {1}ms (any order); ops/s: {3}", AsyncOpsQty, watch.ElapsedMilliseconds, Me(), AsyncOpsQty / watch.Elapsed.TotalSeconds); + } + + [Fact] + public void 
CheckConfig() + { + var config = ConfigurationOptions.Parse(GetConfiguration()); + foreach (var ep in config.EndPoints) + { + Log(ep.ToString()); + } + Assert.Single(config.EndPoints); + Assert.Equal("changeme", config.Password); + } + + [Fact] + public async Task Connect() + { + await using var conn = Create(); + + await conn.GetDatabase().PingAsync(); + } + + [Theory] + [InlineData("wrong", "WRONGPASS invalid username-password pair or user is disabled.")] + [InlineData("", "NOAUTH Returned - connection has not yet authenticated")] + public async Task ConnectWithWrongPassword(string password, string exepctedMessage) + { + await using var checkConn = Create(); + var checkServer = GetServer(checkConn); + + var config = ConfigurationOptions.Parse(GetConfiguration()); + config.Password = password; + config.ConnectRetry = 0; // we don't want to retry on closed sockets in this case. + config.BacklogPolicy = BacklogPolicy.FailFast; + + var ex = await Assert.ThrowsAsync(async () => + { + SetExpectedAmbientFailureCount(-1); + + await using var conn = await ConnectionMultiplexer.ConnectAsync(config, Writer).ConfigureAwait(false); + + await conn.GetDatabase().PingAsync(); + }).ConfigureAwait(false); + Log($"Exception ({ex.FailureType}): {ex.Message}"); + Assert.Equal(ConnectionFailureType.AuthenticationFailure, ex.FailureType); + Assert.StartsWith("It was not possible to connect to the redis server(s). There was an authentication failure; check that passwords (or client certificates) are configured correctly: (RedisServerException) ", ex.Message); + + // This changed in some version...not sure which. 
For our purposes, splitting on v3 vs v6+ + if (checkServer.Version.IsAtLeast(RedisFeatures.v6_0_0)) + { + Assert.EndsWith(exepctedMessage, ex.Message); + } + else + { + Assert.EndsWith("NOAUTH Returned - connection has not yet authenticated", ex.Message); + } + } +} diff --git a/tests/StackExchange.Redis.Tests/Sentinel.cs b/tests/StackExchange.Redis.Tests/Sentinel.cs deleted file mode 100644 index 332c37877..000000000 --- a/tests/StackExchange.Redis.Tests/Sentinel.cs +++ /dev/null @@ -1,432 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Linq; -using System.Net; -using System.Threading.Tasks; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - public class Sentinel : SentinelBase - { - public Sentinel(ITestOutputHelper output) : base(output) { } - - [Fact] - public async Task MasterConnectTest() - { - var connectionString = $"{TestConfig.Current.SentinelServer},serviceName={ServiceOptions.ServiceName},allowAdmin=true"; - var conn = ConnectionMultiplexer.Connect(connectionString); - - var db = conn.GetDatabase(); - db.Ping(); - - var endpoints = conn.GetEndPoints(); - Assert.Equal(2, endpoints.Length); - - var servers = endpoints.Select(e => conn.GetServer(e)).ToArray(); - Assert.Equal(2, servers.Length); - - var master = servers.FirstOrDefault(s => !s.IsReplica); - Assert.NotNull(master); - var replica = servers.FirstOrDefault(s => s.IsReplica); - Assert.NotNull(replica); - Assert.NotEqual(master.EndPoint.ToString(), replica.EndPoint.ToString()); - - var expected = DateTime.Now.Ticks.ToString(); - Log("Tick Key: " + expected); - var key = Me(); - db.KeyDelete(key, CommandFlags.FireAndForget); - db.StringSet(key, expected); - - var value = db.StringGet(key); - Assert.Equal(expected, value); - - // force read from replica, replication has some lag - await WaitForReplicationAsync(servers.First(), TimeSpan.FromSeconds(10)).ForAwait(); - value = db.StringGet(key, CommandFlags.DemandReplica); - 
Assert.Equal(expected, value); - } - - [Fact] - public async Task MasterConnectAsyncTest() - { - var connectionString = $"{TestConfig.Current.SentinelServer},serviceName={ServiceOptions.ServiceName},allowAdmin=true"; - var conn = await ConnectionMultiplexer.ConnectAsync(connectionString); - - var db = conn.GetDatabase(); - await db.PingAsync(); - - var endpoints = conn.GetEndPoints(); - Assert.Equal(2, endpoints.Length); - - var servers = endpoints.Select(e => conn.GetServer(e)).ToArray(); - Assert.Equal(2, servers.Length); - - var master = servers.FirstOrDefault(s => !s.IsReplica); - Assert.NotNull(master); - var replica = servers.FirstOrDefault(s => s.IsReplica); - Assert.NotNull(replica); - Assert.NotEqual(master.EndPoint.ToString(), replica.EndPoint.ToString()); - - var expected = DateTime.Now.Ticks.ToString(); - Log("Tick Key: " + expected); - var key = Me(); - await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); - await db.StringSetAsync(key, expected); - - var value = await db.StringGetAsync(key); - Assert.Equal(expected, value); - - // force read from replica, replication has some lag - await WaitForReplicationAsync(servers.First(), TimeSpan.FromSeconds(10)).ForAwait(); - value = await db.StringGetAsync(key, CommandFlags.DemandReplica); - Assert.Equal(expected, value); - } - - [Fact] - public void SentinelConnectTest() - { - var options = ServiceOptions.Clone(); - options.EndPoints.Add(TestConfig.Current.SentinelServer, TestConfig.Current.SentinelPortA); - - var conn = ConnectionMultiplexer.SentinelConnect(options); - var db = conn.GetDatabase(); - - var test = db.Ping(); - Log("ping to sentinel {0}:{1} took {2} ms", TestConfig.Current.SentinelServer, - TestConfig.Current.SentinelPortA, test.TotalMilliseconds); - } - - [Fact] - public async Task SentinelConnectAsyncTest() - { - var options = ServiceOptions.Clone(); - options.EndPoints.Add(TestConfig.Current.SentinelServer, TestConfig.Current.SentinelPortA); - - var conn = await 
ConnectionMultiplexer.SentinelConnectAsync(options); - var db = conn.GetDatabase(); - - var test = await db.PingAsync(); - Log("ping to sentinel {0}:{1} took {2} ms", TestConfig.Current.SentinelServer, - TestConfig.Current.SentinelPortA, test.TotalMilliseconds); - } - - [Fact] - public void SentinelRole() - { - foreach (var server in SentinelsServers) - { - var role = server.Role(); - Assert.Equal(role.Value, RedisLiterals.sentinel); - var sentinel = role as Role.Sentinel; - Assert.NotNull(sentinel); - } - } - - [Fact] - public void PingTest() - { - var test = SentinelServerA.Ping(); - Log("ping to sentinel {0}:{1} took {2} ms", TestConfig.Current.SentinelServer, - TestConfig.Current.SentinelPortA, test.TotalMilliseconds); - test = SentinelServerB.Ping(); - Log("ping to sentinel {0}:{1} took {1} ms", TestConfig.Current.SentinelServer, - TestConfig.Current.SentinelPortB, test.TotalMilliseconds); - test = SentinelServerC.Ping(); - Log("ping to sentinel {0}:{1} took {1} ms", TestConfig.Current.SentinelServer, - TestConfig.Current.SentinelPortC, test.TotalMilliseconds); - } - - [Fact] - public void SentinelGetMasterAddressByNameTest() - { - foreach (var server in SentinelsServers) - { - var master = server.SentinelMaster(ServiceName); - var endpoint = server.SentinelGetMasterAddressByName(ServiceName); - Assert.NotNull(endpoint); - var ipEndPoint = endpoint as IPEndPoint; - Assert.NotNull(ipEndPoint); - Assert.Equal(master.ToDictionary()["ip"], ipEndPoint.Address.ToString()); - Assert.Equal(master.ToDictionary()["port"], ipEndPoint.Port.ToString()); - Log("{0}:{1}", ipEndPoint.Address, ipEndPoint.Port); - } - } - - [Fact] - public async Task SentinelGetMasterAddressByNameAsyncTest() - { - foreach (var server in SentinelsServers) - { - var master = server.SentinelMaster(ServiceName); - var endpoint = await server.SentinelGetMasterAddressByNameAsync(ServiceName).ForAwait(); - Assert.NotNull(endpoint); - var ipEndPoint = endpoint as IPEndPoint; - 
Assert.NotNull(ipEndPoint); - Assert.Equal(master.ToDictionary()["ip"], ipEndPoint.Address.ToString()); - Assert.Equal(master.ToDictionary()["port"], ipEndPoint.Port.ToString()); - Log("{0}:{1}", ipEndPoint.Address, ipEndPoint.Port); - } - } - - [Fact] - public void SentinelGetMasterAddressByNameNegativeTest() - { - foreach (var server in SentinelsServers) - { - var endpoint = server.SentinelGetMasterAddressByName("FakeServiceName"); - Assert.Null(endpoint); - } - } - - [Fact] - public async Task SentinelGetMasterAddressByNameAsyncNegativeTest() - { - foreach (var server in SentinelsServers) - { - var endpoint = await server.SentinelGetMasterAddressByNameAsync("FakeServiceName").ForAwait(); - Assert.Null(endpoint); - } - } - - [Fact] - public void SentinelMasterTest() - { - foreach (var server in SentinelsServers) - { - var dict = server.SentinelMaster(ServiceName).ToDictionary(); - Assert.Equal(ServiceName, dict["name"]); - Assert.StartsWith("master", dict["flags"]); - foreach (var kvp in dict) - { - Log("{0}:{1}", kvp.Key, kvp.Value); - } - } - } - - [Fact] - public async Task SentinelMasterAsyncTest() - { - foreach (var server in SentinelsServers) - { - var results = await server.SentinelMasterAsync(ServiceName).ForAwait(); - Assert.Equal(ServiceName, results.ToDictionary()["name"]); - Assert.StartsWith("master", results.ToDictionary()["flags"]); - foreach (var kvp in results) - { - Log("{0}:{1}", kvp.Key, kvp.Value); - } - } - } - - [Fact] - public void SentinelSentinelsTest() - { - var sentinels = SentinelServerA.SentinelSentinels(ServiceName); - - var expected = new List { - SentinelServerB.EndPoint.ToString(), - SentinelServerC.EndPoint.ToString() - }; - - var actual = new List(); - foreach (var kv in sentinels) - { - var data = kv.ToDictionary(); - actual.Add(data["ip"] + ":" + data["port"]); - } - - Assert.All(expected, ep => Assert.NotEqual(ep, SentinelServerA.EndPoint.ToString())); - Assert.True(sentinels.Length == 2); - Assert.All(expected, ep => 
Assert.Contains(ep, actual, _ipComparer)); - - sentinels = SentinelServerB.SentinelSentinels(ServiceName); - foreach (var kv in sentinels) - { - var data = kv.ToDictionary(); - actual.Add(data["ip"] + ":" + data["port"]); - } - expected = new List { - SentinelServerA.EndPoint.ToString(), - SentinelServerC.EndPoint.ToString() - }; - - Assert.All(expected, ep => Assert.NotEqual(ep, SentinelServerB.EndPoint.ToString())); - Assert.True(sentinels.Length == 2); - Assert.All(expected, ep => Assert.Contains(ep, actual, _ipComparer)); - - sentinels = SentinelServerC.SentinelSentinels(ServiceName); - foreach (var kv in sentinels) - { - var data = kv.ToDictionary(); - actual.Add(data["ip"] + ":" + data["port"]); - } - expected = new List { - SentinelServerA.EndPoint.ToString(), - SentinelServerB.EndPoint.ToString() - }; - - Assert.All(expected, ep => Assert.NotEqual(ep, SentinelServerC.EndPoint.ToString())); - Assert.True(sentinels.Length == 2); - Assert.All(expected, ep => Assert.Contains(ep, actual, _ipComparer)); - } - - [Fact] - public async Task SentinelSentinelsAsyncTest() - { - var sentinels = await SentinelServerA.SentinelSentinelsAsync(ServiceName).ForAwait(); - var expected = new List { - SentinelServerB.EndPoint.ToString(), - SentinelServerC.EndPoint.ToString() - }; - - var actual = new List(); - foreach (var kv in sentinels) - { - var data = kv.ToDictionary(); - actual.Add(data["ip"] + ":" + data["port"]); - } - Assert.All(expected, ep => Assert.NotEqual(ep, SentinelServerA.EndPoint.ToString())); - Assert.True(sentinels.Length == 2); - Assert.All(expected, ep => Assert.Contains(ep, actual, _ipComparer)); - - sentinels = await SentinelServerB.SentinelSentinelsAsync(ServiceName).ForAwait(); - - expected = new List { - SentinelServerA.EndPoint.ToString(), - SentinelServerC.EndPoint.ToString() - }; - - actual = new List(); - foreach (var kv in sentinels) - { - var data = kv.ToDictionary(); - actual.Add(data["ip"] + ":" + data["port"]); - } - Assert.All(expected, ep => 
Assert.NotEqual(ep, SentinelServerB.EndPoint.ToString())); - Assert.True(sentinels.Length == 2); - Assert.All(expected, ep => Assert.Contains(ep, actual, _ipComparer)); - - sentinels = await SentinelServerC.SentinelSentinelsAsync(ServiceName).ForAwait(); - expected = new List { - SentinelServerA.EndPoint.ToString(), - SentinelServerB.EndPoint.ToString() - }; - actual = new List(); - foreach (var kv in sentinels) - { - var data = kv.ToDictionary(); - actual.Add(data["ip"] + ":" + data["port"]); - } - Assert.All(expected, ep => Assert.NotEqual(ep, SentinelServerC.EndPoint.ToString())); - Assert.True(sentinels.Length == 2); - Assert.All(expected, ep => Assert.Contains(ep, actual, _ipComparer)); - } - - [Fact] - public void SentinelMastersTest() - { - var masterConfigs = SentinelServerA.SentinelMasters(); - Assert.Single(masterConfigs); - Assert.True(masterConfigs[0].ToDictionary().ContainsKey("name"), "replicaConfigs contains 'name'"); - Assert.Equal(ServiceName, masterConfigs[0].ToDictionary()["name"]); - Assert.StartsWith("master", masterConfigs[0].ToDictionary()["flags"]); - foreach (var config in masterConfigs) - { - foreach (var kvp in config) - { - Log("{0}:{1}", kvp.Key, kvp.Value); - } - } - } - - [Fact] - public async Task SentinelMastersAsyncTest() - { - var masterConfigs = await SentinelServerA.SentinelMastersAsync().ForAwait(); - Assert.Single(masterConfigs); - Assert.True(masterConfigs[0].ToDictionary().ContainsKey("name"), "replicaConfigs contains 'name'"); - Assert.Equal(ServiceName, masterConfigs[0].ToDictionary()["name"]); - Assert.StartsWith("master", masterConfigs[0].ToDictionary()["flags"]); - foreach (var config in masterConfigs) - { - foreach (var kvp in config) - { - Log("{0}:{1}", kvp.Key, kvp.Value); - } - } - } - - [Fact] - public void SentinelReplicasTest() - { - var replicaConfigs = SentinelServerA.SentinelReplicas(ServiceName); - Assert.True(replicaConfigs.Length > 0, "Has replicaConfigs"); - 
Assert.True(replicaConfigs[0].ToDictionary().ContainsKey("name"), "replicaConfigs contains 'name'"); - Assert.StartsWith("slave", replicaConfigs[0].ToDictionary()["flags"]); - - foreach (var config in replicaConfigs) - { - foreach (var kvp in config) - { - Log("{0}:{1}", kvp.Key, kvp.Value); - } - } - } - - [Fact] - public async Task SentinelReplicasAsyncTest() - { - var replicaConfigs = await SentinelServerA.SentinelReplicasAsync(ServiceName).ForAwait(); - Assert.True(replicaConfigs.Length > 0, "Has replicaConfigs"); - Assert.True(replicaConfigs[0].ToDictionary().ContainsKey("name"), "replicaConfigs contains 'name'"); - Assert.StartsWith("slave", replicaConfigs[0].ToDictionary()["flags"]); - foreach (var config in replicaConfigs) - { - foreach (var kvp in config) - { - Log("{0}:{1}", kvp.Key, kvp.Value); - } - } - } - - [Fact] - public async Task SentinelGetSentinelAddressesTest() - { - var addresses = await SentinelServerA.SentinelGetSentinelAddressesAsync(ServiceName).ForAwait(); - Assert.Contains(SentinelServerB.EndPoint, addresses); - Assert.Contains(SentinelServerC.EndPoint, addresses); - - addresses = await SentinelServerB.SentinelGetSentinelAddressesAsync(ServiceName).ForAwait(); - Assert.Contains(SentinelServerA.EndPoint, addresses); - Assert.Contains(SentinelServerC.EndPoint, addresses); - - addresses = await SentinelServerC.SentinelGetSentinelAddressesAsync(ServiceName).ForAwait(); - Assert.Contains(SentinelServerA.EndPoint, addresses); - Assert.Contains(SentinelServerB.EndPoint, addresses); - } - - [Fact] - public async Task ReadOnlyConnectionReplicasTest() - { - var replicas = SentinelServerA.SentinelGetReplicaAddresses(ServiceName); - var config = new ConfigurationOptions(); - - foreach (var replica in replicas) - { - config.EndPoints.Add(replica); - } - - var readonlyConn = await ConnectionMultiplexer.ConnectAsync(config); - - await UntilCondition(TimeSpan.FromSeconds(2), () => readonlyConn.IsConnected); - Assert.True(readonlyConn.IsConnected); - var 
db = readonlyConn.GetDatabase(); - var s = db.StringGet("test"); - Assert.True(s.IsNullOrEmpty); - //var ex = Assert.Throws(() => db.StringSet("test", "try write to read only instance")); - //Assert.StartsWith("No connection is available to service this operation", ex.Message); - } - } -} diff --git a/tests/StackExchange.Redis.Tests/SentinelBase.cs b/tests/StackExchange.Redis.Tests/SentinelBase.cs index b9d5a3aa1..826b9c613 100644 --- a/tests/StackExchange.Redis.Tests/SentinelBase.cs +++ b/tests/StackExchange.Redis.Tests/SentinelBase.cs @@ -1,195 +1,189 @@ using System; using System.Collections.Generic; using System.Diagnostics; -using System.IO; using System.Linq; using System.Net; using System.Threading.Tasks; using Xunit; -using Xunit.Abstractions; -namespace StackExchange.Redis.Tests +namespace StackExchange.Redis.Tests; + +public class SentinelBase : TestBase, IAsyncLifetime { - public class SentinelBase : TestBase, IAsyncLifetime - { - protected string ServiceName => TestConfig.Current.SentinelSeviceName; - protected ConfigurationOptions ServiceOptions => new ConfigurationOptions { ServiceName = ServiceName, AllowAdmin = true }; + protected static string ServiceName => TestConfig.Current.SentinelSeviceName; + protected static ConfigurationOptions ServiceOptions => new ConfigurationOptions { ServiceName = ServiceName, AllowAdmin = true }; - protected ConnectionMultiplexer Conn { get; set; } - protected IServer SentinelServerA { get; set; } - protected IServer SentinelServerB { get; set; } - protected IServer SentinelServerC { get; set; } - public IServer[] SentinelsServers { get; set; } + protected ConnectionMultiplexer Conn { get; set; } + protected IServer SentinelServerA { get; set; } + protected IServer SentinelServerB { get; set; } + protected IServer SentinelServerC { get; set; } + public IServer[] SentinelsServers { get; set; } - public SentinelBase(ITestOutputHelper output) : base(output) - { - Skip.IfNoConfig(nameof(TestConfig.Config.SentinelServer), 
TestConfig.Current.SentinelServer); - Skip.IfNoConfig(nameof(TestConfig.Config.SentinelSeviceName), TestConfig.Current.SentinelSeviceName); - } +#nullable disable + public SentinelBase(ITestOutputHelper output) : base(output) + { + Skip.IfNoConfig(nameof(TestConfig.Config.SentinelServer), TestConfig.Current.SentinelServer); + Skip.IfNoConfig(nameof(TestConfig.Config.SentinelSeviceName), TestConfig.Current.SentinelSeviceName); + } +#nullable enable - public Task DisposeAsync() => Task.CompletedTask; + public ValueTask DisposeAsync() => default; - public async Task InitializeAsync() - { - var options = ServiceOptions.Clone(); - options.EndPoints.Add(TestConfig.Current.SentinelServer, TestConfig.Current.SentinelPortA); - options.EndPoints.Add(TestConfig.Current.SentinelServer, TestConfig.Current.SentinelPortB); - options.EndPoints.Add(TestConfig.Current.SentinelServer, TestConfig.Current.SentinelPortC); - Conn = ConnectionMultiplexer.SentinelConnect(options, Writer); + public async ValueTask InitializeAsync() + { + var options = ServiceOptions.Clone(); + options.EndPoints.Add(TestConfig.Current.SentinelServer, TestConfig.Current.SentinelPortA); + options.EndPoints.Add(TestConfig.Current.SentinelServer, TestConfig.Current.SentinelPortB); + options.EndPoints.Add(TestConfig.Current.SentinelServer, TestConfig.Current.SentinelPortC); + Conn = ConnectionMultiplexer.SentinelConnect(options, Writer); - for (var i = 0; i < 150; i++) + for (var i = 0; i < 150; i++) + { + await Task.Delay(100).ForAwait(); + if (Conn.IsConnected) { - await Task.Delay(20).ForAwait(); - if (Conn.IsConnected && Conn.GetSentinelMasterConnection(options, Writer).IsConnected) + await using var checkConn = Conn.GetSentinelMasterConnection(options, Writer); + if (checkConn.IsConnected) { break; } } - Assert.True(Conn.IsConnected); - SentinelServerA = Conn.GetServer(TestConfig.Current.SentinelServer, TestConfig.Current.SentinelPortA); - SentinelServerB = Conn.GetServer(TestConfig.Current.SentinelServer, 
TestConfig.Current.SentinelPortB); - SentinelServerC = Conn.GetServer(TestConfig.Current.SentinelServer, TestConfig.Current.SentinelPortC); - SentinelsServers = new[] { SentinelServerA, SentinelServerB, SentinelServerC }; - - // wait until we are in a state of a single master and replica - await WaitForReadyAsync(); } + Assert.True(Conn.IsConnected); + SentinelServerA = Conn.GetServer(TestConfig.Current.SentinelServer, TestConfig.Current.SentinelPortA)!; + SentinelServerB = Conn.GetServer(TestConfig.Current.SentinelServer, TestConfig.Current.SentinelPortB)!; + SentinelServerC = Conn.GetServer(TestConfig.Current.SentinelServer, TestConfig.Current.SentinelPortC)!; + SentinelsServers = [SentinelServerA, SentinelServerB, SentinelServerC]; + + SentinelServerA.AllowReplicaWrites = true; + // Wait until we are in a state of a single primary and replica + await WaitForReadyAsync(); + } - // Sometimes it's global, sometimes it's local - // Depends what mood Redis is in but they're equal and not the point of our tests - protected static readonly IpComparer _ipComparer = new IpComparer(); - protected class IpComparer : IEqualityComparer - { - public bool Equals(string x, string y) => x == y || x?.Replace("0.0.0.0", "127.0.0.1") == y?.Replace("0.0.0.0", "127.0.0.1"); - public int GetHashCode(string obj) => obj.GetHashCode(); - } - - protected async Task DoFailoverAsync() - { - await WaitForReadyAsync(); - - // capture current replica - var replicas = SentinelServerA.SentinelGetReplicaAddresses(ServiceName); + // Sometimes it's global, sometimes it's local + // Depends what mood Redis is in but they're equal and not the point of our tests + protected static readonly IpComparer _ipComparer = new IpComparer(); + protected class IpComparer : IEqualityComparer + { + public bool Equals(string? x, string? y) => x == y || x?.Replace("0.0.0.0", "127.0.0.1") == y?.Replace("0.0.0.0", "127.0.0.1"); + public int GetHashCode(string? obj) => obj?.GetHashCode() ?? 
0; + } - Log("Starting failover..."); - var sw = Stopwatch.StartNew(); - SentinelServerA.SentinelFailover(ServiceName); + protected async Task WaitForReadyAsync(EndPoint? expectedPrimary = null, bool waitForReplication = false, TimeSpan? duration = null) + { + duration ??= TimeSpan.FromSeconds(30); - // wait until the replica becomes the master - await WaitForReadyAsync(expectedMaster: replicas[0]); - Log($"Time to failover: {sw.Elapsed}"); - } + var sw = Stopwatch.StartNew(); - protected async Task WaitForReadyAsync(EndPoint expectedMaster = null, bool waitForReplication = false, TimeSpan? duration = null) + // wait until we have 1 primary and 1 replica and have verified their roles + var primary = SentinelServerA.SentinelGetMasterAddressByName(ServiceName); + if (expectedPrimary != null && expectedPrimary.ToString() != primary?.ToString()) { - duration ??= TimeSpan.FromSeconds(30); - - var sw = Stopwatch.StartNew(); - - // wait until we have 1 master and 1 replica and have verified their roles - var master = SentinelServerA.SentinelGetMasterAddressByName(ServiceName); - if (expectedMaster != null && expectedMaster.ToString() != master.ToString()) + while (sw.Elapsed < duration.Value) { - while (sw.Elapsed < duration.Value) + await Task.Delay(1000).ForAwait(); + try { - await Task.Delay(1000).ForAwait(); - try - { - master = SentinelServerA.SentinelGetMasterAddressByName(ServiceName); - if (expectedMaster.ToString() == master.ToString()) - break; - } - catch (Exception) - { - // ignore - } + primary = SentinelServerA.SentinelGetMasterAddressByName(ServiceName); + if (expectedPrimary.ToString() == primary?.ToString()) + break; + } + catch (Exception) + { + // ignore } } - if (expectedMaster != null && expectedMaster.ToString() != master.ToString()) - throw new RedisException($"Master was expected to be {expectedMaster}"); - Log($"Master is {master}"); + } + if (expectedPrimary != null && expectedPrimary.ToString() != primary?.ToString()) + throw new 
RedisException($"Primary was expected to be {expectedPrimary}"); + Log($"Primary is {primary}"); - var replicas = SentinelServerA.SentinelGetReplicaAddresses(ServiceName); - var checkConn = Conn.GetSentinelMasterConnection(ServiceOptions); + await using var checkConn = Conn.GetSentinelMasterConnection(ServiceOptions); - await WaitForRoleAsync(checkConn.GetServer(master), "master", duration.Value.Subtract(sw.Elapsed)).ForAwait(); - if (replicas.Length > 0) - { - await WaitForRoleAsync(checkConn.GetServer(replicas[0]), "slave", duration.Value.Subtract(sw.Elapsed)).ForAwait(); - } + await WaitForRoleAsync(checkConn.GetServer(primary), "master", duration.Value.Subtract(sw.Elapsed)).ForAwait(); - if (waitForReplication) - { - await WaitForReplicationAsync(checkConn.GetServer(master), duration.Value.Subtract(sw.Elapsed)).ForAwait(); - } + var replicas = SentinelServerA.SentinelGetReplicaAddresses(ServiceName); + if (replicas?.Length > 0) + { + await Task.Delay(100).ForAwait(); + replicas = SentinelServerA.SentinelGetReplicaAddresses(ServiceName); + await WaitForRoleAsync(checkConn.GetServer(replicas[0]), "slave", duration.Value.Subtract(sw.Elapsed)).ForAwait(); } - protected async Task WaitForRoleAsync(IServer server, string role, TimeSpan? duration = null) + if (waitForReplication) { - duration ??= TimeSpan.FromSeconds(30); + await WaitForReplicationAsync(checkConn.GetServer(primary), duration.Value.Subtract(sw.Elapsed)).ForAwait(); + } + } - Log($"Waiting for server ({server.EndPoint}) role to be \"{role}\"..."); - var sw = Stopwatch.StartNew(); - while (sw.Elapsed < duration.Value) + protected async Task WaitForRoleAsync(IServer server, string role, TimeSpan? 
duration = null) + { + duration ??= TimeSpan.FromSeconds(30); + + Log($"Waiting for server ({server.EndPoint}) role to be \"{role}\"..."); + var sw = Stopwatch.StartNew(); + while (sw.Elapsed < duration.Value) + { + try { - try + if (server.Role()?.Value == role) { - if (server.Role().Value == role) - { - Log($"Done waiting for server ({server.EndPoint}) role to be \"{role}\""); - return; - } - } - catch (Exception) - { - // ignore + Log($"Done waiting for server ({server.EndPoint}) role to be \"{role}\""); + return; } - - await Task.Delay(1000).ForAwait(); + } + catch (Exception) + { + // ignore } - throw new RedisException($"Timeout waiting for server ({server.EndPoint}) to have expected role (\"{role}\") assigned"); + await Task.Delay(100).ForAwait(); } - protected async Task WaitForReplicationAsync(IServer master, TimeSpan? duration = null) - { - duration ??= TimeSpan.FromSeconds(10); + throw new RedisException($"Timeout waiting for server ({server.EndPoint}) to have expected role (\"{role}\") assigned"); + } + + protected async Task WaitForReplicationAsync(IServer primary, TimeSpan? 
duration = null) + { + duration ??= TimeSpan.FromSeconds(10); - static void LogEndpoints(IServer master, Action log) + static void LogEndpoints(IServer primary, Action log) + { + if (primary.Multiplexer is ConnectionMultiplexer muxer) { - var serverEndpoints = (master.Multiplexer as ConnectionMultiplexer).GetServerSnapshot(); + var serverEndpoints = muxer.GetServerSnapshot(); log("Endpoints:"); foreach (var serverEndpoint in serverEndpoints) { log($" {serverEndpoint}:"); - var server = master.Multiplexer.GetServer(serverEndpoint.EndPoint); + var server = primary.Multiplexer.GetServer(serverEndpoint.EndPoint); log($" Server: (Connected={server.IsConnected}, Type={server.ServerType}, IsReplica={server.IsReplica}, Unselectable={serverEndpoint.GetUnselectableFlags()})"); } } + } - Log("Waiting for master/replica replication to be in sync..."); - var sw = Stopwatch.StartNew(); - while (sw.Elapsed < duration.Value) - { - var info = master.Info("replication"); - var replicationInfo = info.FirstOrDefault(f => f.Key == "Replication")?.ToArray().ToDictionary(); - var replicaInfo = replicationInfo?.FirstOrDefault(i => i.Key.StartsWith("slave")).Value?.Split(',').ToDictionary(i => i.Split('=').First(), i => i.Split('=').Last()); - var replicaOffset = replicaInfo?["offset"]; - var masterOffset = replicationInfo?["master_repl_offset"]; - - if (replicaOffset == masterOffset) - { - Log($"Done waiting for master ({masterOffset}) / replica ({replicaOffset}) replication to be in sync"); - LogEndpoints(master, Log); - return; - } - - Log($"Waiting for master ({masterOffset}) / replica ({replicaOffset}) replication to be in sync..."); + Log("Waiting for primary/replica replication to be in sync..."); + var sw = Stopwatch.StartNew(); + while (sw.Elapsed < duration.Value) + { + var info = primary.Info("replication"); + var replicationInfo = info.FirstOrDefault(f => f.Key == "Replication")?.ToArray().ToDictionary(); + var replicaInfo = replicationInfo?.FirstOrDefault(i => 
i.Key.StartsWith("slave")).Value?.Split(',').ToDictionary(i => i.Split('=').First(), i => i.Split('=').Last()); + var replicaOffset = replicaInfo?["offset"]; + var primaryOffset = replicationInfo?["master_repl_offset"]; - await Task.Delay(250).ForAwait(); + if (replicaOffset == primaryOffset) + { + Log($"Done waiting for primary ({primaryOffset}) / replica ({replicaOffset}) replication to be in sync"); + LogEndpoints(primary, m => Log(m)); + return; } - throw new RedisException("Timeout waiting for test servers master/replica replication to be in sync."); + Log($"Waiting for primary ({primaryOffset}) / replica ({replicaOffset}) replication to be in sync..."); + + await Task.Delay(250).ForAwait(); } + + throw new RedisException("Timeout waiting for test servers primary/replica replication to be in sync."); } } diff --git a/tests/StackExchange.Redis.Tests/SentinelFailover.cs b/tests/StackExchange.Redis.Tests/SentinelFailover.cs deleted file mode 100644 index 975fc4c98..000000000 --- a/tests/StackExchange.Redis.Tests/SentinelFailover.cs +++ /dev/null @@ -1,79 +0,0 @@ -using System; -using System.Linq; -using System.Threading.Tasks; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - [Collection(NonParallelCollection.Name)] - public class SentinelFailover : SentinelBase, IAsyncLifetime - { - public SentinelFailover(ITestOutputHelper output) : base(output) { } - - [Fact] - public async Task ManagedMasterConnectionEndToEndWithFailoverTest() - { - var connectionString = $"{TestConfig.Current.SentinelServer}:{TestConfig.Current.SentinelPortA},serviceName={ServiceOptions.ServiceName},allowAdmin=true"; - var conn = await ConnectionMultiplexer.ConnectAsync(connectionString); - conn.ConfigurationChanged += (s, e) => { - Log($"Configuration changed: {e.EndPoint}"); - }; - - var db = conn.GetDatabase(); - await db.PingAsync(); - - var endpoints = conn.GetEndPoints(); - Assert.Equal(2, endpoints.Length); - - var servers = endpoints.Select(e => 
conn.GetServer(e)).ToArray(); - Assert.Equal(2, servers.Length); - - var master = servers.FirstOrDefault(s => !s.IsReplica); - Assert.NotNull(master); - var replica = servers.FirstOrDefault(s => s.IsReplica); - Assert.NotNull(replica); - Assert.NotEqual(master.EndPoint.ToString(), replica.EndPoint.ToString()); - - // set string value on current master - var expected = DateTime.Now.Ticks.ToString(); - Log("Tick Key: " + expected); - var key = Me(); - await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); - await db.StringSetAsync(key, expected); - - var value = await db.StringGetAsync(key); - Assert.Equal(expected, value); - - // force read from replica, replication has some lag - await WaitForReplicationAsync(servers.First()).ForAwait(); - value = await db.StringGetAsync(key, CommandFlags.DemandReplica); - Assert.Equal(expected, value); - - // forces and verifies failover - await DoFailoverAsync(); - - endpoints = conn.GetEndPoints(); - Assert.Equal(2, endpoints.Length); - - servers = endpoints.Select(e => conn.GetServer(e)).ToArray(); - Assert.Equal(2, servers.Length); - - var newMaster = servers.FirstOrDefault(s => !s.IsReplica); - Assert.NotNull(newMaster); - Assert.Equal(replica.EndPoint.ToString(), newMaster.EndPoint.ToString()); - var newReplica = servers.FirstOrDefault(s => s.IsReplica); - Assert.NotNull(newReplica); - Assert.Equal(master.EndPoint.ToString(), newReplica.EndPoint.ToString()); - Assert.NotEqual(master.EndPoint.ToString(), replica.EndPoint.ToString()); - - value = await db.StringGetAsync(key); - Assert.Equal(expected, value); - - // force read from replica, replication has some lag - await WaitForReplicationAsync(newMaster).ForAwait(); - value = await db.StringGetAsync(key, CommandFlags.DemandReplica); - Assert.Equal(expected, value); - } - } -} diff --git a/tests/StackExchange.Redis.Tests/SentinelFailoverTests.cs b/tests/StackExchange.Redis.Tests/SentinelFailoverTests.cs new file mode 100644 index 000000000..358722839 --- /dev/null +++ 
b/tests/StackExchange.Redis.Tests/SentinelFailoverTests.cs @@ -0,0 +1,104 @@ +using System; +using System.Diagnostics; +using System.Linq; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +[Collection(NonParallelCollection.Name)] +public class SentinelFailoverTests(ITestOutputHelper output) : SentinelBase(output) +{ + [Fact] + public async Task ManagedPrimaryConnectionEndToEndWithFailoverTest() + { + Skip.UnlessLongRunning(); + var connectionString = $"{TestConfig.Current.SentinelServer}:{TestConfig.Current.SentinelPortA},serviceName={ServiceOptions.ServiceName},allowAdmin=true"; + await using var conn = await ConnectionMultiplexer.ConnectAsync(connectionString); + + conn.ConfigurationChanged += (s, e) => Log($"Configuration changed: {e.EndPoint}"); + + var sub = conn.GetSubscriber(); +#pragma warning disable CS0618 + sub.Subscribe("*", (channel, message) => Log($"Sub: {channel}, message:{message}")); +#pragma warning restore CS0618 + + var db = conn.GetDatabase(); + await db.PingAsync(); + + var endpoints = conn.GetEndPoints(); + Assert.Equal(2, endpoints.Length); + + var servers = endpoints.Select(e => conn.GetServer(e)).ToArray(); + Assert.Equal(2, servers.Length); + + var primary = servers.FirstOrDefault(s => !s.IsReplica); + Assert.NotNull(primary); + var replica = servers.FirstOrDefault(s => s.IsReplica); + Assert.NotNull(replica); + Assert.NotEqual(primary.EndPoint.ToString(), replica.EndPoint.ToString()); + + // Set string value on current primary + var expected = DateTime.Now.Ticks.ToString(); + Log("Tick Key: " + expected); + var key = Me(); + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + await db.StringSetAsync(key, expected); + + var value = await db.StringGetAsync(key); + Assert.Equal(expected, value); + + Log("Waiting for first replication check..."); + // force read from replica, replication has some lag + await WaitForReplicationAsync(servers[0]).ForAwait(); + value = await db.StringGetAsync(key, 
CommandFlags.DemandReplica); + Assert.Equal(expected, value); + + Log("Waiting for ready pre-failover..."); + await WaitForReadyAsync(); + + // capture current replica + var replicas = SentinelServerA.SentinelGetReplicaAddresses(ServiceName); + + Log("Starting failover..."); + var sw = Stopwatch.StartNew(); + SentinelServerA.SentinelFailover(ServiceName); + + // There's no point in doing much for 10 seconds - this is a built-in delay of how Sentinel works. + // The actual completion invoking the replication of the former primary is handled via + // https://github.com/redis/redis/blob/f233c4c59d24828c77eb1118f837eaee14695f7f/src/sentinel.c#L4799-L4808 + // ...which is invoked by INFO polls every 10 seconds (https://github.com/redis/redis/blob/f233c4c59d24828c77eb1118f837eaee14695f7f/src/sentinel.c#L81) + // ...which is calling https://github.com/redis/redis/blob/f233c4c59d24828c77eb1118f837eaee14695f7f/src/sentinel.c#L2666 + // However, the quicker iteration on INFO during an o_down does not apply here: https://github.com/redis/redis/blob/f233c4c59d24828c77eb1118f837eaee14695f7f/src/sentinel.c#L3089-L3104 + // So...we're waiting 10 seconds, no matter what. Might as well just idle to be more stable. 
+ await Task.Delay(TimeSpan.FromSeconds(10)); + + // wait until the replica becomes the primary + Log("Waiting for ready post-failover..."); + await WaitForReadyAsync(expectedPrimary: replicas[0]); + Log($"Time to failover: {sw.Elapsed}"); + + endpoints = conn.GetEndPoints(); + Assert.Equal(2, endpoints.Length); + + servers = endpoints.Select(e => conn.GetServer(e)).ToArray(); + Assert.Equal(2, servers.Length); + + var newPrimary = servers.FirstOrDefault(s => !s.IsReplica); + Assert.NotNull(newPrimary); + Assert.Equal(replica.EndPoint.ToString(), newPrimary.EndPoint.ToString()); + var newReplica = servers.FirstOrDefault(s => s.IsReplica); + Assert.NotNull(newReplica); + Assert.Equal(primary.EndPoint.ToString(), newReplica.EndPoint.ToString()); + Assert.NotEqual(primary.EndPoint.ToString(), replica.EndPoint.ToString()); + + value = await db.StringGetAsync(key); + Assert.Equal(expected, value); + + Log("Waiting for second replication check..."); + // force read from replica, replication has some lag + await WaitForReplicationAsync(newPrimary).ForAwait(); + value = await db.StringGetAsync(key, CommandFlags.DemandReplica); + Assert.Equal(expected, value); + } +} diff --git a/tests/StackExchange.Redis.Tests/SentinelTests.cs b/tests/StackExchange.Redis.Tests/SentinelTests.cs new file mode 100644 index 000000000..e58f530fd --- /dev/null +++ b/tests/StackExchange.Redis.Tests/SentinelTests.cs @@ -0,0 +1,475 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Net; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class SentinelTests(ITestOutputHelper output) : SentinelBase(output) +{ + [Fact] + public async Task PrimaryConnectTest() + { + var connectionString = $"{TestConfig.Current.SentinelServer},serviceName={ServiceOptions.ServiceName},allowAdmin=true"; + + var conn = ConnectionMultiplexer.Connect(connectionString); + + var db = conn.GetDatabase(); + await db.PingAsync(); + + var endpoints = 
conn.GetEndPoints(); + Assert.Equal(2, endpoints.Length); + + var servers = endpoints.Select(e => conn.GetServer(e)).ToArray(); + Assert.Equal(2, servers.Length); + + var primary = servers.FirstOrDefault(s => !s.IsReplica); + Assert.NotNull(primary); + var replica = servers.FirstOrDefault(s => s.IsReplica); + Assert.NotNull(replica); + Assert.NotEqual(primary.EndPoint.ToString(), replica.EndPoint.ToString()); + + var expected = DateTime.Now.Ticks.ToString(); + Log("Tick Key: " + expected); + var key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + db.StringSet(key, expected); + + var value = db.StringGet(key); + Assert.Equal(expected, value); + + // force read from replica, replication has some lag + await WaitForReplicationAsync(servers[0], TimeSpan.FromSeconds(10)).ForAwait(); + value = db.StringGet(key, CommandFlags.DemandReplica); + Assert.Equal(expected, value); + } + + [Fact] + public async Task PrimaryConnectAsyncTest() + { + var connectionString = $"{TestConfig.Current.SentinelServer},serviceName={ServiceOptions.ServiceName},allowAdmin=true"; + var conn = await ConnectionMultiplexer.ConnectAsync(connectionString); + + var db = conn.GetDatabase(); + await db.PingAsync(); + + var endpoints = conn.GetEndPoints(); + Assert.Equal(2, endpoints.Length); + + var servers = endpoints.Select(e => conn.GetServer(e)).ToArray(); + Assert.Equal(2, servers.Length); + + var primary = servers.FirstOrDefault(s => !s.IsReplica); + Assert.NotNull(primary); + var replica = servers.FirstOrDefault(s => s.IsReplica); + Assert.NotNull(replica); + Assert.NotEqual(primary.EndPoint.ToString(), replica.EndPoint.ToString()); + + var expected = DateTime.Now.Ticks.ToString(); + Log("Tick Key: " + expected); + var key = Me(); + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + await db.StringSetAsync(key, expected); + + var value = await db.StringGetAsync(key); + Assert.Equal(expected, value); + + // force read from replica, replication has some lag + await 
WaitForReplicationAsync(servers[0], TimeSpan.FromSeconds(10)).ForAwait(); + value = await db.StringGetAsync(key, CommandFlags.DemandReplica); + Assert.Equal(expected, value); + } + + [Fact] + [RunPerProtocol] + public async Task SentinelConnectTest() + { + var options = ServiceOptions.Clone(); + options.EndPoints.Add(TestConfig.Current.SentinelServer, TestConfig.Current.SentinelPortA); + await using var conn = ConnectionMultiplexer.SentinelConnect(options); + + var db = conn.GetDatabase(); + var test = await db.PingAsync(); + Log("ping to sentinel {0}:{1} took {2} ms", TestConfig.Current.SentinelServer, TestConfig.Current.SentinelPortA, test.TotalMilliseconds); + } + + [Fact] + public async Task SentinelRepeatConnectTest() + { + var options = ConfigurationOptions.Parse($"{TestConfig.Current.SentinelServer}:{TestConfig.Current.SentinelPortA}"); + options.ServiceName = ServiceName; + options.AllowAdmin = true; + + Log("Service Name: " + options.ServiceName); + foreach (var ep in options.EndPoints) + { + Log(" Endpoint: " + ep); + } + + await using var conn = await ConnectionMultiplexer.ConnectAsync(options); + + var db = conn.GetDatabase(); + var test = await db.PingAsync(); + Log("ping to 1st sentinel {0}:{1} took {2} ms", TestConfig.Current.SentinelServer, TestConfig.Current.SentinelPortA, test.TotalMilliseconds); + + Log("Service Name: " + options.ServiceName); + foreach (var ep in options.EndPoints) + { + Log(" Endpoint: " + ep); + } + + await using var conn2 = ConnectionMultiplexer.Connect(options); + + var db2 = conn2.GetDatabase(); + var test2 = await db2.PingAsync(); + Log("ping to 2nd sentinel {0}:{1} took {2} ms", TestConfig.Current.SentinelServer, TestConfig.Current.SentinelPortA, test2.TotalMilliseconds); + } + + [Fact] + public async Task SentinelConnectAsyncTest() + { + var options = ServiceOptions.Clone(); + options.EndPoints.Add(TestConfig.Current.SentinelServer, TestConfig.Current.SentinelPortA); + var conn = await 
ConnectionMultiplexer.SentinelConnectAsync(options); + + var db = conn.GetDatabase(); + var test = await db.PingAsync(); + Log("ping to sentinel {0}:{1} took {2} ms", TestConfig.Current.SentinelServer, TestConfig.Current.SentinelPortA, test.TotalMilliseconds); + } + + [Fact] + public void SentinelRole() + { + foreach (var server in SentinelsServers) + { + var role = server.Role(); + Assert.NotNull(role); + Assert.Equal(role.Value, RedisLiterals.sentinel); + var sentinel = role as Role.Sentinel; + Assert.NotNull(sentinel); + } + } + + [Fact] + public async Task PingTest() + { + var test = await SentinelServerA.PingAsync(); + Log("ping to sentinel {0}:{1} took {2} ms", TestConfig.Current.SentinelServer, TestConfig.Current.SentinelPortA, test.TotalMilliseconds); + test = await SentinelServerB.PingAsync(); + Log("ping to sentinel {0}:{1} took {2} ms", TestConfig.Current.SentinelServer, TestConfig.Current.SentinelPortB, test.TotalMilliseconds); + test = await SentinelServerC.PingAsync(); + Log("ping to sentinel {0}:{1} took {2} ms", TestConfig.Current.SentinelServer, TestConfig.Current.SentinelPortC, test.TotalMilliseconds); + } + + [Fact] + public void SentinelGetPrimaryAddressByNameTest() + { + foreach (var server in SentinelsServers) + { + var primary = server.SentinelMaster(ServiceName); + var endpoint = server.SentinelGetMasterAddressByName(ServiceName); + Assert.NotNull(endpoint); + var ipEndPoint = endpoint as IPEndPoint; + Assert.NotNull(ipEndPoint); + Assert.Equal(primary.ToDictionary()["ip"], ipEndPoint.Address.ToString()); + Assert.Equal(primary.ToDictionary()["port"], ipEndPoint.Port.ToString()); + Log("{0}:{1}", ipEndPoint.Address, ipEndPoint.Port); + } + } + + [Fact] + public async Task SentinelGetPrimaryAddressByNameAsyncTest() + { + foreach (var server in SentinelsServers) + { + var primary = server.SentinelMaster(ServiceName); + var endpoint = await server.SentinelGetMasterAddressByNameAsync(ServiceName).ForAwait(); + Assert.NotNull(endpoint); + var 
ipEndPoint = endpoint as IPEndPoint; + Assert.NotNull(ipEndPoint); + Assert.Equal(primary.ToDictionary()["ip"], ipEndPoint.Address.ToString()); + Assert.Equal(primary.ToDictionary()["port"], ipEndPoint.Port.ToString()); + Log("{0}:{1}", ipEndPoint.Address, ipEndPoint.Port); + } + } + + [Fact] + public void SentinelGetMasterAddressByNameNegativeTest() + { + foreach (var server in SentinelsServers) + { + var endpoint = server.SentinelGetMasterAddressByName("FakeServiceName"); + Assert.Null(endpoint); + } + } + + [Fact] + public async Task SentinelGetMasterAddressByNameAsyncNegativeTest() + { + foreach (var server in SentinelsServers) + { + var endpoint = await server.SentinelGetMasterAddressByNameAsync("FakeServiceName").ForAwait(); + Assert.Null(endpoint); + } + } + + [Fact] + public void SentinelPrimaryTest() + { + foreach (var server in SentinelsServers) + { + var dict = server.SentinelMaster(ServiceName).ToDictionary(); + Assert.Equal(ServiceName, dict["name"]); + Assert.StartsWith("master", dict["flags"]); + foreach (var kvp in dict) + { + Log("{0}:{1}", kvp.Key, kvp.Value); + } + } + } + + [Fact] + public async Task SentinelPrimaryAsyncTest() + { + foreach (var server in SentinelsServers) + { + var results = await server.SentinelMasterAsync(ServiceName).ForAwait(); + Assert.Equal(ServiceName, results.ToDictionary()["name"]); + Assert.StartsWith("master", results.ToDictionary()["flags"]); + foreach (var kvp in results) + { + Log("{0}:{1}", kvp.Key, kvp.Value); + } + } + } + + [Fact] + public void SentinelSentinelsTest() + { + var sentinels = SentinelServerA.SentinelSentinels(ServiceName); + + var expected = new List + { + SentinelServerB.EndPoint.ToString(), + SentinelServerC.EndPoint.ToString(), + }; + + var actual = new List(); + foreach (var kv in sentinels) + { + var data = kv.ToDictionary(); + actual.Add(data["ip"] + ":" + data["port"]); + } + + Assert.All(expected, ep => Assert.NotEqual(ep, SentinelServerA.EndPoint.ToString())); + Assert.Equal(2, 
sentinels.Length); + Assert.All(expected, ep => Assert.Contains(ep, actual, _ipComparer)); + + sentinels = SentinelServerB.SentinelSentinels(ServiceName); + foreach (var kv in sentinels) + { + var data = kv.ToDictionary(); + actual.Add(data["ip"] + ":" + data["port"]); + } + + expected = + [ + SentinelServerA.EndPoint.ToString(), + SentinelServerC.EndPoint.ToString(), + ]; + + Assert.All(expected, ep => Assert.NotEqual(ep, SentinelServerB.EndPoint.ToString())); + Assert.Equal(2, sentinels.Length); + Assert.All(expected, ep => Assert.Contains(ep, actual, _ipComparer)); + + sentinels = SentinelServerC.SentinelSentinels(ServiceName); + foreach (var kv in sentinels) + { + var data = kv.ToDictionary(); + actual.Add(data["ip"] + ":" + data["port"]); + } + + expected = + [ + SentinelServerA.EndPoint.ToString(), + SentinelServerB.EndPoint.ToString(), + ]; + + Assert.All(expected, ep => Assert.NotEqual(ep, SentinelServerC.EndPoint.ToString())); + Assert.Equal(2, sentinels.Length); + Assert.All(expected, ep => Assert.Contains(ep, actual, _ipComparer)); + } + + [Fact] + public async Task SentinelSentinelsAsyncTest() + { + var sentinels = await SentinelServerA.SentinelSentinelsAsync(ServiceName).ForAwait(); + var expected = new List + { + SentinelServerB.EndPoint.ToString(), + SentinelServerC.EndPoint.ToString(), + }; + + var actual = new List(); + foreach (var kv in sentinels) + { + var data = kv.ToDictionary(); + actual.Add(data["ip"] + ":" + data["port"]); + } + + Assert.All(expected, ep => Assert.NotEqual(ep, SentinelServerA.EndPoint.ToString())); + Assert.Equal(2, sentinels.Length); + Assert.All(expected, ep => Assert.Contains(ep, actual, _ipComparer)); + + sentinels = await SentinelServerB.SentinelSentinelsAsync(ServiceName).ForAwait(); + + expected = + [ + SentinelServerA.EndPoint.ToString(), + SentinelServerC.EndPoint.ToString(), + ]; + + actual = []; + foreach (var kv in sentinels) + { + var data = kv.ToDictionary(); + actual.Add(data["ip"] + ":" + data["port"]); + } 
+ + Assert.All(expected, ep => Assert.NotEqual(ep, SentinelServerB.EndPoint.ToString())); + Assert.Equal(2, sentinels.Length); + Assert.All(expected, ep => Assert.Contains(ep, actual, _ipComparer)); + + sentinels = await SentinelServerC.SentinelSentinelsAsync(ServiceName).ForAwait(); + expected = + [ + SentinelServerA.EndPoint.ToString(), + SentinelServerB.EndPoint.ToString(), + ]; + actual = []; + foreach (var kv in sentinels) + { + var data = kv.ToDictionary(); + actual.Add(data["ip"] + ":" + data["port"]); + } + + Assert.All(expected, ep => Assert.NotEqual(ep, SentinelServerC.EndPoint.ToString())); + Assert.Equal(2, sentinels.Length); + Assert.All(expected, ep => Assert.Contains(ep, actual, _ipComparer)); + } + + [Fact] + public void SentinelPrimariesTest() + { + var primaryConfigs = SentinelServerA.SentinelMasters(); + Assert.Single(primaryConfigs); + Assert.True(primaryConfigs[0].ToDictionary().ContainsKey("name"), "replicaConfigs contains 'name'"); + Assert.Equal(ServiceName, primaryConfigs[0].ToDictionary()["name"]); + Assert.StartsWith("master", primaryConfigs[0].ToDictionary()["flags"]); + foreach (var config in primaryConfigs) + { + foreach (var kvp in config) + { + Log("{0}:{1}", kvp.Key, kvp.Value); + } + } + } + + [Fact] + public async Task SentinelPrimariesAsyncTest() + { + var primaryConfigs = await SentinelServerA.SentinelMastersAsync().ForAwait(); + Assert.Single(primaryConfigs); + Assert.True(primaryConfigs[0].ToDictionary().ContainsKey("name"), "replicaConfigs contains 'name'"); + Assert.Equal(ServiceName, primaryConfigs[0].ToDictionary()["name"]); + Assert.StartsWith("master", primaryConfigs[0].ToDictionary()["flags"]); + foreach (var config in primaryConfigs) + { + foreach (var kvp in config) + { + Log("{0}:{1}", kvp.Key, kvp.Value); + } + } + } + + [Fact] + public async Task SentinelReplicasTest() + { + // Give previous test run a moment to reset when multi-framework failover is in play. 
+ await UntilConditionAsync(TimeSpan.FromSeconds(5), () => SentinelServerA.SentinelReplicas(ServiceName).Length > 0); + + var replicaConfigs = SentinelServerA.SentinelReplicas(ServiceName); + Assert.True(replicaConfigs.Length > 0, "Has replicaConfigs"); + Assert.True(replicaConfigs[0].ToDictionary().ContainsKey("name"), "replicaConfigs contains 'name'"); + Assert.StartsWith("slave", replicaConfigs[0].ToDictionary()["flags"]); + + foreach (var config in replicaConfigs) + { + foreach (var kvp in config) + { + Log("{0}:{1}", kvp.Key, kvp.Value); + } + } + } + + [Fact] + public async Task SentinelReplicasAsyncTest() + { + // Give previous test run a moment to reset when multi-framework failover is in play. + await UntilConditionAsync(TimeSpan.FromSeconds(5), () => SentinelServerA.SentinelReplicas(ServiceName).Length > 0); + + var replicaConfigs = await SentinelServerA.SentinelReplicasAsync(ServiceName).ForAwait(); + Assert.True(replicaConfigs.Length > 0, "Has replicaConfigs"); + Assert.True(replicaConfigs[0].ToDictionary().ContainsKey("name"), "replicaConfigs contains 'name'"); + Assert.StartsWith("slave", replicaConfigs[0].ToDictionary()["flags"]); + foreach (var config in replicaConfigs) + { + foreach (var kvp in config) + { + Log("{0}:{1}", kvp.Key, kvp.Value); + } + } + } + + [Fact] + public async Task SentinelGetSentinelAddressesTest() + { + var addresses = await SentinelServerA.SentinelGetSentinelAddressesAsync(ServiceName).ForAwait(); + Assert.Contains(SentinelServerB.EndPoint, addresses); + Assert.Contains(SentinelServerC.EndPoint, addresses); + + addresses = await SentinelServerB.SentinelGetSentinelAddressesAsync(ServiceName).ForAwait(); + Assert.Contains(SentinelServerA.EndPoint, addresses); + Assert.Contains(SentinelServerC.EndPoint, addresses); + + addresses = await SentinelServerC.SentinelGetSentinelAddressesAsync(ServiceName).ForAwait(); + Assert.Contains(SentinelServerA.EndPoint, addresses); + Assert.Contains(SentinelServerB.EndPoint, addresses); + } + + 
[Fact] + public async Task ReadOnlyConnectionReplicasTest() + { + var replicas = SentinelServerA.SentinelGetReplicaAddresses(ServiceName); + if (replicas.Length == 0) + { + Assert.Skip("Sentinel race: 0 replicas to test against."); + } + + var config = new ConfigurationOptions(); + foreach (var replica in replicas) + { + config.EndPoints.Add(replica); + } + + var readonlyConn = await ConnectionMultiplexer.ConnectAsync(config); + + await UntilConditionAsync(TimeSpan.FromSeconds(2), () => readonlyConn.IsConnected); + Assert.True(readonlyConn.IsConnected); + var db = readonlyConn.GetDatabase(); + var s = db.StringGet("test"); + Assert.True(s.IsNullOrEmpty); + } +} diff --git a/tests/StackExchange.Redis.Tests/ServerSnapshotTests.cs b/tests/StackExchange.Redis.Tests/ServerSnapshotTests.cs new file mode 100644 index 000000000..2c81c3826 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/ServerSnapshotTests.cs @@ -0,0 +1,126 @@ +using System; +using System.Diagnostics.CodeAnalysis; +using System.Linq; +using System.Runtime.Serialization; +using Xunit; +using static StackExchange.Redis.ConnectionMultiplexer; + +namespace StackExchange.Redis.Tests; + +public class ServerSnapshotTests +{ + [Fact] + [SuppressMessage("Assertions", "xUnit2012:Do not use boolean check to check if a value exists in a collection", Justification = "Explicit testing")] + [SuppressMessage("Assertions", "xUnit2013:Do not use equality check to check for collection size.", Justification = "Explicit testing")] + [SuppressMessage("Assertions", "xUnit2029:Do not use Empty() to check if a value does not exist in a collection", Justification = "Explicit testing")] + [SuppressMessage("Performance", "CA1829:Use Length/Count property instead of Count() when available", Justification = "Explicit testing")] + [SuppressMessage("Performance", "CA1860:Avoid using 'Enumerable.Any()' extension method", Justification = "Explicit testing")] + public void EmptyBehaviour() + { + var snapshot = ServerSnapshot.Empty; + 
Assert.Same(snapshot, snapshot.Add(null!)); + + Assert.Equal(0, snapshot.Count); + Assert.Equal(0, ManualCount(snapshot)); + Assert.Equal(0, ManualCount(snapshot, static _ => true)); + Assert.Equal(0, ManualCount(snapshot, static _ => false)); + + Assert.Equal(0, Enumerable.Count(snapshot)); + Assert.Equal(0, Enumerable.Count(snapshot, static _ => true)); + Assert.Equal(0, Enumerable.Count(snapshot, static _ => false)); + + Assert.False(Enumerable.Any(snapshot)); + Assert.False(snapshot.Any()); + + Assert.False(Enumerable.Any(snapshot, static _ => true)); + Assert.False(snapshot.Any(static _ => true)); + Assert.False(Enumerable.Any(snapshot, static _ => false)); + Assert.False(snapshot.Any(static _ => false)); + + Assert.Empty(snapshot); + Assert.Empty(Enumerable.Where(snapshot, static _ => true)); + Assert.Empty(snapshot.Where(static _ => true)); + Assert.Empty(Enumerable.Where(snapshot, static _ => false)); + Assert.Empty(snapshot.Where(static _ => false)); + + Assert.Empty(snapshot.Where(CommandFlags.DemandMaster)); + Assert.Empty(snapshot.Where(CommandFlags.DemandReplica)); + Assert.Empty(snapshot.Where(CommandFlags.None)); + Assert.Empty(snapshot.Where(CommandFlags.FireAndForget | CommandFlags.NoRedirect | CommandFlags.NoScriptCache)); + } + + [Theory] + [InlineData(1, 0)] + [InlineData(1, 1)] + [InlineData(5, 0)] + [InlineData(5, 3)] + [InlineData(5, 5)] + [SuppressMessage("Assertions", "xUnit2012:Do not use boolean check to check if a value exists in a collection", Justification = "Explicit testing")] + [SuppressMessage("Assertions", "xUnit2029:Do not use Empty() to check if a value does not exist in a collection", Justification = "Explicit testing")] + [SuppressMessage("Assertions", "xUnit2030:Do not use Assert.NotEmpty to check if a value exists in a collection", Justification = "Explicit testing")] + [SuppressMessage("Performance", "CA1829:Use Length/Count property instead of Count() when available", Justification = "Explicit testing")] + 
[SuppressMessage("Performance", "CA1860:Avoid using 'Enumerable.Any()' extension method", Justification = "Explicit testing")] + public void NonEmptyBehaviour(int count, int replicaCount) + { + var snapshot = ServerSnapshot.Empty; + for (int i = 0; i < count; i++) + { +#pragma warning disable SYSLIB0050 // Type or member is obsolete + var dummy = (ServerEndPoint)FormatterServices.GetSafeUninitializedObject(typeof(ServerEndPoint)); +#pragma warning restore SYSLIB0050 // Type or member is obsolete + dummy.IsReplica = i < replicaCount; + snapshot = snapshot.Add(dummy); + } + + Assert.Equal(count, snapshot.Count); + Assert.Equal(count, ManualCount(snapshot)); + Assert.Equal(count, ManualCount(snapshot, static _ => true)); + Assert.Equal(0, ManualCount(snapshot, static _ => false)); + Assert.Equal(replicaCount, ManualCount(snapshot, static s => s.IsReplica)); + + Assert.Equal(count, Enumerable.Count(snapshot)); + Assert.Equal(count, Enumerable.Count(snapshot, static _ => true)); + Assert.Equal(0, Enumerable.Count(snapshot, static _ => false)); + Assert.Equal(replicaCount, Enumerable.Count(snapshot, static s => s.IsReplica)); + + Assert.True(Enumerable.Any(snapshot)); + Assert.True(snapshot.Any()); + + Assert.True(Enumerable.Any(snapshot, static _ => true)); + Assert.True(snapshot.Any(static _ => true)); + Assert.False(Enumerable.Any(snapshot, static _ => false)); + Assert.False(snapshot.Any(static _ => false)); + + Assert.NotEmpty(snapshot); + Assert.NotEmpty(Enumerable.Where(snapshot, static _ => true)); + Assert.NotEmpty(snapshot.Where(static _ => true)); + Assert.Empty(Enumerable.Where(snapshot, static _ => false)); + Assert.Empty(snapshot.Where(static _ => false)); + + Assert.Equal(snapshot.Count - replicaCount, snapshot.Where(CommandFlags.DemandMaster).Count()); + Assert.Equal(replicaCount, snapshot.Where(CommandFlags.DemandReplica).Count()); + Assert.Equal(snapshot.Count, snapshot.Where(CommandFlags.None).Count()); + Assert.Equal(snapshot.Count, 
snapshot.Where(CommandFlags.FireAndForget | CommandFlags.NoRedirect | CommandFlags.NoScriptCache).Count()); + } + + private static int ManualCount(ServerSnapshot snapshot, Func? predicate = null) + { + // ^^^ tests the custom iterator implementation + int count = 0; + if (predicate is null) + { + foreach (var item in snapshot) + { + count++; + } + } + else + { + foreach (var item in snapshot.Where(predicate)) + { + count++; + } + } + return count; + } +} diff --git a/tests/StackExchange.Redis.Tests/SetTests.cs b/tests/StackExchange.Redis.Tests/SetTests.cs new file mode 100644 index 000000000..9326ca7a7 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/SetTests.cs @@ -0,0 +1,389 @@ +using System; +using System.Linq; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +[RunPerProtocol] +public class SetTests(ITestOutputHelper output, SharedConnectionFixture fixture) : TestBase(output, fixture) +{ + [Fact] + public async Task SetContains() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var key = Me(); + var db = conn.GetDatabase(); + db.KeyDelete(key); + for (int i = 1; i < 1001; i++) + { + db.SetAdd(key, i, CommandFlags.FireAndForget); + } + + // Single member + var isMemeber = db.SetContains(key, 1); + Assert.True(isMemeber); + + // Multi members + var areMemebers = db.SetContains(key, [0, 1, 2]); + Assert.Equal(3, areMemebers.Length); + Assert.False(areMemebers[0]); + Assert.True(areMemebers[1]); + + // key not exists + db.KeyDelete(key); + isMemeber = db.SetContains(key, 1); + Assert.False(isMemeber); + areMemebers = db.SetContains(key, [0, 1, 2]); + Assert.Equal(3, areMemebers.Length); + Assert.True(areMemebers.All(i => !i)); // Check that all the elements are False + } + + [Fact] + public async Task SetContainsAsync() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var key = Me(); + var db = conn.GetDatabase(); + await db.KeyDeleteAsync(key); + for (int i = 1; i < 1001; i++) + 
{ + db.SetAdd(key, i, CommandFlags.FireAndForget); + } + + // Single member + var isMemeber = await db.SetContainsAsync(key, 1); + Assert.True(isMemeber); + + // Multi members + var areMemebers = await db.SetContainsAsync(key, [0, 1, 2]); + Assert.Equal(3, areMemebers.Length); + Assert.False(areMemebers[0]); + Assert.True(areMemebers[1]); + + // key not exists + await db.KeyDeleteAsync(key); + isMemeber = await db.SetContainsAsync(key, 1); + Assert.False(isMemeber); + areMemebers = await db.SetContainsAsync(key, [0, 1, 2]); + Assert.Equal(3, areMemebers.Length); + Assert.True(areMemebers.All(i => !i)); // Check that all the elements are False + } + + [Fact] + public async Task SetIntersectionLength() + { + await using var conn = Create(require: RedisFeatures.v7_0_0_rc1); + + var db = conn.GetDatabase(); + + var key1 = Me() + "1"; + db.KeyDelete(key1, CommandFlags.FireAndForget); + db.SetAdd(key1, [0, 1, 2, 3, 4], CommandFlags.FireAndForget); + var key2 = Me() + "2"; + db.KeyDelete(key2, CommandFlags.FireAndForget); + db.SetAdd(key2, [1, 2, 3, 4, 5], CommandFlags.FireAndForget); + + Assert.Equal(4, db.SetIntersectionLength([key1, key2])); + // with limit + Assert.Equal(3, db.SetIntersectionLength([key1, key2], 3)); + + // Missing keys should be 0 + var key3 = Me() + "3"; + var key4 = Me() + "4"; + db.KeyDelete(key3, CommandFlags.FireAndForget); + Assert.Equal(0, db.SetIntersectionLength([key1, key3])); + Assert.Equal(0, db.SetIntersectionLength([key3, key4])); + } + + [Fact] + public async Task SetIntersectionLengthAsync() + { + await using var conn = Create(require: RedisFeatures.v7_0_0_rc1); + + var db = conn.GetDatabase(); + + var key1 = Me() + "1"; + db.KeyDelete(key1, CommandFlags.FireAndForget); + db.SetAdd(key1, [0, 1, 2, 3, 4], CommandFlags.FireAndForget); + var key2 = Me() + "2"; + db.KeyDelete(key2, CommandFlags.FireAndForget); + db.SetAdd(key2, [1, 2, 3, 4, 5], CommandFlags.FireAndForget); + + Assert.Equal(4, await db.SetIntersectionLengthAsync([key1, 
key2])); + // with limit + Assert.Equal(3, await db.SetIntersectionLengthAsync([key1, key2], 3)); + + // Missing keys should be 0 + var key3 = Me() + "3"; + var key4 = Me() + "4"; + db.KeyDelete(key3, CommandFlags.FireAndForget); + Assert.Equal(0, await db.SetIntersectionLengthAsync([key1, key3])); + Assert.Equal(0, await db.SetIntersectionLengthAsync([key3, key4])); + } + + [Fact] + public async Task SScan() + { + await using var conn = Create(); + + var server = GetAnyPrimary(conn); + + var key = Me(); + var db = conn.GetDatabase(); + int totalUnfiltered = 0, totalFiltered = 0; + for (int i = 1; i < 1001; i++) + { + db.SetAdd(key, i, CommandFlags.FireAndForget); + totalUnfiltered += i; + if (i.ToString().Contains('3')) totalFiltered += i; + } + + var unfilteredActual = db.SetScan(key).Select(x => (int)x).Sum(); + Assert.Equal(totalUnfiltered, unfilteredActual); + if (server.Features.Scan) + { + var filteredActual = db.SetScan(key, "*3*").Select(x => (int)x).Sum(); + Assert.Equal(totalFiltered, filteredActual); + } + } + + [Fact] + public async Task SetRemoveArgTests() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + var key = Me(); + + RedisValue[]? 
values = null; + Assert.Throws(() => db.SetRemove(key, values!)); + await Assert.ThrowsAsync(async () => await db.SetRemoveAsync(key, values!).ForAwait()).ForAwait(); + + values = []; + Assert.Equal(0, db.SetRemove(key, values)); + Assert.Equal(0, await db.SetRemoveAsync(key, values).ForAwait()); + } + + [Fact] + public async Task SetPopMulti_Multi() + { + await using var conn = Create(require: RedisFeatures.v3_2_0); + + var db = conn.GetDatabase(); + var key = Me(); + + db.KeyDelete(key, CommandFlags.FireAndForget); + for (int i = 1; i < 11; i++) + { + _ = db.SetAddAsync(key, i, CommandFlags.FireAndForget); + } + + var random = db.SetPop(key); + Assert.False(random.IsNull); + Assert.True((int)random > 0); + Assert.True((int)random <= 10); + Assert.Equal(9, db.SetLength(key)); + + var moreRandoms = db.SetPop(key, 2); + Assert.Equal(2, moreRandoms.Length); + Assert.False(moreRandoms[0].IsNull); + Assert.Equal(7, db.SetLength(key)); + } + + [Fact] + public async Task SetPopMulti_Single() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + var key = Me(); + + db.KeyDelete(key, CommandFlags.FireAndForget); + for (int i = 1; i < 11; i++) + { + db.SetAdd(key, i, CommandFlags.FireAndForget); + } + + var random = db.SetPop(key); + Assert.False(random.IsNull); + Assert.True((int)random > 0); + Assert.True((int)random <= 10); + Assert.Equal(9, db.SetLength(key)); + + var moreRandoms = db.SetPop(key, 1); + Assert.Single(moreRandoms); + Assert.False(moreRandoms[0].IsNull); + Assert.Equal(8, db.SetLength(key)); + } + + [Fact] + public async Task SetPopMulti_Multi_Async() + { + await using var conn = Create(require: RedisFeatures.v3_2_0); + + var db = conn.GetDatabase(); + var key = Me(); + + db.KeyDelete(key, CommandFlags.FireAndForget); + for (int i = 1; i < 11; i++) + { + db.SetAdd(key, i, CommandFlags.FireAndForget); + } + + var random = await db.SetPopAsync(key).ForAwait(); + Assert.False(random.IsNull); + Assert.True((int)random > 0); + 
Assert.True((int)random <= 10); + Assert.Equal(9, db.SetLength(key)); + + var moreRandoms = await db.SetPopAsync(key, 2).ForAwait(); + Assert.Equal(2, moreRandoms.Length); + Assert.False(moreRandoms[0].IsNull); + Assert.Equal(7, db.SetLength(key)); + } + + [Fact] + public async Task SetPopMulti_Single_Async() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + var key = Me(); + + db.KeyDelete(key, CommandFlags.FireAndForget); + for (int i = 1; i < 11; i++) + { + db.SetAdd(key, i, CommandFlags.FireAndForget); + } + + var random = await db.SetPopAsync(key).ForAwait(); + Assert.False(random.IsNull); + Assert.True((int)random > 0); + Assert.True((int)random <= 10); + Assert.Equal(9, db.SetLength(key)); + + var moreRandoms = db.SetPop(key, 1); + Assert.Single(moreRandoms); + Assert.False(moreRandoms[0].IsNull); + Assert.Equal(8, db.SetLength(key)); + } + + [Fact] + public async Task SetPopMulti_Zero_Async() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + var key = Me(); + + db.KeyDelete(key, CommandFlags.FireAndForget); + for (int i = 1; i < 11; i++) + { + db.SetAdd(key, i, CommandFlags.FireAndForget); + } + + var t = db.SetPopAsync(key, count: 0); + Assert.True(t.IsCompleted); // sync + var arr = await t; + Assert.Empty(arr); + + Assert.Equal(10, db.SetLength(key)); + } + + [Fact] + public async Task SetAdd_Zero() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + var key = Me(); + + db.KeyDelete(key, CommandFlags.FireAndForget); + + var result = db.SetAdd(key, Array.Empty()); + Assert.Equal(0, result); + + Assert.Equal(0, db.SetLength(key)); + } + + [Fact] + public async Task SetAdd_Zero_Async() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + var key = Me(); + + db.KeyDelete(key, CommandFlags.FireAndForget); + + var t = db.SetAddAsync(key, Array.Empty()); + Assert.True(t.IsCompleted); // sync + var count = await t; + Assert.Equal(0, count); + + Assert.Equal(0, 
db.SetLength(key)); + } + + [Fact] + public async Task SetPopMulti_Nil() + { + await using var conn = Create(require: RedisFeatures.v3_2_0); + + var db = conn.GetDatabase(); + var key = Me(); + + db.KeyDelete(key, CommandFlags.FireAndForget); + + var arr = db.SetPop(key, 1); + Assert.Empty(arr); + } + + [Fact] + public async Task TestSortReadonlyPrimary() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + var key = Me(); + await db.KeyDeleteAsync(key); + + var random = new Random(); + var items = Enumerable.Repeat(0, 200).Select(_ => random.Next()).ToList(); + await db.SetAddAsync(key, items.Select(x => (RedisValue)x).ToArray()); + items.Sort(); + + var result = db.Sort(key).Select(x => (int)x); + Assert.Equal(items, result); + + result = (await db.SortAsync(key)).Select(x => (int)x); + Assert.Equal(items, result); + } + + [Fact] + public async Task TestSortReadonlyReplica() + { + await using var conn = Create(require: RedisFeatures.v7_0_0_rc1); + + var db = conn.GetDatabase(); + var key = Me(); + await db.KeyDeleteAsync(key); + + var random = new Random(); + var items = Enumerable.Repeat(0, 200).Select(_ => random.Next()).ToList(); + await db.SetAddAsync(key, items.Select(x => (RedisValue)x).ToArray()); + + await using var readonlyConn = Create(configuration: TestConfig.Current.ReplicaServerAndPort, require: RedisFeatures.v7_0_0_rc1); + var readonlyDb = conn.GetDatabase(); + + items.Sort(); + + var result = readonlyDb.Sort(key).Select(x => (int)x); + Assert.Equal(items, result); + + result = (await readonlyDb.SortAsync(key)).Select(x => (int)x); + Assert.Equal(items, result); + } +} diff --git a/tests/StackExchange.Redis.Tests/Sets.cs b/tests/StackExchange.Redis.Tests/Sets.cs deleted file mode 100644 index c17d22598..000000000 --- a/tests/StackExchange.Redis.Tests/Sets.cs +++ /dev/null @@ -1,229 +0,0 @@ -using System; -using System.Linq; -using System.Threading.Tasks; -using Xunit; -using Xunit.Abstractions; - -namespace 
StackExchange.Redis.Tests -{ - [Collection(SharedConnectionFixture.Key)] - public class Sets : TestBase - { - public Sets(ITestOutputHelper output, SharedConnectionFixture fixture) : base (output, fixture) { } - - [Fact] - public void SScan() - { - using (var conn = Create()) - { - var server = GetAnyMaster(conn); - - RedisKey key = Me(); - var db = conn.GetDatabase(); - int totalUnfiltered = 0, totalFiltered = 0; - for (int i = 1; i < 1001; i++) - { - db.SetAdd(key, i, CommandFlags.FireAndForget); - totalUnfiltered += i; - if (i.ToString().Contains("3")) totalFiltered += i; - } - - var unfilteredActual = db.SetScan(key).Select(x => (int)x).Sum(); - Assert.Equal(totalUnfiltered, unfilteredActual); - if (server.Features.Scan) - { - var filteredActual = db.SetScan(key, "*3*").Select(x => (int)x).Sum(); - Assert.Equal(totalFiltered, filteredActual); - } - } - } - - [Fact] - public async Task SetRemoveArgTests() - { - using (var conn = Create()) - { - var db = conn.GetDatabase(); - var key = Me(); - - RedisValue[] values = null; - Assert.Throws(() => db.SetRemove(key, values)); - await Assert.ThrowsAsync(async () => await db.SetRemoveAsync(key, values).ForAwait()).ForAwait(); - - values = new RedisValue[0]; - Assert.Equal(0, db.SetRemove(key, values)); - Assert.Equal(0, await db.SetRemoveAsync(key, values).ForAwait()); - } - } - - [Fact] - public void SetPopMulti_Multi() - { - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.SetPopMultiple), r => r.SetPopMultiple); - - var db = conn.GetDatabase(); - var key = Me(); - - db.KeyDelete(key, CommandFlags.FireAndForget); - for (int i = 1; i < 11; i++) - { - db.SetAddAsync(key, i, CommandFlags.FireAndForget); - } - - var random = db.SetPop(key); - Assert.False(random.IsNull); - Assert.True((int)random > 0); - Assert.True((int)random <= 10); - Assert.Equal(9, db.SetLength(key)); - - var moreRandoms = db.SetPop(key, 2); - Assert.Equal(2, moreRandoms.Length); - 
Assert.False(moreRandoms[0].IsNull); - Assert.Equal(7, db.SetLength(key)); - } - } - [Fact] - public void SetPopMulti_Single() - { - using (var conn = Create()) - { - var db = conn.GetDatabase(); - var key = Me(); - - db.KeyDelete(key, CommandFlags.FireAndForget); - for (int i = 1; i < 11; i++) - { - db.SetAdd(key, i, CommandFlags.FireAndForget); - } - - var random = db.SetPop(key); - Assert.False(random.IsNull); - Assert.True((int)random > 0); - Assert.True((int)random <= 10); - Assert.Equal(9, db.SetLength(key)); - - var moreRandoms = db.SetPop(key, 1); - Assert.Single(moreRandoms); - Assert.False(moreRandoms[0].IsNull); - Assert.Equal(8, db.SetLength(key)); - } - } - - [Fact] - public async Task SetPopMulti_Multi_Async() - { - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.SetPopMultiple), r => r.SetPopMultiple); - - var db = conn.GetDatabase(); - var key = Me(); - - db.KeyDelete(key, CommandFlags.FireAndForget); - for (int i = 1; i < 11; i++) - { - db.SetAdd(key, i, CommandFlags.FireAndForget); - } - - var random = await db.SetPopAsync(key).ForAwait(); - Assert.False(random.IsNull); - Assert.True((int)random > 0); - Assert.True((int)random <= 10); - Assert.Equal(9, db.SetLength(key)); - - var moreRandoms = await db.SetPopAsync(key, 2).ForAwait(); - Assert.Equal(2, moreRandoms.Length); - Assert.False(moreRandoms[0].IsNull); - Assert.Equal(7, db.SetLength(key)); - } - } - - [Fact] - public async Task SetPopMulti_Single_Async() - { - using (var conn = Create()) - { - var db = conn.GetDatabase(); - var key = Me(); - - db.KeyDelete(key, CommandFlags.FireAndForget); - for (int i = 1; i < 11; i++) - { - db.SetAdd(key, i, CommandFlags.FireAndForget); - } - - var random = await db.SetPopAsync(key).ForAwait(); - Assert.False(random.IsNull); - Assert.True((int)random > 0); - Assert.True((int)random <= 10); - Assert.Equal(9, db.SetLength(key)); - - var moreRandoms = db.SetPop(key, 1); - Assert.Single(moreRandoms); - 
Assert.False(moreRandoms[0].IsNull); - Assert.Equal(8, db.SetLength(key)); - } - } - - [Fact] - public async Task SetPopMulti_Zero_Async() - { - using (var conn = Create()) - { - var db = conn.GetDatabase(); - var key = Me(); - - db.KeyDelete(key, CommandFlags.FireAndForget); - for (int i = 1; i < 11; i++) - { - db.SetAdd(key, i, CommandFlags.FireAndForget); - } - - var t = db.SetPopAsync(key, count: 0); - Assert.True(t.IsCompleted); // sync - var arr = await t; - Assert.Empty(arr); - - Assert.Equal(10, db.SetLength(key)); - } - } - - [Fact] - public void SetAdd_Zero() - { - using (var conn = Create()) - { - var db = conn.GetDatabase(); - var key = Me(); - - db.KeyDelete(key, CommandFlags.FireAndForget); - - var result = db.SetAdd(key, new RedisValue[0]); - Assert.Equal(0, result); - - Assert.Equal(0, db.SetLength(key)); - } - } - - [Fact] - public async Task SetAdd_Zero_Async() - { - using (var conn = Create()) - { - var db = conn.GetDatabase(); - var key = Me(); - - db.KeyDelete(key, CommandFlags.FireAndForget); - - var t = db.SetAddAsync(key, new RedisValue[0]); - Assert.True(t.IsCompleted); // sync - var count = await t; - Assert.Equal(0, count); - - Assert.Equal(0, db.SetLength(key)); - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/SharedConnectionFixture.cs b/tests/StackExchange.Redis.Tests/SharedConnectionFixture.cs deleted file mode 100644 index b832262a2..000000000 --- a/tests/StackExchange.Redis.Tests/SharedConnectionFixture.cs +++ /dev/null @@ -1,341 +0,0 @@ -using System; -using System.Collections.Generic; -using System.IO; -using System.Linq; -using System.Net; -using System.Threading; -using System.Threading.Tasks; -using StackExchange.Redis.Profiling; -using Xunit; - -namespace StackExchange.Redis.Tests -{ - public class SharedConnectionFixture : IDisposable - { - public bool IsEnabled { get; } - - public const string Key = "Shared Muxer"; - private readonly ConnectionMultiplexer _actualConnection; - internal 
IInternalConnectionMultiplexer Connection { get; } - public string Configuration { get; } - - public SharedConnectionFixture() - { - IsEnabled = TestConfig.Current.UseSharedConnection; - Configuration = TestBase.GetDefaultConfiguration(); - _actualConnection = TestBase.CreateDefault( - output: null, - clientName: nameof(SharedConnectionFixture), - configuration: Configuration, - allowAdmin: true - ); - _actualConnection.InternalError += OnInternalError; - _actualConnection.ConnectionFailed += OnConnectionFailed; - - Connection = new NonDisposingConnection(_actualConnection); - } - - private class NonDisposingConnection : IInternalConnectionMultiplexer - { - public bool AllowConnect - { - get => _inner.AllowConnect; - set => _inner.AllowConnect = value; - } - - public bool IgnoreConnect - { - get => _inner.IgnoreConnect; - set => _inner.IgnoreConnect = value; - } - - public ReadOnlySpan GetServerSnapshot() => _inner.GetServerSnapshot(); - - private readonly IInternalConnectionMultiplexer _inner; - public NonDisposingConnection(IInternalConnectionMultiplexer inner) => _inner = inner; - - public string ClientName => _inner.ClientName; - - public string Configuration => _inner.Configuration; - - public int TimeoutMilliseconds => _inner.TimeoutMilliseconds; - - public long OperationCount => _inner.OperationCount; - -#pragma warning disable CS0618 - public bool PreserveAsyncOrder { get => _inner.PreserveAsyncOrder; set => _inner.PreserveAsyncOrder = value; } -#pragma warning restore CS0618 - - public bool IsConnected => _inner.IsConnected; - - public bool IsConnecting => _inner.IsConnecting; - - public bool IncludeDetailInExceptions { get => _inner.IncludeDetailInExceptions; set => _inner.IncludeDetailInExceptions = value; } - public int StormLogThreshold { get => _inner.StormLogThreshold; set => _inner.StormLogThreshold = value; } - - public event EventHandler ErrorMessage - { - add - { - _inner.ErrorMessage += value; - } - - remove - { - _inner.ErrorMessage -= value; - 
} - } - - public event EventHandler ConnectionFailed - { - add - { - _inner.ConnectionFailed += value; - } - - remove - { - _inner.ConnectionFailed -= value; - } - } - - public event EventHandler InternalError - { - add - { - _inner.InternalError += value; - } - - remove - { - _inner.InternalError -= value; - } - } - - public event EventHandler ConnectionRestored - { - add - { - _inner.ConnectionRestored += value; - } - - remove - { - _inner.ConnectionRestored -= value; - } - } - - public event EventHandler ConfigurationChanged - { - add - { - _inner.ConfigurationChanged += value; - } - - remove - { - _inner.ConfigurationChanged -= value; - } - } - - public event EventHandler ConfigurationChangedBroadcast - { - add - { - _inner.ConfigurationChangedBroadcast += value; - } - - remove - { - _inner.ConfigurationChangedBroadcast -= value; - } - } - - public event EventHandler HashSlotMoved - { - add - { - _inner.HashSlotMoved += value; - } - - remove - { - _inner.HashSlotMoved -= value; - } - } - - public void Close(bool allowCommandsToComplete = true) - { - _inner.Close(allowCommandsToComplete); - } - - public Task CloseAsync(bool allowCommandsToComplete = true) - { - return _inner.CloseAsync(allowCommandsToComplete); - } - - public bool Configure(TextWriter log = null) - { - return _inner.Configure(log); - } - - public Task ConfigureAsync(TextWriter log = null) - { - return _inner.ConfigureAsync(log); - } - - public void Dispose() { } // DO NOT call _inner.Dispose(); - - public ServerCounters GetCounters() - { - return _inner.GetCounters(); - } - - public IDatabase GetDatabase(int db = -1, object asyncState = null) - { - return _inner.GetDatabase(db, asyncState); - } - - public EndPoint[] GetEndPoints(bool configuredOnly = false) - { - return _inner.GetEndPoints(configuredOnly); - } - - public int GetHashSlot(RedisKey key) - { - return _inner.GetHashSlot(key); - } - - public IServer GetServer(string host, int port, object asyncState = null) - { - return 
_inner.GetServer(host, port, asyncState); - } - - public IServer GetServer(string hostAndPort, object asyncState = null) - { - return _inner.GetServer(hostAndPort, asyncState); - } - - public IServer GetServer(IPAddress host, int port) - { - return _inner.GetServer(host, port); - } - - public IServer GetServer(EndPoint endpoint, object asyncState = null) - { - return _inner.GetServer(endpoint, asyncState); - } - - public string GetStatus() - { - return _inner.GetStatus(); - } - - public void GetStatus(TextWriter log) - { - _inner.GetStatus(log); - } - - public string GetStormLog() - { - return _inner.GetStormLog(); - } - - public ISubscriber GetSubscriber(object asyncState = null) - { - return _inner.GetSubscriber(asyncState); - } - - public int HashSlot(RedisKey key) - { - return _inner.HashSlot(key); - } - - public long PublishReconfigure(CommandFlags flags = CommandFlags.None) - { - return _inner.PublishReconfigure(flags); - } - - public Task PublishReconfigureAsync(CommandFlags flags = CommandFlags.None) - { - return _inner.PublishReconfigureAsync(flags); - } - - public void RegisterProfiler(Func profilingSessionProvider) - { - _inner.RegisterProfiler(profilingSessionProvider); - } - - public void ResetStormLog() - { - _inner.ResetStormLog(); - } - - public void Wait(Task task) - { - _inner.Wait(task); - } - - public T Wait(Task task) - { - return _inner.Wait(task); - } - - public void WaitAll(params Task[] tasks) - { - _inner.WaitAll(tasks); - } - - public void ExportConfiguration(Stream destination, ExportOptions options = ExportOptions.All) - => _inner.ExportConfiguration(destination, options); - } - - public void Dispose() => _actualConnection.Dispose(); - - protected void OnInternalError(object sender, InternalErrorEventArgs e) - { - Interlocked.Increment(ref privateFailCount); - lock (privateExceptions) - { - privateExceptions.Add(TestBase.Time() + ": Internal error: " + e.Origin + ", " + EndPointCollection.ToString(e.EndPoint) + "/" + e.ConnectionType); 
- } - } - protected void OnConnectionFailed(object sender, ConnectionFailedEventArgs e) - { - Interlocked.Increment(ref privateFailCount); - lock (privateExceptions) - { - privateExceptions.Add($"{TestBase.Time()}: Connection failed ({e.FailureType}): {EndPointCollection.ToString(e.EndPoint)}/{e.ConnectionType}: {e.Exception}"); - } - } - private readonly List privateExceptions = new List(); - private int privateFailCount; - - public void Teardown(TextWriter output) - { - var innerPrivateFailCount = Interlocked.Exchange(ref privateFailCount, 0); - if (innerPrivateFailCount != 0) - { - lock (privateExceptions) - { - foreach (var item in privateExceptions.Take(5)) - { - TestBase.LogNoTime(output, item); - } - privateExceptions.Clear(); - } - //Assert.True(false, $"There were {privateFailCount} private ambient exceptions."); - } - TestBase.Log(output, $"Service Counts: (Scheduler) Queue: {SocketManager.Shared?.SchedulerPool?.TotalServicedByQueue.ToString()}, Pool: {SocketManager.Shared?.SchedulerPool?.TotalServicedByPool.ToString()}"); - } - } - - // https://stackoverflow.com/questions/13829737/xunit-net-run-code-once-before-and-after-all-tests - [CollectionDefinition(SharedConnectionFixture.Key)] - public class ConnectionCollection : ICollectionFixture - { - // This class has no code, and is never created. Its purpose is simply - // to be the place to apply [CollectionDefinition] and all the - // ICollectionFixture<> interfaces. 
- } -} diff --git a/tests/StackExchange.Redis.Tests/SocketTests.cs b/tests/StackExchange.Redis.Tests/SocketTests.cs new file mode 100644 index 000000000..2d11c0014 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/SocketTests.cs @@ -0,0 +1,28 @@ +using System.Diagnostics; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class SocketTests(ITestOutputHelper output) : TestBase(output) +{ + [Fact] + public async Task CheckForSocketLeaks() + { + Skip.UnlessLongRunning(); + const int count = 2000; + for (var i = 0; i < count; i++) + { + await using var _ = Create(clientName: "Test: " + i); + // Intentionally just creating and disposing to leak sockets here + // ...so we can figure out what's happening. + } + // Force GC before memory dump in debug below... + CollectGarbage(); + + if (Debugger.IsAttached) + { + Debugger.Break(); + } + } +} diff --git a/tests/StackExchange.Redis.Tests/Sockets.cs b/tests/StackExchange.Redis.Tests/Sockets.cs deleted file mode 100644 index 252e4650f..000000000 --- a/tests/StackExchange.Redis.Tests/Sockets.cs +++ /dev/null @@ -1,32 +0,0 @@ -using System.Diagnostics; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - public class Sockets : TestBase - { - protected override string GetConfiguration() => TestConfig.Current.MasterServerAndPort; - public Sockets(ITestOutputHelper output) : base (output) { } - - [FactLongRunning] - public void CheckForSocketLeaks() - { - const int count = 2000; - for (var i = 0; i < count; i++) - { - using (var _ = Create(clientName: "Test: " + i)) - { - // Intentionally just creating and disposing to leak sockets here - // ...so we can figure out what's happening. - } - } - // Force GC before memory dump in debug below... 
- CollectGarbage(); - - if (Debugger.IsAttached) - { - Debugger.Break(); - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/SortedSetTests.cs b/tests/StackExchange.Redis.Tests/SortedSetTests.cs new file mode 100644 index 000000000..a6e6271ea --- /dev/null +++ b/tests/StackExchange.Redis.Tests/SortedSetTests.cs @@ -0,0 +1,1466 @@ +using System; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +[RunPerProtocol] +public class SortedSetTests(ITestOutputHelper output, SharedConnectionFixture fixture) : TestBase(output, fixture) +{ + private static readonly SortedSetEntry[] entries = + [ + new SortedSetEntry("a", 1), + new SortedSetEntry("b", 2), + new SortedSetEntry("c", 3), + new SortedSetEntry("d", 4), + new SortedSetEntry("e", 5), + new SortedSetEntry("f", 6), + new SortedSetEntry("g", 7), + new SortedSetEntry("h", 8), + new SortedSetEntry("i", 9), + new SortedSetEntry("j", 10), + ]; + + private static readonly SortedSetEntry[] entriesPow2 = + [ + new SortedSetEntry("a", 1), + new SortedSetEntry("b", 2), + new SortedSetEntry("c", 4), + new SortedSetEntry("d", 8), + new SortedSetEntry("e", 16), + new SortedSetEntry("f", 32), + new SortedSetEntry("g", 64), + new SortedSetEntry("h", 128), + new SortedSetEntry("i", 256), + new SortedSetEntry("j", 512), + ]; + + private static readonly SortedSetEntry[] entriesPow3 = + [ + new SortedSetEntry("a", 1), + new SortedSetEntry("c", 4), + new SortedSetEntry("e", 16), + new SortedSetEntry("g", 64), + new SortedSetEntry("i", 256), + ]; + + private static readonly SortedSetEntry[] lexEntries = + [ + new SortedSetEntry("a", 0), + new SortedSetEntry("b", 0), + new SortedSetEntry("c", 0), + new SortedSetEntry("d", 0), + new SortedSetEntry("e", 0), + new SortedSetEntry("f", 0), + new SortedSetEntry("g", 0), + new SortedSetEntry("h", 0), + new SortedSetEntry("i", 0), + new SortedSetEntry("j", 0), + ]; + + [Fact] + public async Task SortedSetCombine() + { + await using var conn = 
Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var key1 = Me(); + db.KeyDelete(key1, CommandFlags.FireAndForget); + var key2 = Me() + "2"; + db.KeyDelete(key2, CommandFlags.FireAndForget); + + db.SortedSetAdd(key1, entries); + db.SortedSetAdd(key2, entriesPow3); + + var diff = db.SortedSetCombine(SetOperation.Difference, [key1, key2]); + Assert.Equal(5, diff.Length); + Assert.Equal("b", diff[0]); + + var inter = db.SortedSetCombine(SetOperation.Intersect, [key1, key2]); + Assert.Equal(5, inter.Length); + Assert.Equal("a", inter[0]); + + var union = db.SortedSetCombine(SetOperation.Union, [key1, key2]); + Assert.Equal(10, union.Length); + Assert.Equal("a", union[0]); + } + + [Fact] + public async Task SortedSetCombineAsync() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var key1 = Me(); + db.KeyDelete(key1, CommandFlags.FireAndForget); + var key2 = Me() + "2"; + db.KeyDelete(key2, CommandFlags.FireAndForget); + + db.SortedSetAdd(key1, entries); + db.SortedSetAdd(key2, entriesPow3); + + var diff = await db.SortedSetCombineAsync(SetOperation.Difference, [key1, key2]); + Assert.Equal(5, diff.Length); + Assert.Equal("b", diff[0]); + + var inter = await db.SortedSetCombineAsync(SetOperation.Intersect, [key1, key2]); + Assert.Equal(5, inter.Length); + Assert.Equal("a", inter[0]); + + var union = await db.SortedSetCombineAsync(SetOperation.Union, [key1, key2]); + Assert.Equal(10, union.Length); + Assert.Equal("a", union[0]); + } + + [Fact] + public async Task SortedSetCombineWithScores() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var key1 = Me(); + db.KeyDelete(key1, CommandFlags.FireAndForget); + var key2 = Me() + "2"; + db.KeyDelete(key2, CommandFlags.FireAndForget); + + db.SortedSetAdd(key1, entries); + db.SortedSetAdd(key2, entriesPow3); + + var diff = db.SortedSetCombineWithScores(SetOperation.Difference, [key1, key2]); + 
Assert.Equal(5, diff.Length); + Assert.Equal(new SortedSetEntry("b", 2), diff[0]); + + var inter = db.SortedSetCombineWithScores(SetOperation.Intersect, [key1, key2]); + Assert.Equal(5, inter.Length); + Assert.Equal(new SortedSetEntry("a", 2), inter[0]); + + var union = db.SortedSetCombineWithScores(SetOperation.Union, [key1, key2]); + Assert.Equal(10, union.Length); + Assert.Equal(new SortedSetEntry("a", 2), union[0]); + } + + [Fact] + public async Task SortedSetCombineWithScoresAsync() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var key1 = Me(); + db.KeyDelete(key1, CommandFlags.FireAndForget); + var key2 = Me() + "2"; + db.KeyDelete(key2, CommandFlags.FireAndForget); + + db.SortedSetAdd(key1, entries); + db.SortedSetAdd(key2, entriesPow3); + + var diff = await db.SortedSetCombineWithScoresAsync(SetOperation.Difference, [key1, key2]); + Assert.Equal(5, diff.Length); + Assert.Equal(new SortedSetEntry("b", 2), diff[0]); + + var inter = await db.SortedSetCombineWithScoresAsync(SetOperation.Intersect, [key1, key2]); + Assert.Equal(5, inter.Length); + Assert.Equal(new SortedSetEntry("a", 2), inter[0]); + + var union = await db.SortedSetCombineWithScoresAsync(SetOperation.Union, [key1, key2]); + Assert.Equal(10, union.Length); + Assert.Equal(new SortedSetEntry("a", 2), union[0]); + } + + [Fact] + public async Task SortedSetCombineAndStore() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var key1 = Me(); + db.KeyDelete(key1, CommandFlags.FireAndForget); + var key2 = Me() + "2"; + db.KeyDelete(key2, CommandFlags.FireAndForget); + var destination = Me() + "dest"; + db.KeyDelete(destination, CommandFlags.FireAndForget); + + db.SortedSetAdd(key1, entries); + db.SortedSetAdd(key2, entriesPow3); + + var diff = db.SortedSetCombineAndStore(SetOperation.Difference, destination, [key1, key2]); + Assert.Equal(5, diff); + + var inter = 
db.SortedSetCombineAndStore(SetOperation.Intersect, destination, [key1, key2]); + Assert.Equal(5, inter); + + var union = db.SortedSetCombineAndStore(SetOperation.Union, destination, [key1, key2]); + Assert.Equal(10, union); + } + + [Fact] + public async Task SortedSetCombineAndStoreAsync() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var key1 = Me(); + db.KeyDelete(key1, CommandFlags.FireAndForget); + var key2 = Me() + "2"; + db.KeyDelete(key2, CommandFlags.FireAndForget); + var destination = Me() + "dest"; + db.KeyDelete(destination, CommandFlags.FireAndForget); + + db.SortedSetAdd(key1, entries); + db.SortedSetAdd(key2, entriesPow3); + + var diff = await db.SortedSetCombineAndStoreAsync(SetOperation.Difference, destination, [key1, key2]); + Assert.Equal(5, diff); + + var inter = await db.SortedSetCombineAndStoreAsync(SetOperation.Intersect, destination, [key1, key2]); + Assert.Equal(5, inter); + + var union = await db.SortedSetCombineAndStoreAsync(SetOperation.Union, destination, [key1, key2]); + Assert.Equal(10, union); + } + + [Fact] + public async Task SortedSetCombineErrors() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var key1 = Me(); + db.KeyDelete(key1, CommandFlags.FireAndForget); + var key2 = Me() + "2"; + db.KeyDelete(key2, CommandFlags.FireAndForget); + var destination = Me() + "dest"; + db.KeyDelete(destination, CommandFlags.FireAndForget); + + db.SortedSetAdd(key1, entries); + db.SortedSetAdd(key2, entriesPow3); + + // ZDIFF can't be used with weights + var ex = Assert.Throws(() => db.SortedSetCombine(SetOperation.Difference, [key1, key2], [1, 2])); + Assert.Equal("ZDIFF cannot be used with weights or aggregation.", ex.Message); + ex = Assert.Throws(() => db.SortedSetCombineWithScores(SetOperation.Difference, [key1, key2], [1, 2])); + Assert.Equal("ZDIFF cannot be used with weights or aggregation.", ex.Message); + ex = 
Assert.Throws(() => db.SortedSetCombineAndStore(SetOperation.Difference, destination, [key1, key2], [1, 2])); + Assert.Equal("ZDIFFSTORE cannot be used with weights or aggregation.", ex.Message); + // and Async... + ex = await Assert.ThrowsAsync(() => db.SortedSetCombineAsync(SetOperation.Difference, [key1, key2], [1, 2])); + Assert.Equal("ZDIFF cannot be used with weights or aggregation.", ex.Message); + ex = await Assert.ThrowsAsync(() => db.SortedSetCombineWithScoresAsync(SetOperation.Difference, [key1, key2], [1, 2])); + Assert.Equal("ZDIFF cannot be used with weights or aggregation.", ex.Message); + ex = await Assert.ThrowsAsync(() => db.SortedSetCombineAndStoreAsync(SetOperation.Difference, destination, [key1, key2], [1, 2])); + Assert.Equal("ZDIFFSTORE cannot be used with weights or aggregation.", ex.Message); + + // ZDIFF can't be used with aggregation + ex = Assert.Throws(() => db.SortedSetCombine(SetOperation.Difference, [key1, key2], aggregate: Aggregate.Max)); + Assert.Equal("ZDIFF cannot be used with weights or aggregation.", ex.Message); + ex = Assert.Throws(() => db.SortedSetCombineWithScores(SetOperation.Difference, [key1, key2], aggregate: Aggregate.Max)); + Assert.Equal("ZDIFF cannot be used with weights or aggregation.", ex.Message); + ex = Assert.Throws(() => db.SortedSetCombineAndStore(SetOperation.Difference, destination, [key1, key2], aggregate: Aggregate.Max)); + Assert.Equal("ZDIFFSTORE cannot be used with weights or aggregation.", ex.Message); + // and Async... 
+ ex = await Assert.ThrowsAsync(() => db.SortedSetCombineAsync(SetOperation.Difference, [key1, key2], aggregate: Aggregate.Max)); + Assert.Equal("ZDIFF cannot be used with weights or aggregation.", ex.Message); + ex = await Assert.ThrowsAsync(() => db.SortedSetCombineWithScoresAsync(SetOperation.Difference, [key1, key2], aggregate: Aggregate.Max)); + Assert.Equal("ZDIFF cannot be used with weights or aggregation.", ex.Message); + ex = await Assert.ThrowsAsync(() => db.SortedSetCombineAndStoreAsync(SetOperation.Difference, destination, [key1, key2], aggregate: Aggregate.Max)); + Assert.Equal("ZDIFFSTORE cannot be used with weights or aggregation.", ex.Message); + + // Too many weights + ex = Assert.Throws(() => db.SortedSetCombine(SetOperation.Union, [key1, key2], [1, 2, 3])); + Assert.StartsWith("Keys and weights should have the same number of elements.", ex.Message); + ex = Assert.Throws(() => db.SortedSetCombineWithScores(SetOperation.Union, [key1, key2], [1, 2, 3])); + Assert.StartsWith("Keys and weights should have the same number of elements.", ex.Message); + ex = Assert.Throws(() => db.SortedSetCombineAndStore(SetOperation.Union, destination, [key1, key2], [1, 2, 3])); + Assert.StartsWith("Keys and weights should have the same number of elements.", ex.Message); + // and Async... 
+ ex = await Assert.ThrowsAsync(() => db.SortedSetCombineAsync(SetOperation.Union, [key1, key2], [1, 2, 3])); + Assert.StartsWith("Keys and weights should have the same number of elements.", ex.Message); + ex = await Assert.ThrowsAsync(() => db.SortedSetCombineWithScoresAsync(SetOperation.Union, [key1, key2], [1, 2, 3])); + Assert.StartsWith("Keys and weights should have the same number of elements.", ex.Message); + ex = await Assert.ThrowsAsync(() => db.SortedSetCombineAndStoreAsync(SetOperation.Union, destination, [key1, key2], [1, 2, 3])); + Assert.StartsWith("Keys and weights should have the same number of elements.", ex.Message); + } + + [Fact] + public async Task SortedSetIntersectionLength() + { + await using var conn = Create(require: RedisFeatures.v7_0_0_rc1); + + var db = conn.GetDatabase(); + var key1 = Me(); + db.KeyDelete(key1, CommandFlags.FireAndForget); + var key2 = Me() + "2"; + db.KeyDelete(key2, CommandFlags.FireAndForget); + + db.SortedSetAdd(key1, entries); + db.SortedSetAdd(key2, entriesPow3); + + var inter = db.SortedSetIntersectionLength([key1, key2]); + Assert.Equal(5, inter); + + // with limit + inter = db.SortedSetIntersectionLength([key1, key2], 3); + Assert.Equal(3, inter); + } + + [Fact] + public async Task SortedSetIntersectionLengthAsync() + { + await using var conn = Create(require: RedisFeatures.v7_0_0_rc1); + + var db = conn.GetDatabase(); + var key1 = Me(); + db.KeyDelete(key1, CommandFlags.FireAndForget); + var key2 = Me() + "2"; + db.KeyDelete(key2, CommandFlags.FireAndForget); + + db.SortedSetAdd(key1, entries); + db.SortedSetAdd(key2, entriesPow3); + + var inter = await db.SortedSetIntersectionLengthAsync([key1, key2]); + Assert.Equal(5, inter); + + // with limit + inter = await db.SortedSetIntersectionLengthAsync([key1, key2], 3); + Assert.Equal(3, inter); + } + + [Fact] + public async Task SortedSetRangeViaScript() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + var db = conn.GetDatabase(); + var key = 
Me(); + + db.KeyDelete(key, CommandFlags.FireAndForget); + db.SortedSetAdd(key, entries, CommandFlags.FireAndForget); + + var result = db.ScriptEvaluate(script: "return redis.call('ZRANGE', KEYS[1], 0, -1, 'WITHSCORES')", keys: [key]); + AssertFlatArrayEntries(result); + } + + [Fact] + public async Task SortedSetRangeViaExecute() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + var db = conn.GetDatabase(); + var key = Me(); + + db.KeyDelete(key, CommandFlags.FireAndForget); + db.SortedSetAdd(key, entries, CommandFlags.FireAndForget); + + var result = db.Execute("ZRANGE", [key, 0, -1, "WITHSCORES"]); + + if (TestContext.Current.IsResp3()) + { + AssertJaggedArrayEntries(result); + } + else + { + AssertFlatArrayEntries(result); + } + } + + private void AssertFlatArrayEntries(RedisResult result) + { + Assert.Equal(ResultType.Array, result.Resp2Type); + Assert.Equal(entries.Length * 2, (int)result.Length); + int index = 0; + foreach (var entry in entries) + { + var e = result[index++]; + Assert.Equal(ResultType.BulkString, e.Resp2Type); + Assert.Equal(entry.Element, e.AsRedisValue()); + + e = result[index++]; + Assert.Equal(ResultType.BulkString, e.Resp2Type); + Assert.Equal(entry.Score, e.AsDouble()); + } + } + + private void AssertJaggedArrayEntries(RedisResult result) + { + Assert.Equal(ResultType.Array, result.Resp2Type); + Assert.Equal(entries.Length, (int)result.Length); + int index = 0; + foreach (var entry in entries) + { + var arr = result[index++]; + Assert.Equal(ResultType.Array, arr.Resp2Type); + Assert.Equal(2, arr.Length); + + var e = arr[0]; + Assert.Equal(ResultType.BulkString, e.Resp2Type); + Assert.Equal(entry.Element, e.AsRedisValue()); + + e = arr[1]; + Assert.Equal(ResultType.SimpleString, e.Resp2Type); + Assert.Equal(ResultType.Double, e.Resp3Type); + Assert.Equal(entry.Score, e.AsDouble()); + } + } + + [Fact] + public async Task SortedSetPopMulti_Multi() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + 
var db = conn.GetDatabase(); + var key = Me(); + + db.KeyDelete(key, CommandFlags.FireAndForget); + db.SortedSetAdd(key, entries, CommandFlags.FireAndForget); + + var first = db.SortedSetPop(key, Order.Ascending); + Assert.True(first.HasValue); + Assert.Equal(entries[0], first.Value); + Assert.Equal(9, db.SortedSetLength(key)); + + var lasts = db.SortedSetPop(key, 2, Order.Descending); + Assert.Equal(2, lasts.Length); + Assert.Equal(entries[9], lasts[0]); + Assert.Equal(entries[8], lasts[1]); + Assert.Equal(7, db.SortedSetLength(key)); + } + + [Fact] + public async Task SortedSetPopMulti_Single() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + + db.KeyDelete(key, CommandFlags.FireAndForget); + db.SortedSetAdd(key, entries, CommandFlags.FireAndForget); + + var last = db.SortedSetPop(key, Order.Descending); + Assert.True(last.HasValue); + Assert.Equal(entries[9], last.Value); + Assert.Equal(9, db.SortedSetLength(key)); + + var firsts = db.SortedSetPop(key, 1, Order.Ascending); + Assert.Single(firsts); + Assert.Equal(entries[0], firsts[0]); + Assert.Equal(8, db.SortedSetLength(key)); + } + + [Fact] + public async Task SortedSetPopMulti_Multi_Async() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + + db.KeyDelete(key, CommandFlags.FireAndForget); + db.SortedSetAdd(key, entries, CommandFlags.FireAndForget); + + var last = await db.SortedSetPopAsync(key, Order.Descending).ForAwait(); + Assert.True(last.HasValue); + Assert.Equal(entries[9], last.Value); + Assert.Equal(9, db.SortedSetLength(key)); + + var moreLasts = await db.SortedSetPopAsync(key, 2, Order.Descending).ForAwait(); + Assert.Equal(2, moreLasts.Length); + Assert.Equal(entries[8], moreLasts[0]); + Assert.Equal(entries[7], moreLasts[1]); + Assert.Equal(7, db.SortedSetLength(key)); + } + + [Fact] + public async Task SortedSetPopMulti_Single_Async() + { + await using var 
conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + + db.KeyDelete(key, CommandFlags.FireAndForget); + db.SortedSetAdd(key, entries, CommandFlags.FireAndForget); + + var first = await db.SortedSetPopAsync(key).ForAwait(); + Assert.True(first.HasValue); + Assert.Equal(entries[0], first.Value); + Assert.Equal(9, db.SortedSetLength(key)); + + var moreFirsts = await db.SortedSetPopAsync(key, 1).ForAwait(); + Assert.Single(moreFirsts); + Assert.Equal(entries[1], moreFirsts[0]); + Assert.Equal(8, db.SortedSetLength(key)); + } + + [Fact] + public async Task SortedSetPopMulti_Zero_Async() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + + db.KeyDelete(key, CommandFlags.FireAndForget); + db.SortedSetAdd(key, entries, CommandFlags.FireAndForget); + + var t = db.SortedSetPopAsync(key, count: 0); + Assert.True(t.IsCompleted); // sync + var arr = await t; + Assert.NotNull(arr); + Assert.Empty(arr); + + Assert.Equal(10, db.SortedSetLength(key)); + } + + [Fact] + public async Task SortedSetRandomMembers() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var key = Me(); + var key0 = Me() + "non-existing"; + + db.KeyDelete(key, CommandFlags.FireAndForget); + db.KeyDelete(key0, CommandFlags.FireAndForget); + db.SortedSetAdd(key, entries, CommandFlags.FireAndForget); + + // single member + var randMember = db.SortedSetRandomMember(key); + Assert.True(Array.Exists(entries, element => element.Element.Equals(randMember))); + + // with count + var randMemberArray = db.SortedSetRandomMembers(key, 5); + Assert.Equal(5, randMemberArray.Length); + randMemberArray = db.SortedSetRandomMembers(key, 15); + Assert.Equal(10, randMemberArray.Length); + randMemberArray = db.SortedSetRandomMembers(key, -5); + Assert.Equal(5, randMemberArray.Length); + randMemberArray = db.SortedSetRandomMembers(key, -15); + Assert.Equal(15, 
randMemberArray.Length); + + // with scores + var randMemberArray2 = db.SortedSetRandomMembersWithScores(key, 2); + Assert.Equal(2, randMemberArray2.Length); + foreach (var member in randMemberArray2) + { + Assert.Contains(member, entries); + } + + // check missing key case + randMember = db.SortedSetRandomMember(key0); + Assert.True(randMember.IsNull); + randMemberArray = db.SortedSetRandomMembers(key0, 2); + Assert.True(randMemberArray.Length == 0); + randMemberArray2 = db.SortedSetRandomMembersWithScores(key0, 2); + Assert.True(randMemberArray2.Length == 0); + } + + [Fact] + public async Task SortedSetRandomMembersAsync() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var key = Me(); + var key0 = Me() + "non-existing"; + + db.KeyDelete(key, CommandFlags.FireAndForget); + db.KeyDelete(key0, CommandFlags.FireAndForget); + db.SortedSetAdd(key, entries, CommandFlags.FireAndForget); + + var randMember = await db.SortedSetRandomMemberAsync(key); + Assert.True(Array.Exists(entries, element => element.Element.Equals(randMember))); + + // with count + var randMemberArray = await db.SortedSetRandomMembersAsync(key, 5); + Assert.Equal(5, randMemberArray.Length); + randMemberArray = await db.SortedSetRandomMembersAsync(key, 15); + Assert.Equal(10, randMemberArray.Length); + randMemberArray = await db.SortedSetRandomMembersAsync(key, -5); + Assert.Equal(5, randMemberArray.Length); + randMemberArray = await db.SortedSetRandomMembersAsync(key, -15); + Assert.Equal(15, randMemberArray.Length); + + // with scores + var randMemberArray2 = await db.SortedSetRandomMembersWithScoresAsync(key, 2); + Assert.Equal(2, randMemberArray2.Length); + foreach (var member in randMemberArray2) + { + Assert.Contains(member, entries); + } + + // check missing key case + randMember = await db.SortedSetRandomMemberAsync(key0); + Assert.True(randMember.IsNull); + randMemberArray = await db.SortedSetRandomMembersAsync(key0, 2); + 
Assert.True(randMemberArray.Length == 0); + randMemberArray2 = await db.SortedSetRandomMembersWithScoresAsync(key0, 2); + Assert.True(randMemberArray2.Length == 0); + } + + [Fact] + public async Task SortedSetRangeStoreByRankAsync() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var me = Me(); + var sourceKey = $"{me}:ZSetSource"; + var destinationKey = $"{me}:ZSetDestination"; + + db.KeyDelete([sourceKey, destinationKey], CommandFlags.FireAndForget); + await db.SortedSetAddAsync(sourceKey, entries, CommandFlags.FireAndForget); + var res = await db.SortedSetRangeAndStoreAsync(sourceKey, destinationKey, 0, -1); + Assert.Equal(entries.Length, res); + } + + [Fact] + public async Task SortedSetRangeStoreByRankLimitedAsync() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var me = Me(); + var sourceKey = $"{me}:ZSetSource"; + var destinationKey = $"{me}:ZSetDestination"; + + db.KeyDelete([sourceKey, destinationKey], CommandFlags.FireAndForget); + await db.SortedSetAddAsync(sourceKey, entries, CommandFlags.FireAndForget); + var res = await db.SortedSetRangeAndStoreAsync(sourceKey, destinationKey, 1, 4); + var range = await db.SortedSetRangeByRankWithScoresAsync(destinationKey); + Assert.Equal(4, res); + for (var i = 1; i < 5; i++) + { + Assert.Equal(entries[i], range[i - 1]); + } + } + + [Fact] + public async Task SortedSetRangeStoreByScoreAsync() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var me = Me(); + var sourceKey = $"{me}:ZSetSource"; + var destinationKey = $"{me}:ZSetDestination"; + + db.KeyDelete([sourceKey, destinationKey], CommandFlags.FireAndForget); + await db.SortedSetAddAsync(sourceKey, entriesPow2, CommandFlags.FireAndForget); + var res = await db.SortedSetRangeAndStoreAsync(sourceKey, destinationKey, 64, 128, SortedSetOrder.ByScore); + var range = await 
db.SortedSetRangeByRankWithScoresAsync(destinationKey); + Assert.Equal(2, res); + for (var i = 6; i < 8; i++) + { + Assert.Equal(entriesPow2[i], range[i - 6]); + } + } + + [Fact] + public async Task SortedSetRangeStoreByScoreAsyncDefault() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var me = Me(); + var sourceKey = $"{me}:ZSetSource"; + var destinationKey = $"{me}:ZSetDestination"; + + db.KeyDelete([sourceKey, destinationKey], CommandFlags.FireAndForget); + await db.SortedSetAddAsync(sourceKey, entriesPow2, CommandFlags.FireAndForget); + var res = await db.SortedSetRangeAndStoreAsync(sourceKey, destinationKey, double.NegativeInfinity, double.PositiveInfinity, SortedSetOrder.ByScore); + var range = await db.SortedSetRangeByRankWithScoresAsync(destinationKey); + Assert.Equal(10, res); + for (var i = 0; i < entriesPow2.Length; i++) + { + Assert.Equal(entriesPow2[i], range[i]); + } + } + + [Fact] + public async Task SortedSetRangeStoreByScoreAsyncLimited() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var me = Me(); + var sourceKey = $"{me}:ZSetSource"; + var destinationKey = $"{me}:ZSetDestination"; + + db.KeyDelete([sourceKey, destinationKey], CommandFlags.FireAndForget); + await db.SortedSetAddAsync(sourceKey, entriesPow2, CommandFlags.FireAndForget); + var res = await db.SortedSetRangeAndStoreAsync(sourceKey, destinationKey, double.NegativeInfinity, double.PositiveInfinity, SortedSetOrder.ByScore, skip: 1, take: 6); + var range = await db.SortedSetRangeByRankWithScoresAsync(destinationKey); + Assert.Equal(6, res); + for (var i = 1; i < 7; i++) + { + Assert.Equal(entriesPow2[i], range[i - 1]); + } + } + + [Fact] + public async Task SortedSetRangeStoreByScoreAsyncExclusiveRange() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var me = Me(); + var sourceKey = $"{me}:ZSetSource"; + var destinationKey 
= $"{me}:ZSetDestination"; + + db.KeyDelete([sourceKey, destinationKey], CommandFlags.FireAndForget); + await db.SortedSetAddAsync(sourceKey, entriesPow2, CommandFlags.FireAndForget); + var res = await db.SortedSetRangeAndStoreAsync(sourceKey, destinationKey, 32, 256, SortedSetOrder.ByScore, exclude: Exclude.Both); + var range = await db.SortedSetRangeByRankWithScoresAsync(destinationKey); + Assert.Equal(2, res); + for (var i = 6; i < 8; i++) + { + Assert.Equal(entriesPow2[i], range[i - 6]); + } + } + + [Fact] + public async Task SortedSetRangeStoreByScoreAsyncReverse() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var me = Me(); + var sourceKey = $"{me}:ZSetSource"; + var destinationKey = $"{me}:ZSetDestination"; + + db.KeyDelete([sourceKey, destinationKey], CommandFlags.FireAndForget); + await db.SortedSetAddAsync(sourceKey, entriesPow2, CommandFlags.FireAndForget); + var res = await db.SortedSetRangeAndStoreAsync(sourceKey, destinationKey, start: double.PositiveInfinity, double.NegativeInfinity, SortedSetOrder.ByScore, order: Order.Descending); + var range = await db.SortedSetRangeByRankWithScoresAsync(destinationKey); + Assert.Equal(10, res); + for (var i = 0; i < entriesPow2.Length; i++) + { + Assert.Equal(entriesPow2[i], range[i]); + } + } + + [Fact] + public async Task SortedSetRangeStoreByLexAsync() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var me = Me(); + var sourceKey = $"{me}:ZSetSource"; + var destinationKey = $"{me}:ZSetDestination"; + + db.KeyDelete([sourceKey, destinationKey], CommandFlags.FireAndForget); + await db.SortedSetAddAsync(sourceKey, lexEntries, CommandFlags.FireAndForget); + var res = await db.SortedSetRangeAndStoreAsync(sourceKey, destinationKey, "a", "j", SortedSetOrder.ByLex); + var range = await db.SortedSetRangeByRankWithScoresAsync(destinationKey); + Assert.Equal(10, res); + for (var i = 0; i < lexEntries.Length; 
i++) + { + Assert.Equal(lexEntries[i], range[i]); + } + } + + [Fact] + public async Task SortedSetRangeStoreByLexExclusiveRangeAsync() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var me = Me(); + var sourceKey = $"{me}:ZSetSource"; + var destinationKey = $"{me}:ZSetDestination"; + + db.KeyDelete([sourceKey, destinationKey], CommandFlags.FireAndForget); + await db.SortedSetAddAsync(sourceKey, lexEntries, CommandFlags.FireAndForget); + var res = await db.SortedSetRangeAndStoreAsync(sourceKey, destinationKey, "a", "j", SortedSetOrder.ByLex, Exclude.Both); + var range = await db.SortedSetRangeByRankWithScoresAsync(destinationKey); + Assert.Equal(8, res); + for (var i = 1; i < lexEntries.Length - 1; i++) + { + Assert.Equal(lexEntries[i], range[i - 1]); + } + } + + [Fact] + public async Task SortedSetRangeStoreByLexRevRangeAsync() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var me = Me(); + var sourceKey = $"{me}:ZSetSource"; + var destinationKey = $"{me}:ZSetDestination"; + + db.KeyDelete([sourceKey, destinationKey], CommandFlags.FireAndForget); + await db.SortedSetAddAsync(sourceKey, lexEntries, CommandFlags.FireAndForget); + var res = await db.SortedSetRangeAndStoreAsync(sourceKey, destinationKey, "j", "a", SortedSetOrder.ByLex, exclude: Exclude.None, order: Order.Descending); + var range = await db.SortedSetRangeByRankWithScoresAsync(destinationKey); + Assert.Equal(10, res); + for (var i = 0; i < lexEntries.Length; i++) + { + Assert.Equal(lexEntries[i], range[i]); + } + } + + [Fact] + public async Task SortedSetRangeStoreByRank() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var me = Me(); + var sourceKey = $"{me}:ZSetSource"; + var destinationKey = $"{me}:ZSetDestination"; + + db.KeyDelete([sourceKey, destinationKey], CommandFlags.FireAndForget); + db.SortedSetAdd(sourceKey, entries, 
CommandFlags.FireAndForget); + var res = db.SortedSetRangeAndStore(sourceKey, destinationKey, 0, -1); + Assert.Equal(entries.Length, res); + } + + [Fact] + public async Task SortedSetRangeStoreByRankLimited() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var me = Me(); + var sourceKey = $"{me}:ZSetSource"; + var destinationKey = $"{me}:ZSetDestination"; + + db.KeyDelete([sourceKey, destinationKey], CommandFlags.FireAndForget); + db.SortedSetAdd(sourceKey, entries, CommandFlags.FireAndForget); + var res = db.SortedSetRangeAndStore(sourceKey, destinationKey, 1, 4); + var range = db.SortedSetRangeByRankWithScores(destinationKey); + Assert.Equal(4, res); + for (var i = 1; i < 5; i++) + { + Assert.Equal(entries[i], range[i - 1]); + } + } + + [Fact] + public async Task SortedSetRangeStoreByScore() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var me = Me(); + var sourceKey = $"{me}:ZSetSource"; + var destinationKey = $"{me}:ZSetDestination"; + + db.KeyDelete([sourceKey, destinationKey], CommandFlags.FireAndForget); + db.SortedSetAdd(sourceKey, entriesPow2, CommandFlags.FireAndForget); + var res = db.SortedSetRangeAndStore(sourceKey, destinationKey, 64, 128, SortedSetOrder.ByScore); + var range = db.SortedSetRangeByRankWithScores(destinationKey); + Assert.Equal(2, res); + for (var i = 6; i < 8; i++) + { + Assert.Equal(entriesPow2[i], range[i - 6]); + } + } + + [Fact] + public async Task SortedSetRangeStoreByScoreDefault() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var me = Me(); + var sourceKey = $"{me}:ZSetSource"; + var destinationKey = $"{me}:ZSetDestination"; + + db.KeyDelete([sourceKey, destinationKey], CommandFlags.FireAndForget); + db.SortedSetAdd(sourceKey, entriesPow2, CommandFlags.FireAndForget); + var res = db.SortedSetRangeAndStore(sourceKey, destinationKey, double.NegativeInfinity, 
double.PositiveInfinity, SortedSetOrder.ByScore); + var range = db.SortedSetRangeByRankWithScores(destinationKey); + Assert.Equal(10, res); + for (var i = 0; i < entriesPow2.Length; i++) + { + Assert.Equal(entriesPow2[i], range[i]); + } + } + + [Fact] + public async Task SortedSetRangeStoreByScoreLimited() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var me = Me(); + var sourceKey = $"{me}:ZSetSource"; + var destinationKey = $"{me}:ZSetDestination"; + + db.KeyDelete([sourceKey, destinationKey], CommandFlags.FireAndForget); + db.SortedSetAdd(sourceKey, entriesPow2, CommandFlags.FireAndForget); + var res = db.SortedSetRangeAndStore(sourceKey, destinationKey, double.NegativeInfinity, double.PositiveInfinity, SortedSetOrder.ByScore, skip: 1, take: 6); + var range = db.SortedSetRangeByRankWithScores(destinationKey); + Assert.Equal(6, res); + for (var i = 1; i < 7; i++) + { + Assert.Equal(entriesPow2[i], range[i - 1]); + } + } + + [Fact] + public async Task SortedSetRangeStoreByScoreExclusiveRange() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var me = Me(); + var sourceKey = $"{me}:ZSetSource"; + var destinationKey = $"{me}:ZSetDestination"; + + db.KeyDelete([sourceKey, destinationKey], CommandFlags.FireAndForget); + db.SortedSetAdd(sourceKey, entriesPow2, CommandFlags.FireAndForget); + var res = db.SortedSetRangeAndStore(sourceKey, destinationKey, 32, 256, SortedSetOrder.ByScore, exclude: Exclude.Both); + var range = db.SortedSetRangeByRankWithScores(destinationKey); + Assert.Equal(2, res); + for (var i = 6; i < 8; i++) + { + Assert.Equal(entriesPow2[i], range[i - 6]); + } + } + + [Fact] + public async Task SortedSetRangeStoreByScoreReverse() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var me = Me(); + var sourceKey = $"{me}:ZSetSource"; + var destinationKey = $"{me}:ZSetDestination"; + + 
db.KeyDelete([sourceKey, destinationKey], CommandFlags.FireAndForget); + db.SortedSetAdd(sourceKey, entriesPow2, CommandFlags.FireAndForget); + var res = db.SortedSetRangeAndStore(sourceKey, destinationKey, start: double.PositiveInfinity, double.NegativeInfinity, SortedSetOrder.ByScore, order: Order.Descending); + var range = db.SortedSetRangeByRankWithScores(destinationKey); + Assert.Equal(10, res); + for (var i = 0; i < entriesPow2.Length; i++) + { + Assert.Equal(entriesPow2[i], range[i]); + } + } + + [Fact] + public async Task SortedSetRangeStoreByLex() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var me = Me(); + var sourceKey = $"{me}:ZSetSource"; + var destinationKey = $"{me}:ZSetDestination"; + + db.KeyDelete([sourceKey, destinationKey], CommandFlags.FireAndForget); + db.SortedSetAdd(sourceKey, lexEntries, CommandFlags.FireAndForget); + var res = db.SortedSetRangeAndStore(sourceKey, destinationKey, "a", "j", SortedSetOrder.ByLex); + var range = db.SortedSetRangeByRankWithScores(destinationKey); + Assert.Equal(10, res); + for (var i = 0; i < lexEntries.Length; i++) + { + Assert.Equal(lexEntries[i], range[i]); + } + } + + [Fact] + public async Task SortedSetRangeStoreByLexExclusiveRange() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var me = Me(); + var sourceKey = $"{me}:ZSetSource"; + var destinationKey = $"{me}:ZSetDestination"; + + db.KeyDelete([sourceKey, destinationKey], CommandFlags.FireAndForget); + db.SortedSetAdd(sourceKey, lexEntries, CommandFlags.FireAndForget); + var res = db.SortedSetRangeAndStore(sourceKey, destinationKey, "a", "j", SortedSetOrder.ByLex, Exclude.Both); + var range = db.SortedSetRangeByRankWithScores(destinationKey); + Assert.Equal(8, res); + for (var i = 1; i < lexEntries.Length - 1; i++) + { + Assert.Equal(lexEntries[i], range[i - 1]); + } + } + + [Fact] + public async Task SortedSetRangeStoreByLexRevRange() + { + 
await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var me = Me(); + var sourceKey = $"{me}:ZSetSource"; + var destinationKey = $"{me}:ZSetDestination"; + + db.KeyDelete([sourceKey, destinationKey], CommandFlags.FireAndForget); + db.SortedSetAdd(sourceKey, lexEntries, CommandFlags.FireAndForget); + var res = db.SortedSetRangeAndStore(sourceKey, destinationKey, "j", "a", SortedSetOrder.ByLex, Exclude.None, Order.Descending); + var range = db.SortedSetRangeByRankWithScores(destinationKey); + Assert.Equal(10, res); + for (var i = 0; i < lexEntries.Length; i++) + { + Assert.Equal(lexEntries[i], range[i]); + } + } + + [Fact] + public async Task SortedSetRangeStoreFailErroneousTake() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var me = Me(); + var sourceKey = $"{me}:ZSetSource"; + var destinationKey = $"{me}:ZSetDestination"; + + db.KeyDelete([sourceKey, destinationKey], CommandFlags.FireAndForget); + db.SortedSetAdd(sourceKey, lexEntries, CommandFlags.FireAndForget); + var exception = Assert.Throws(() => db.SortedSetRangeAndStore(sourceKey, destinationKey, 0, -1, take: 5)); + Assert.Equal("take", exception.ParamName); + } + + [Fact] + public async Task SortedSetRangeStoreFailExclude() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var me = Me(); + var sourceKey = $"{me}:ZSetSource"; + var destinationKey = $"{me}:ZSetDestination"; + + db.KeyDelete([sourceKey, destinationKey], CommandFlags.FireAndForget); + db.SortedSetAdd(sourceKey, lexEntries, CommandFlags.FireAndForget); + var exception = Assert.Throws(() => db.SortedSetRangeAndStore(sourceKey, destinationKey, 0, -1, exclude: Exclude.Both)); + Assert.Equal("exclude", exception.ParamName); + } + + [Fact] + public async Task SortedSetMultiPopSingleKey() + { + await using var conn = Create(require: RedisFeatures.v7_0_0_rc1); + + var db = conn.GetDatabase(); + var 
key = Me(); + db.KeyDelete(key); + + db.SortedSetAdd( + key, + [ + new SortedSetEntry("rays", 100), + new SortedSetEntry("yankees", 92), + new SortedSetEntry("red sox", 92), + new SortedSetEntry("blue jays", 91), + new SortedSetEntry("orioles", 52), + ]); + + var highest = db.SortedSetPop([key], 1, order: Order.Descending); + Assert.False(highest.IsNull); + Assert.Equal(key, highest.Key); + var entry = Assert.Single(highest.Entries); + Assert.Equal("rays", entry.Element); + Assert.Equal(100, entry.Score); + + var bottom2 = db.SortedSetPop([key], 2); + Assert.False(bottom2.IsNull); + Assert.Equal(key, bottom2.Key); + Assert.Equal(2, bottom2.Entries.Length); + Assert.Equal("orioles", bottom2.Entries[0].Element); + Assert.Equal(52, bottom2.Entries[0].Score); + Assert.Equal("blue jays", bottom2.Entries[1].Element); + Assert.Equal(91, bottom2.Entries[1].Score); + } + + [Fact] + public async Task SortedSetMultiPopMultiKey() + { + await using var conn = Create(require: RedisFeatures.v7_0_0_rc1); + + var db = conn.GetDatabase(); + var key = Me(); + db.KeyDelete(key); + + db.SortedSetAdd( + key, + [ + new SortedSetEntry("rays", 100), + new SortedSetEntry("yankees", 92), + new SortedSetEntry("red sox", 92), + new SortedSetEntry("blue jays", 91), + new SortedSetEntry("orioles", 52), + ]); + + var highest = db.SortedSetPop(["not a real key", key, "yet another not a real key"], 1, order: Order.Descending); + Assert.False(highest.IsNull); + Assert.Equal(key, highest.Key); + var entry = Assert.Single(highest.Entries); + Assert.Equal("rays", entry.Element); + Assert.Equal(100, entry.Score); + + var bottom2 = db.SortedSetPop(["not a real key", key, "yet another not a real key"], 2); + Assert.False(bottom2.IsNull); + Assert.Equal(key, bottom2.Key); + Assert.Equal(2, bottom2.Entries.Length); + Assert.Equal("orioles", bottom2.Entries[0].Element); + Assert.Equal(52, bottom2.Entries[0].Score); + Assert.Equal("blue jays", bottom2.Entries[1].Element); + Assert.Equal(91, 
bottom2.Entries[1].Score); + } + + [Fact] + public async Task SortedSetMultiPopNoSet() + { + await using var conn = Create(require: RedisFeatures.v7_0_0_rc1); + + var db = conn.GetDatabase(); + var key = Me(); + db.KeyDelete(key); + var res = db.SortedSetPop([key], 1); + Assert.True(res.IsNull); + } + + [Fact] + public async Task SortedSetMultiPopCount0() + { + await using var conn = Create(require: RedisFeatures.v7_0_0_rc1); + + var db = conn.GetDatabase(); + var key = Me(); + db.KeyDelete(key); + var exception = Assert.Throws(() => db.SortedSetPop([key], 0)); + Assert.Contains("ERR count should be greater than 0", exception.Message); + } + + [Fact] + public async Task SortedSetMultiPopAsync() + { + await using var conn = Create(require: RedisFeatures.v7_0_0_rc1); + + var db = conn.GetDatabase(); + var key = Me(); + db.KeyDelete(key); + + db.SortedSetAdd( + key, + [ + new SortedSetEntry("rays", 100), + new SortedSetEntry("yankees", 92), + new SortedSetEntry("red sox", 92), + new SortedSetEntry("blue jays", 91), + new SortedSetEntry("orioles", 52), + ]); + + var highest = await db.SortedSetPopAsync( + ["not a real key", key, "yet another not a real key"], 1, order: Order.Descending); + Assert.False(highest.IsNull); + Assert.Equal(key, highest.Key); + var entry = Assert.Single(highest.Entries); + Assert.Equal("rays", entry.Element); + Assert.Equal(100, entry.Score); + + var bottom2 = await db.SortedSetPopAsync(["not a real key", key, "yet another not a real key"], 2); + Assert.False(bottom2.IsNull); + Assert.Equal(key, bottom2.Key); + Assert.Equal(2, bottom2.Entries.Length); + Assert.Equal("orioles", bottom2.Entries[0].Element); + Assert.Equal(52, bottom2.Entries[0].Score); + Assert.Equal("blue jays", bottom2.Entries[1].Element); + Assert.Equal(91, bottom2.Entries[1].Score); + } + + [Fact] + public async Task SortedSetMultiPopEmptyKeys() + { + await using var conn = Create(require: RedisFeatures.v7_0_0_rc1); + + var db = conn.GetDatabase(); + var exception = 
Assert.Throws(() => db.SortedSetPop(Array.Empty(), 5)); + Assert.Contains("keys must have a size of at least 1", exception.Message); + } + + [Fact] + public async Task SortedSetRangeStoreFailForReplica() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var me = Me(); + var sourceKey = $"{me}:ZSetSource"; + var destinationKey = $"{me}:ZSetDestination"; + + db.KeyDelete([sourceKey, destinationKey], CommandFlags.FireAndForget); + db.SortedSetAdd(sourceKey, lexEntries, CommandFlags.FireAndForget); + var exception = Assert.Throws(() => db.SortedSetRangeAndStore(sourceKey, destinationKey, 0, -1, flags: CommandFlags.DemandReplica)); + Assert.Contains("Command cannot be issued to a replica", exception.Message); + } + + [Fact] + public async Task SortedSetScoresSingle() + { + await using var conn = Create(require: RedisFeatures.v2_1_0); + + var db = conn.GetDatabase(); + var key = Me(); + const string memberName = "member"; + + db.KeyDelete(key); + db.SortedSetAdd(key, memberName, 1.5); + + var score = db.SortedSetScore(key, memberName); + + Assert.NotNull(score); + Assert.Equal((double)1.5, score); + } + + [Fact] + public async Task SortedSetScoresSingleAsync() + { + await using var conn = Create(require: RedisFeatures.v2_1_0); + + var db = conn.GetDatabase(); + var key = Me(); + const string memberName = "member"; + + await db.KeyDeleteAsync(key); + await db.SortedSetAddAsync(key, memberName, 1.5); + + var score = await db.SortedSetScoreAsync(key, memberName); + + Assert.NotNull(score); + Assert.Equal((double)1.5, score.Value); + } + + [Fact] + public async Task SortedSetScoresSingle_MissingSetStillReturnsNull() + { + await using var conn = Create(require: RedisFeatures.v2_1_0); + + var db = conn.GetDatabase(); + var key = Me(); + + db.KeyDelete(key); + + // Attempt to retrieve score for a missing set, should still return null. 
+ var score = db.SortedSetScore(key, "bogusMemberName"); + + Assert.Null(score); + } + + [Fact] + public async Task SortedSetScoresSingle_MissingSetStillReturnsNullAsync() + { + await using var conn = Create(require: RedisFeatures.v2_1_0); + + var db = conn.GetDatabase(); + var key = Me(); + + await db.KeyDeleteAsync(key); + + // Attempt to retrieve score for a missing set, should still return null. + var score = await db.SortedSetScoreAsync(key, "bogusMemberName"); + + Assert.Null(score); + } + + [Fact] + public async Task SortedSetScoresSingle_ReturnsNullForMissingMember() + { + await using var conn = Create(require: RedisFeatures.v2_1_0); + + var db = conn.GetDatabase(); + var key = Me(); + + db.KeyDelete(key); + db.SortedSetAdd(key, "member1", 1.5); + + // Attempt to retrieve score for a missing member, should return null. + var score = db.SortedSetScore(key, "bogusMemberName"); + + Assert.Null(score); + } + + [Fact] + public async Task SortedSetScoresSingle_ReturnsNullForMissingMemberAsync() + { + await using var conn = Create(require: RedisFeatures.v2_1_0); + + var db = conn.GetDatabase(); + var key = Me(); + + await db.KeyDeleteAsync(key); + await db.SortedSetAddAsync(key, "member1", 1.5); + + // Attempt to retrieve score for a missing member, should return null. 
+ var score = await db.SortedSetScoreAsync(key, "bogusMemberName"); + + Assert.Null(score); + } + + [Fact] + public async Task SortedSetScoresMultiple() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var key = Me(); + const string member1 = "member1", + member2 = "member2", + member3 = "member3"; + + db.KeyDelete(key); + db.SortedSetAdd(key, member1, 1.5); + db.SortedSetAdd(key, member2, 1.75); + db.SortedSetAdd(key, member3, 2); + + var scores = db.SortedSetScores(key, [member1, member2, member3]); + + Assert.NotNull(scores); + Assert.Equal(3, scores.Length); + Assert.Equal((double)1.5, scores[0]); + Assert.Equal((double)1.75, scores[1]); + Assert.Equal(2, scores[2]); + } + + [Fact] + public async Task SortedSetScoresMultipleAsync() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var key = Me(); + const string member1 = "member1", + member2 = "member2", + member3 = "member3"; + + await db.KeyDeleteAsync(key); + await db.SortedSetAddAsync(key, member1, 1.5); + await db.SortedSetAddAsync(key, member2, 1.75); + await db.SortedSetAddAsync(key, member3, 2); + + var scores = await db.SortedSetScoresAsync(key, [member1, member2, member3]); + + Assert.NotNull(scores); + Assert.Equal(3, scores.Length); + Assert.Equal((double)1.5, scores[0]); + Assert.Equal((double)1.75, scores[1]); + Assert.Equal(2, scores[2]); + } + + [Fact] + public async Task SortedSetScoresMultiple_ReturnsNullItemsForMissingSet() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var key = Me(); + + db.KeyDelete(key); + + // Missing set but should still return an array of nulls. 
+ var scores = db.SortedSetScores(key, ["bogus1", "bogus2", "bogus3"]); + + Assert.NotNull(scores); + Assert.Equal(3, scores.Length); + Assert.Null(scores[0]); + Assert.Null(scores[1]); + Assert.Null(scores[2]); + } + + [Fact] + public async Task SortedSetScoresMultiple_ReturnsNullItemsForMissingSetAsync() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var key = Me(); + + await db.KeyDeleteAsync(key); + + // Missing set but should still return an array of nulls. + var scores = await db.SortedSetScoresAsync(key, ["bogus1", "bogus2", "bogus3"]); + + Assert.NotNull(scores); + Assert.Equal(3, scores.Length); + Assert.Null(scores[0]); + Assert.Null(scores[1]); + Assert.Null(scores[2]); + } + + [Fact] + public async Task SortedSetScoresMultiple_ReturnsScoresAndNullItems() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var key = Me(); + const string member1 = "member1", + member2 = "member2", + member3 = "member3", + bogusMember = "bogusMember"; + + db.KeyDelete(key); + + db.SortedSetAdd(key, member1, 1.5); + db.SortedSetAdd(key, member2, 1.75); + db.SortedSetAdd(key, member3, 2); + + var scores = db.SortedSetScores(key, [member1, bogusMember, member2, member3]); + + Assert.NotNull(scores); + Assert.Equal(4, scores.Length); + Assert.Null(scores[1]); + Assert.Equal((double)1.5, scores[0]); + Assert.Equal((double)1.75, scores[2]); + Assert.Equal(2, scores[3]); + } + + [Fact] + public async Task SortedSetScoresMultiple_ReturnsScoresAndNullItemsAsync() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var key = Me(); + const string member1 = "member1", + member2 = "member2", + member3 = "member3", + bogusMember = "bogusMember"; + + await db.KeyDeleteAsync(key); + + await db.SortedSetAddAsync(key, member1, 1.5); + await db.SortedSetAddAsync(key, member2, 1.75); + await db.SortedSetAddAsync(key, member3, 2); + + var 
scores = await db.SortedSetScoresAsync(key, [member1, bogusMember, member2, member3]); + + Assert.NotNull(scores); + Assert.Equal(4, scores.Length); + Assert.Null(scores[1]); + Assert.Equal((double)1.5, scores[0]); + Assert.Equal((double)1.75, scores[2]); + Assert.Equal(2, scores[3]); + } + + [Fact] + public async Task SortedSetUpdate() + { + await using var conn = Create(require: RedisFeatures.v3_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + var member = "a"; + var values = new SortedSetEntry[] { new SortedSetEntry(member, 5) }; + db.KeyDelete(key, CommandFlags.FireAndForget); + db.SortedSetAdd(key, member, 2); + + Assert.True(db.SortedSetUpdate(key, member, 1)); + Assert.Equal(1, db.SortedSetUpdate(key, values)); + + Assert.True(await db.SortedSetUpdateAsync(key, member, 1)); + Assert.Equal(1, await db.SortedSetUpdateAsync(key, values)); + } +} diff --git a/tests/StackExchange.Redis.Tests/SortedSetWhenTests.cs b/tests/StackExchange.Redis.Tests/SortedSetWhenTests.cs new file mode 100644 index 000000000..17c587079 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/SortedSetWhenTests.cs @@ -0,0 +1,40 @@ +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class SortedSetWhenTest(ITestOutputHelper output, SharedConnectionFixture fixture) : TestBase(output, fixture) +{ + [Fact] + public async Task GreaterThanLessThan() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var key = Me(); + var member = "a"; + db.KeyDelete(key, CommandFlags.FireAndForget); + db.SortedSetAdd(key, member, 2); + + Assert.True(db.SortedSetUpdate(key, member, 5, when: SortedSetWhen.GreaterThan)); + Assert.False(db.SortedSetUpdate(key, member, 1, when: SortedSetWhen.GreaterThan)); + Assert.True(db.SortedSetUpdate(key, member, 1, when: SortedSetWhen.LessThan)); + Assert.False(db.SortedSetUpdate(key, member, 5, when: SortedSetWhen.LessThan)); + } + + [Fact] + public async Task 
IllegalCombinations() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var key = Me(); + var member = "a"; + db.KeyDelete(key, CommandFlags.FireAndForget); + + Assert.Throws<ArgumentException>(() => db.SortedSetAdd(key, member, 5, when: SortedSetWhen.LessThan | SortedSetWhen.GreaterThan)); + Assert.Throws<ArgumentException>(() => db.SortedSetAdd(key, member, 5, when: SortedSetWhen.Exists | SortedSetWhen.NotExists)); + Assert.Throws<ArgumentException>(() => db.SortedSetAdd(key, member, 5, when: SortedSetWhen.GreaterThan | SortedSetWhen.NotExists)); + Assert.Throws<ArgumentException>(() => db.SortedSetAdd(key, member, 5, when: SortedSetWhen.LessThan | SortedSetWhen.NotExists)); + } +} diff --git a/tests/StackExchange.Redis.Tests/SortedSets.cs b/tests/StackExchange.Redis.Tests/SortedSets.cs deleted file mode 100644 index cea1f518c..000000000 --- a/tests/StackExchange.Redis.Tests/SortedSets.cs +++ /dev/null @@ -1,150 +0,0 @@ -using System.Threading.Tasks; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - [Collection(SharedConnectionFixture.Key)] - public class SortedSets : TestBase - { - public SortedSets(ITestOutputHelper output, SharedConnectionFixture fixture) : base(output, fixture) { } - - public static SortedSetEntry[] entries = new SortedSetEntry[] - { - new SortedSetEntry("a", 1), - new SortedSetEntry("b", 2), - new SortedSetEntry("c", 3), - new SortedSetEntry("d", 4), - new SortedSetEntry("e", 5), - new SortedSetEntry("f", 6), - new SortedSetEntry("g", 7), - new SortedSetEntry("h", 8), - new SortedSetEntry("i", 9), - new SortedSetEntry("j", 10) - }; - - [Fact] - public void SortedSetPopMulti_Multi() - { - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.SortedSetPop), r => r.SortedSetPop); - - var db = conn.GetDatabase(); - var key = Me(); - - db.KeyDelete(key, CommandFlags.FireAndForget); - db.SortedSetAdd(key, entries, CommandFlags.FireAndForget); - - var first = db.SortedSetPop(key, Order.Ascending); -
Assert.True(first.HasValue); - Assert.Equal(entries[0], first.Value); - Assert.Equal(9, db.SortedSetLength(key)); - - var lasts = db.SortedSetPop(key, 2, Order.Descending); - Assert.Equal(2, lasts.Length); - Assert.Equal(entries[9], lasts[0]); - Assert.Equal(entries[8], lasts[1]); - Assert.Equal(7, db.SortedSetLength(key)); - } - } - - [Fact] - public void SortedSetPopMulti_Single() - { - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.SortedSetPop), r => r.SortedSetPop); - - var db = conn.GetDatabase(); - var key = Me(); - - db.KeyDelete(key, CommandFlags.FireAndForget); - db.SortedSetAdd(key, entries, CommandFlags.FireAndForget); - - var last = db.SortedSetPop(key, Order.Descending); - Assert.True(last.HasValue); - Assert.Equal(entries[9], last.Value); - Assert.Equal(9, db.SortedSetLength(key)); - - var firsts = db.SortedSetPop(key, 1, Order.Ascending); - Assert.Single(firsts); - Assert.Equal(entries[0], firsts[0]); - Assert.Equal(8, db.SortedSetLength(key)); - } - } - - [Fact] - public async Task SortedSetPopMulti_Multi_Async() - { - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.SortedSetPop), r => r.SortedSetPop); - - var db = conn.GetDatabase(); - var key = Me(); - - db.KeyDelete(key, CommandFlags.FireAndForget); - db.SortedSetAdd(key, entries, CommandFlags.FireAndForget); - - var last = await db.SortedSetPopAsync(key, Order.Descending).ForAwait(); - Assert.True(last.HasValue); - Assert.Equal(entries[9], last.Value); - Assert.Equal(9, db.SortedSetLength(key)); - - var moreLasts = await db.SortedSetPopAsync(key, 2, Order.Descending).ForAwait(); - Assert.Equal(2, moreLasts.Length); - Assert.Equal(entries[8], moreLasts[0]); - Assert.Equal(entries[7], moreLasts[1]); - Assert.Equal(7, db.SortedSetLength(key)); - } - } - - [Fact] - public async Task SortedSetPopMulti_Single_Async() - { - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.SortedSetPop), r => 
r.SortedSetPop); - - var db = conn.GetDatabase(); - var key = Me(); - - db.KeyDelete(key, CommandFlags.FireAndForget); - db.SortedSetAdd(key, entries, CommandFlags.FireAndForget); - - var first = await db.SortedSetPopAsync(key).ForAwait(); - Assert.True(first.HasValue); - Assert.Equal(entries[0], first.Value); - Assert.Equal(9, db.SortedSetLength(key)); - - var moreFirsts = await db.SortedSetPopAsync(key, 1).ForAwait(); - Assert.Single(moreFirsts); - Assert.Equal(entries[1], moreFirsts[0]); - Assert.Equal(8, db.SortedSetLength(key)); - } - } - - [Fact] - public async Task SortedSetPopMulti_Zero_Async() - { - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.SortedSetPop), r => r.SortedSetPop); - - var db = conn.GetDatabase(); - var key = Me(); - - db.KeyDelete(key, CommandFlags.FireAndForget); - db.SortedSetAdd(key, entries, CommandFlags.FireAndForget); - - var t = db.SortedSetPopAsync(key, count: 0); - Assert.True(t.IsCompleted); // sync - var arr = await t; - Assert.Empty(arr); - - Assert.Equal(10, db.SortedSetLength(key)); - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/StackExchange.Redis.Tests.csproj b/tests/StackExchange.Redis.Tests/StackExchange.Redis.Tests.csproj index 184407fd9..4c312d448 100644 --- a/tests/StackExchange.Redis.Tests/StackExchange.Redis.Tests.csproj +++ b/tests/StackExchange.Redis.Tests/StackExchange.Redis.Tests.csproj @@ -1,10 +1,13 @@  - net472;netcoreapp3.1 + + net481;net10.0 + Exe StackExchange.Redis.Tests true true full + enable true @@ -12,24 +15,23 @@ + + + - + - + + - - - - - diff --git a/tests/StackExchange.Redis.Tests/StreamTests.cs b/tests/StackExchange.Redis.Tests/StreamTests.cs new file mode 100644 index 000000000..7e625d399 --- /dev/null +++ b/tests/StackExchange.Redis.Tests/StreamTests.cs @@ -0,0 +1,2542 @@ +using System; +using System.Linq; +using System.Runtime.CompilerServices; +using System.Threading.Tasks; +using Newtonsoft.Json; +using Xunit; + +namespace 
StackExchange.Redis.Tests; + +[RunPerProtocol] +public class StreamTests(ITestOutputHelper output, SharedConnectionFixture fixture) : TestBase(output, fixture) +{ + public override string Me([CallerFilePath] string? filePath = null, [CallerMemberName] string? caller = null) => + base.Me(filePath, caller) + DateTimeOffset.UtcNow.ToUnixTimeMilliseconds(); + + [Fact] + public async Task IsStreamType() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + db.StreamAdd(key, "field1", "value1"); + + var keyType = db.KeyType(key); + + Assert.Equal(RedisType.Stream, keyType); + } + + [Fact] + public async Task StreamAddSinglePairWithAutoId() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + var messageId = db.StreamAdd(key, "field1", "value1"); + + Assert.True(messageId != RedisValue.Null && ((string?)messageId)?.Length > 0); + } + + [Fact] + public async Task StreamAddMultipleValuePairsWithAutoId() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + var fields = new[] + { + new NameValueEntry("field1", "value1"), + new NameValueEntry("field2", "value2"), + }; + + var messageId = db.StreamAdd(key, fields); + + var entries = db.StreamRange(key); + + Assert.Single(entries); + Assert.Equal(messageId, entries[0].Id); + var vals = entries[0].Values; + Assert.NotNull(vals); + Assert.Equal(2, vals.Length); + Assert.Equal("field1", vals[0].Name); + Assert.Equal("value1", vals[0].Value); + Assert.Equal("field2", vals[1].Name); + Assert.Equal("value2", vals[1].Value); + } + + [Fact] + public async Task StreamAddWithManualId() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + const string id = "42-0"; + var key = Me(); + + var messageId = db.StreamAdd(key, "field1", "value1", id); + + Assert.Equal(id, messageId); + } + + 
[Theory] + [InlineData(false, false, false)] + [InlineData(false, false, true)] + [InlineData(false, true, false)] + [InlineData(false, true, true)] + [InlineData(true, false, false)] + [InlineData(true, false, true)] + [InlineData(true, true, false)] + [InlineData(true, true, true)] + public async Task StreamAddIdempotentId(bool iid, bool pairs, bool async) + { + await using var conn = Create(require: RedisFeatures.v8_6_0); + var db = conn.GetDatabase(); + StreamIdempotentId id = iid ? new StreamIdempotentId("pid", "iid") : new StreamIdempotentId("pid"); + Log($"id: {id}"); + var key = Me(); + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + + async Task Add() + { + if (pairs) + { + NameValueEntry[] fields = [new("field1", "value1"), new("field2", "value2"), new("field3", "value3")]; + if (async) + { + return await db.StreamAddAsync(key, fields, idempotentId: id); + } + + return db.StreamAdd(key, fields, idempotentId: id); + } + + if (async) + { + return await db.StreamAddAsync(key, "field1", "value1", idempotentId: id); + } + + return db.StreamAdd(key, "field1", "value1", idempotentId: id); + } + + RedisValue first = await Add(); + Log($"Message ID: {first}"); + + RedisValue second = await Add(); + Assert.Equal(first, second); // idempotent id has avoided a duplicate + } + + [Theory] + [InlineData(null, null, false)] + [InlineData(null, 42, false)] + [InlineData(13, null, false)] + [InlineData(13, 42, false)] + [InlineData(null, null, true)] + [InlineData(null, 42, true)] + [InlineData(13, null, true)] + [InlineData(13, 42, true)] + public async Task StreamConfigure(int? duration, int? 
maxsize, bool async) + { + await using var conn = Create(require: RedisFeatures.v8_6_0); + var db = conn.GetDatabase(); + + var key = Me(); + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + var id = await db.StreamAddAsync(key, "field1", "value1"); + Log($"id: {id}"); + var settings = new StreamConfiguration { IdmpDuration = duration, IdmpMaxSize = maxsize }; + bool doomed = duration is null && maxsize is null; + if (async) + { + if (doomed) + { + var ex = await Assert.ThrowsAsync<RedisServerException>(async () => await db.StreamConfigureAsync(key, settings)); + Assert.StartsWith("ERR At least one parameter must be specified", ex.Message); + } + else + { + await db.StreamConfigureAsync(key, settings); + } + } + else + { + if (doomed) + { + var ex = Assert.Throws<RedisServerException>(() => db.StreamConfigure(key, settings)); + Assert.StartsWith("ERR At least one parameter must be specified", ex.Message); + } + else + { + db.StreamConfigure(key, settings); + } + } + var info = async ? await db.StreamInfoAsync(key) : db.StreamInfo(key); + const int SERVER_DEFAULT = 100; + Assert.Equal(duration ?? SERVER_DEFAULT, info.IdmpDuration); + Assert.Equal(maxsize ??
SERVER_DEFAULT, info.IdmpMaxSize); + } + + [Fact] + public async Task StreamAddMultipleValuePairsWithManualId() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + const string id = "42-0"; + var key = Me(); + + var fields = new[] + { + new NameValueEntry("field1", "value1"), + new NameValueEntry("field2", "value2"), + }; + + var messageId = db.StreamAdd(key, fields, id); + var entries = db.StreamRange(key); + + Assert.Equal(id, messageId); + Assert.NotNull(entries); + Assert.Single(entries); + Assert.Equal(id, entries[0].Id); + } + + [Fact] + public async Task StreamAutoClaim_MissingKey() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var key = Me(); + var db = conn.GetDatabase(); + const string group = "consumerGroup", + consumer = "consumer"; + + db.KeyDelete(key); + + var ex = Assert.Throws<RedisServerException>(() => db.StreamAutoClaim(key, group, consumer, 0, "0-0")); + Assert.StartsWith("NOGROUP No such key", ex.Message); + + ex = await Assert.ThrowsAsync<RedisServerException>(() => db.StreamAutoClaimAsync(key, group, consumer, 0, "0-0")); + Assert.StartsWith("NOGROUP No such key", ex.Message); + } + + [Fact] + public async Task StreamAutoClaim_ClaimsPendingMessages() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var key = Me(); + var db = conn.GetDatabase(); + const string group = "consumerGroup", + consumer1 = "c1", + consumer2 = "c2"; + + // Create Consumer Group, add messages, and read messages into a consumer. + _ = StreamAutoClaim_PrepareTestData(db, key, group, consumer1); + + // Claim any pending messages and reassign them to consumer2.
+ var result = db.StreamAutoClaim(key, group, consumer2, 0, "0-0"); + + Assert.Equal("0-0", result.NextStartId); + Assert.NotEmpty(result.ClaimedEntries); + Assert.Empty(result.DeletedIds); + Assert.Equal(2, result.ClaimedEntries.Length); + Assert.Equal("value1", result.ClaimedEntries[0].Values[0].Value); + Assert.Equal("value2", result.ClaimedEntries[1].Values[0].Value); + } + + [Fact] + public async Task StreamAutoClaim_ClaimsPendingMessagesAsync() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var key = Me(); + var db = conn.GetDatabase(); + const string group = "consumerGroup", + consumer1 = "c1", + consumer2 = "c2"; + + // Create Consumer Group, add messages, and read messages into a consumer. + _ = StreamAutoClaim_PrepareTestData(db, key, group, consumer1); + + // Claim any pending messages and reassign them to consumer2. + var result = await db.StreamAutoClaimAsync(key, group, consumer2, 0, "0-0"); + + Assert.Equal("0-0", result.NextStartId); + Assert.NotEmpty(result.ClaimedEntries); + Assert.Empty(result.DeletedIds); + Assert.Equal(2, result.ClaimedEntries.Length); + Assert.Equal("value1", result.ClaimedEntries[0].Values[0].Value); + Assert.Equal("value2", result.ClaimedEntries[1].Values[0].Value); + } + + [Fact] + public async Task StreamAutoClaim_ClaimsSingleMessageWithCountOption() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var key = Me(); + var db = conn.GetDatabase(); + const string group = "consumerGroup", + consumer1 = "c1", + consumer2 = "c2"; + + // Create Consumer Group, add messages, and read messages into a consumer. + var messageIds = StreamAutoClaim_PrepareTestData(db, key, group, consumer1); + + // Claim a single pending message and reassign it to consumer2. + var result = db.StreamAutoClaim(key, group, consumer2, 0, "0-0", count: 1); + + // Should be the second message ID from the call to prepare. 
+ Assert.Equal(messageIds[1], result.NextStartId); + Assert.NotEmpty(result.ClaimedEntries); + Assert.Empty(result.DeletedIds); + Assert.Single(result.ClaimedEntries); + Assert.Equal("value1", result.ClaimedEntries[0].Values[0].Value); + } + + [Fact] + public async Task StreamAutoClaim_ClaimsSingleMessageWithCountOptionIdsOnly() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var key = Me(); + var db = conn.GetDatabase(); + const string group = "consumerGroup", + consumer1 = "c1", + consumer2 = "c2"; + + // Create Consumer Group, add messages, and read messages into a consumer. + var messageIds = StreamAutoClaim_PrepareTestData(db, key, group, consumer1); + + // Claim a single pending message and reassign it to consumer2. + var result = db.StreamAutoClaimIdsOnly(key, group, consumer2, 0, "0-0", count: 1); + + // Should be the second message ID from the call to prepare. + Assert.Equal(messageIds[1], result.NextStartId); + Assert.NotEmpty(result.ClaimedIds); + Assert.Single(result.ClaimedIds); + Assert.Equal(messageIds[0], result.ClaimedIds[0]); + Assert.Empty(result.DeletedIds); + } + + [Fact] + public async Task StreamAutoClaim_ClaimsSingleMessageWithCountOptionAsync() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var key = Me(); + var db = conn.GetDatabase(); + const string group = "consumerGroup", + consumer1 = "c1", + consumer2 = "c2"; + + // Create Consumer Group, add messages, and read messages into a consumer. + var messageIds = StreamAutoClaim_PrepareTestData(db, key, group, consumer1); + + // Claim a single pending message and reassign it to consumer2. + var result = await db.StreamAutoClaimAsync(key, group, consumer2, 0, "0-0", count: 1); + + // Should be the second message ID from the call to prepare. 
+ Assert.Equal(messageIds[1], result.NextStartId); + Assert.NotEmpty(result.ClaimedEntries); + Assert.Empty(result.DeletedIds); + Assert.Single(result.ClaimedEntries); + Assert.Equal("value1", result.ClaimedEntries[0].Values[0].Value); + } + + [Fact] + public async Task StreamAutoClaim_ClaimsSingleMessageWithCountOptionIdsOnlyAsync() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var key = Me(); + var db = conn.GetDatabase(); + const string group = "consumerGroup", + consumer1 = "c1", + consumer2 = "c2"; + + // Create Consumer Group, add messages, and read messages into a consumer. + var messageIds = StreamAutoClaim_PrepareTestData(db, key, group, consumer1); + + // Claim a single pending message and reassign it to consumer2. + var result = await db.StreamAutoClaimIdsOnlyAsync(key, group, consumer2, 0, "0-0", count: 1); + + // Should be the second message ID from the call to prepare. + Assert.Equal(messageIds[1], result.NextStartId); + Assert.NotEmpty(result.ClaimedIds); + Assert.Single(result.ClaimedIds); + Assert.Equal(messageIds[0], result.ClaimedIds[0]); + Assert.Empty(result.DeletedIds); + } + + [Fact] + public async Task StreamAutoClaim_IncludesDeletedMessageId() + { + await using var conn = Create(require: RedisFeatures.v7_0_0_rc1); + + var key = Me(); + var db = conn.GetDatabase(); + const string group = "consumerGroup", + consumer1 = "c1", + consumer2 = "c2"; + + // Create Consumer Group, add messages, and read messages into a consumer. + var messageIds = StreamAutoClaim_PrepareTestData(db, key, group, consumer1); + + // Delete one of the messages, it should be included in the deleted message ID array. + db.StreamDelete(key, [messageIds[0]]); + + // Claim a single pending message and reassign it to consumer2. 
+ var result = db.StreamAutoClaim(key, group, consumer2, 0, "0-0", count: 2); + + Assert.Equal("0-0", result.NextStartId); + Assert.NotEmpty(result.ClaimedEntries); + Assert.NotEmpty(result.DeletedIds); + Assert.Single(result.ClaimedEntries); + Assert.Single(result.DeletedIds); + Assert.Equal(messageIds[0], result.DeletedIds[0]); + } + + [Fact] + public async Task StreamAutoClaim_IncludesDeletedMessageIdAsync() + { + await using var conn = Create(require: RedisFeatures.v7_0_0_rc1); + + var key = Me(); + var db = conn.GetDatabase(); + const string group = "consumerGroup", + consumer1 = "c1", + consumer2 = "c2"; + + // Create Consumer Group, add messages, and read messages into a consumer. + var messageIds = StreamAutoClaim_PrepareTestData(db, key, group, consumer1); + + // Delete one of the messages, it should be included in the deleted message ID array. + db.StreamDelete(key, [messageIds[0]]); + + // Claim a single pending message and reassign it to consumer2. + var result = await db.StreamAutoClaimAsync(key, group, consumer2, 0, "0-0", count: 2); + + Assert.Equal("0-0", result.NextStartId); + Assert.NotEmpty(result.ClaimedEntries); + Assert.NotEmpty(result.DeletedIds); + Assert.Single(result.ClaimedEntries); + Assert.Single(result.DeletedIds); + Assert.Equal(messageIds[0], result.DeletedIds[0]); + } + + [Fact] + public async Task StreamAutoClaim_NoMessagesToClaim() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var key = Me(); + var db = conn.GetDatabase(); + const string group = "consumerGroup"; + + // Create the group. + db.KeyDelete(key); + db.StreamCreateConsumerGroup(key, group, createStream: true); + + // **Don't add any messages to the stream** + + // Claim any pending messages (there aren't any) and reassign them to consumer2. 
+ var result = db.StreamAutoClaim(key, group, "consumer1", 0, "0-0"); + + // Claimed entries should be empty + Assert.Equal("0-0", result.NextStartId); + Assert.Empty(result.ClaimedEntries); + Assert.Empty(result.DeletedIds); + } + + [Fact] + public async Task StreamAutoClaim_NoMessagesToClaimAsync() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var key = Me(); + var db = conn.GetDatabase(); + const string group = "consumerGroup"; + + // Create the group. + db.KeyDelete(key); + db.StreamCreateConsumerGroup(key, group, createStream: true); + + // **Don't add any messages to the stream** + + // Claim any pending messages (there aren't any) and reassign them to consumer2. + var result = await db.StreamAutoClaimAsync(key, group, "consumer1", 0, "0-0"); + + // Claimed entries should be empty + Assert.Equal("0-0", result.NextStartId); + Assert.Empty(result.ClaimedEntries); + Assert.Empty(result.DeletedIds); + } + + [Fact] + public async Task StreamAutoClaim_NoMessageMeetsMinIdleTime() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var key = Me(); + var db = conn.GetDatabase(); + const string group = "consumerGroup", + consumer1 = "c1", + consumer2 = "c2"; + + // Create Consumer Group, add messages, and read messages into a consumer. + _ = StreamAutoClaim_PrepareTestData(db, key, group, consumer1); + + // Claim messages idle for more than 5 minutes, should return an empty array. + var result = db.StreamAutoClaim(key, group, consumer2, 300000, "0-0"); + + Assert.Equal("0-0", result.NextStartId); + Assert.Empty(result.ClaimedEntries); + Assert.Empty(result.DeletedIds); + } + + [Fact] + public async Task StreamAutoClaim_NoMessageMeetsMinIdleTimeAsync() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var key = Me(); + var db = conn.GetDatabase(); + const string group = "consumerGroup", + consumer1 = "c1", + consumer2 = "c2"; + + // Create Consumer Group, add messages, and read messages into a consumer. 
+ _ = StreamAutoClaim_PrepareTestData(db, key, group, consumer1); + + // Claim messages idle for more than 5 minutes, should return an empty array. + var result = await db.StreamAutoClaimAsync(key, group, consumer2, 300000, "0-0"); + + Assert.Equal("0-0", result.NextStartId); + Assert.Empty(result.ClaimedEntries); + Assert.Empty(result.DeletedIds); + } + + [Fact] + public async Task StreamAutoClaim_ReturnsMessageIdOnly() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var key = Me(); + var db = conn.GetDatabase(); + const string group = "consumerGroup", + consumer1 = "c1", + consumer2 = "c2"; + + // Create Consumer Group, add messages, and read messages into a consumer. + var messageIds = StreamAutoClaim_PrepareTestData(db, key, group, consumer1); + + // Claim any pending messages and reassign them to consumer2. + var result = db.StreamAutoClaimIdsOnly(key, group, consumer2, 0, "0-0"); + + Assert.Equal("0-0", result.NextStartId); + Assert.NotEmpty(result.ClaimedIds); + Assert.Empty(result.DeletedIds); + Assert.Equal(2, result.ClaimedIds.Length); + Assert.Equal(messageIds[0], result.ClaimedIds[0]); + Assert.Equal(messageIds[1], result.ClaimedIds[1]); + } + + [Fact] + public async Task StreamAutoClaim_ReturnsMessageIdOnlyAsync() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var key = Me(); + var db = conn.GetDatabase(); + const string group = "consumerGroup", + consumer1 = "c1", + consumer2 = "c2"; + + // Create Consumer Group, add messages, and read messages into a consumer. + var messageIds = StreamAutoClaim_PrepareTestData(db, key, group, consumer1); + + // Claim any pending messages and reassign them to consumer2. 
+ var result = await db.StreamAutoClaimIdsOnlyAsync(key, group, consumer2, 0, "0-0"); + + Assert.Equal("0-0", result.NextStartId); + Assert.NotEmpty(result.ClaimedIds); + Assert.Empty(result.DeletedIds); + Assert.Equal(2, result.ClaimedIds.Length); + Assert.Equal(messageIds[0], result.ClaimedIds[0]); + Assert.Equal(messageIds[1], result.ClaimedIds[1]); + } + + private static RedisValue[] StreamAutoClaim_PrepareTestData(IDatabase db, RedisKey key, RedisValue group, RedisValue consumer) + { + // Create the group. + db.KeyDelete(key); + db.StreamCreateConsumerGroup(key, group, createStream: true); + + // Add some messages + var id1 = db.StreamAdd(key, "field1", "value1"); + var id2 = db.StreamAdd(key, "field2", "value2"); + + // Read the messages into the "c1" + db.StreamReadGroup(key, group, consumer); + + return [id1, id2]; + } + + [Fact] + public async Task StreamConsumerGroupSetId() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + const string groupName = "test_group", consumer = "consumer"; + + // Create a stream + db.StreamAdd(key, "field1", "value1"); + db.StreamAdd(key, "field2", "value2"); + + // Create a group and set the position to deliver new messages only. + db.StreamCreateConsumerGroup(key, groupName, StreamPosition.NewMessages); + + // Read into the group, expect nothing + var firstRead = db.StreamReadGroup(key, groupName, consumer, StreamPosition.NewMessages); + + // Reset the ID back to read from the beginning. 
+ db.StreamConsumerGroupSetPosition(key, groupName, StreamPosition.Beginning); + + var secondRead = db.StreamReadGroup(key, groupName, consumer, StreamPosition.NewMessages); + + Assert.NotNull(firstRead); + Assert.NotNull(secondRead); + Assert.Empty(firstRead); + Assert.Equal(2, secondRead.Length); + } + + [Fact] + public async Task StreamConsumerGroupAutoClaim_MultiStream() + { + await using var conn = Create(require: RedisFeatures.v8_4_0_rc1); + + var db = conn.GetDatabase(); + var key = Me(); + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + const string groupName = "test_group", consumer = "consumer"; + + // Create a group and set the position to deliver new messages only. + await db.StreamCreateConsumerGroupAsync(key, groupName, StreamPosition.NewMessages); + + // add some entries + await db.StreamAddAsync(key, "field1", "value1"); + await db.StreamAddAsync(key, "field2", "value2"); + + var idleTime = TimeSpan.FromMilliseconds(100); + // Read into the group, expect the two entries; we don't expect any data + // here, at least on a fast server, because it hasn't been idle long enough. 
+ StreamPosition[] positions = [new(key, StreamPosition.NewMessages)]; + var groups = await db.StreamReadGroupAsync(positions, groupName, consumer, noAck: false, countPerStream: 10, claimMinIdleTime: idleTime); + var grp = Assert.Single(groups); + Assert.Equal(key, grp.Key); + Assert.Equal(2, grp.Entries.Length); + foreach (var entry in grp.Entries) + { + Assert.Equal(0, entry.DeliveryCount); // never delivered before + Assert.Equal(TimeSpan.Zero, entry.IdleTime); // never delivered before + } + + // now repeat immediately; we didn't "ack", so they're still pending, but not idle long enough + groups = await db.StreamReadGroupAsync(positions, groupName, consumer, noAck: false, countPerStream: 10, claimMinIdleTime: idleTime); + Assert.Empty(groups); // nothing available from any group + + // wait long enough for the messages to be considered idle + await Task.Delay(idleTime + idleTime); + + // repeat again; we should get the entries + groups = await db.StreamReadGroupAsync(positions, groupName, consumer, noAck: false, countPerStream: 10, claimMinIdleTime: idleTime); + grp = Assert.Single(groups); + Assert.Equal(key, grp.Key); + Assert.Equal(2, grp.Entries.Length); + foreach (var entry in grp.Entries) + { + Assert.Equal(1, entry.DeliveryCount); // this is a redelivery + Assert.True(entry.IdleTime > TimeSpan.Zero); // and is considered idle + } + } + + [Fact] + public async Task StreamConsumerGroupAutoClaim_SingleStream() + { + await using var conn = Create(require: RedisFeatures.v8_4_0_rc1); + + var db = conn.GetDatabase(); + var key = Me(); + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + const string groupName = "test_group", consumer = "consumer"; + + // Create a group and set the position to deliver new messages only. 
+ await db.StreamCreateConsumerGroupAsync(key, groupName, StreamPosition.NewMessages); + + // add some entries + await db.StreamAddAsync(key, "field1", "value1"); + await db.StreamAddAsync(key, "field2", "value2"); + + var idleTime = TimeSpan.FromMilliseconds(100); + // Read into the group, expect the two entries; we don't expect any data + // here, at least on a fast server, because it hasn't been idle long enough. + var entries = await db.StreamReadGroupAsync(key, groupName, consumer, noAck: false, count: 10, claimMinIdleTime: idleTime); + Assert.Equal(2, entries.Length); + foreach (var entry in entries) + { + Assert.Equal(0, entry.DeliveryCount); // never delivered before + Assert.Equal(TimeSpan.Zero, entry.IdleTime); // never delivered before + } + + // now repeat immediately; we didn't "ack", so they're still pending, but not idle long enough + entries = await db.StreamReadGroupAsync(key, groupName, consumer, null, noAck: false, count: 10, claimMinIdleTime: idleTime); + Assert.Empty(entries); // nothing available from any group + + // wait long enough for the messages to be considered idle + await Task.Delay(idleTime + idleTime); + + // repeat again; we should get the entries + entries = await db.StreamReadGroupAsync(key, groupName, consumer, null, noAck: false, count: 10, claimMinIdleTime: idleTime); + Assert.Equal(2, entries.Length); + foreach (var entry in entries) + { + Assert.Equal(1, entry.DeliveryCount); // this is a redelivery + Assert.True(entry.IdleTime > TimeSpan.Zero); // and is considered idle + } + } + + [Fact] + public async Task StreamConsumerGroupWithNoConsumers() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + const string groupName = "test_group"; + + // Create a stream + db.StreamAdd(key, "field1", "value1"); + + // Create a group + db.StreamCreateConsumerGroup(key, groupName, "0-0"); + + // Query redis for the group consumers, expect an empty list in response. 
+ var consumers = db.StreamConsumerInfo(key, groupName); + + Assert.Empty(consumers); + } + + [Fact] + public async Task StreamCreateConsumerGroup() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + const string groupName = "test_group"; + + // Create a stream + db.StreamAdd(key, "field1", "value1"); + + // Create a group + var result = db.StreamCreateConsumerGroup(key, groupName, StreamPosition.Beginning); + + Assert.True(result); + } + + [Fact] + public async Task StreamCreateConsumerGroupBeforeCreatingStream() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + + // Ensure the key doesn't exist. + var keyExistsBeforeCreate = db.KeyExists(key); + + // The 'createStream' parameter is 'true' by default. + var groupCreated = db.StreamCreateConsumerGroup(key, "consumerGroup", StreamPosition.NewMessages); + + var keyExistsAfterCreate = db.KeyExists(key); + + Assert.False(keyExistsBeforeCreate); + Assert.True(groupCreated); + Assert.True(keyExistsAfterCreate); + } + + [Fact] + public async Task StreamCreateConsumerGroupFailsIfKeyDoesntExist() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + + // Pass 'false' for 'createStream' to ensure that an + // exception is thrown when the stream doesn't exist. + Assert.ThrowsAny<Exception>(() => db.StreamCreateConsumerGroup( + key, + "consumerGroup", + StreamPosition.NewMessages, + createStream: false)); + } + + [Fact] + public async Task StreamCreateConsumerGroupSucceedsWhenKeyExists() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + + db.StreamAdd(key, "f1", "v1"); + + // Pass 'false' for 'createStream', should create the consumer group + // without issue since the stream already exists.
+ var groupCreated = db.StreamCreateConsumerGroup( + key, + "consumerGroup", + StreamPosition.NewMessages, + createStream: false); + + Assert.True(groupCreated); + } + + [Fact] + public async Task StreamConsumerGroupReadOnlyNewMessagesWithEmptyResponse() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + const string groupName = "test_group"; + + // Create a stream + db.StreamAdd(key, "field1", "value1"); + db.StreamAdd(key, "field2", "value2"); + + // Create a group. + db.StreamCreateConsumerGroup(key, groupName); + + // Read, expect no messages + var entries = db.StreamReadGroup(key, groupName, "test_consumer", "0-0"); + + Assert.Empty(entries); + } + + [Fact] + public async Task StreamConsumerGroupReadFromStreamBeginning() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + const string groupName = "test_group"; + + var id1 = db.StreamAdd(key, "field1", "value1"); + var id2 = db.StreamAdd(key, "field2", "value2"); + + db.StreamCreateConsumerGroup(key, groupName, StreamPosition.Beginning); + + var entries = db.StreamReadGroup(key, groupName, "test_consumer", StreamPosition.NewMessages); + + Assert.Equal(2, entries.Length); + Assert.True(id1 == entries[0].Id); + Assert.True(id2 == entries[1].Id); + } + + [Fact] + public async Task StreamConsumerGroupReadFromStreamBeginningWithCount() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + const string groupName = "test_group"; + + var id1 = db.StreamAdd(key, "field1", "value1"); + var id2 = db.StreamAdd(key, "field2", "value2"); + var id3 = db.StreamAdd(key, "field3", "value3"); + _ = db.StreamAdd(key, "field4", "value4"); + + // Start reading after id1. 
+ db.StreamCreateConsumerGroup(key, groupName, id1); + + var entries = db.StreamReadGroup(key, groupName, "test_consumer", StreamPosition.NewMessages, 2); + + // Ensure we only received the requested count and that the IDs match the expected values. + Assert.Equal(2, entries.Length); + Assert.True(id2 == entries[0].Id); + Assert.True(id3 == entries[1].Id); + } + + [Fact] + public async Task StreamConsumerGroupAcknowledgeMessage() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + const string groupName = "test_group", + consumer = "test_consumer"; + + var id1 = db.StreamAdd(key, "field1", "value1"); + var id2 = db.StreamAdd(key, "field2", "value2"); + var id3 = db.StreamAdd(key, "field3", "value3"); + RedisValue notexist = "0-0"; + var id4 = db.StreamAdd(key, "field4", "value4"); + + db.StreamCreateConsumerGroup(key, groupName, StreamPosition.Beginning); + + // Read all 4 messages, they will be assigned to the consumer + var entries = db.StreamReadGroup(key, groupName, consumer, StreamPosition.NewMessages); + Assert.Equal(4, entries.Length); + + // Send XACK for 3 of the messages + + // Single message Id overload. + var oneAck = db.StreamAcknowledge(key, groupName, id1); + Assert.Equal(1, oneAck); + + var nack = db.StreamAcknowledge(key, groupName, notexist); + Assert.Equal(0, nack); + + // Multiple message Id overload. + var twoAck = db.StreamAcknowledge(key, groupName, [id3, notexist, id4]); + + // Read the group again, it should only return the unacknowledged message. 
+ var notAcknowledged = db.StreamReadGroup(key, groupName, consumer, "0-0"); + + Assert.Equal(2, twoAck); + Assert.Single(notAcknowledged); + Assert.Equal(id2, notAcknowledged[0].Id); + } + + [Theory] + [InlineData(StreamTrimMode.KeepReferences)] + [InlineData(StreamTrimMode.DeleteReferences)] + [InlineData(StreamTrimMode.Acknowledged)] + public void StreamConsumerGroupAcknowledgeAndDeleteMessage(StreamTrimMode mode) + { + using var conn = Create(require: RedisFeatures.v8_2_0_rc1); + + var db = conn.GetDatabase(); + var key = Me() + ":" + mode; + const string groupName = "test_group", + consumer = "test_consumer"; + + var id1 = db.StreamAdd(key, "field1", "value1"); + var id2 = db.StreamAdd(key, "field2", "value2"); + var id3 = db.StreamAdd(key, "field3", "value3"); + RedisValue notexist = "0-0"; + var id4 = db.StreamAdd(key, "field4", "value4"); + + db.StreamCreateConsumerGroup(key, groupName, StreamPosition.Beginning); + + // Read all 4 messages, they will be assigned to the consumer + var entries = db.StreamReadGroup(key, groupName, consumer, StreamPosition.NewMessages); + Assert.Equal(4, entries.Length); + + // Send XACK for 3 of the messages + + // Single message Id overload. + var oneAck = db.StreamAcknowledgeAndDelete(key, groupName, mode, id1); + Assert.Equal(StreamTrimResult.Deleted, oneAck); + + StreamTrimResult nack = db.StreamAcknowledgeAndDelete(key, groupName, mode, notexist); + Assert.Equal(StreamTrimResult.NotFound, nack); + + // Multiple message Id overload. + RedisValue[] ids = new[] { id3, notexist, id4 }; + var twoAck = db.StreamAcknowledgeAndDelete(key, groupName, mode, ids); + + // Read the group again, it should only return the unacknowledged message. 
+ var notAcknowledged = db.StreamReadGroup(key, groupName, consumer, "0-0"); + + Assert.Equal(3, twoAck.Length); + Assert.Equal(StreamTrimResult.Deleted, twoAck[0]); + Assert.Equal(StreamTrimResult.NotFound, twoAck[1]); + Assert.Equal(StreamTrimResult.Deleted, twoAck[2]); + + Assert.Single(notAcknowledged); + Assert.Equal(id2, notAcknowledged[0].Id); + } + + [Fact] + public async Task StreamConsumerGroupClaimMessages() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + const string groupName = "test_group", + consumer1 = "test_consumer_1", + consumer2 = "test_consumer_2"; + + _ = db.StreamAdd(key, "field1", "value1"); + _ = db.StreamAdd(key, "field2", "value2"); + _ = db.StreamAdd(key, "field3", "value3"); + _ = db.StreamAdd(key, "field4", "value4"); + + db.StreamCreateConsumerGroup(key, groupName, "0-0"); + + // Read a single message into the first consumer. + db.StreamReadGroup(key, groupName, consumer1, count: 1); + + // Read the remaining messages into the second consumer. + db.StreamReadGroup(key, groupName, consumer2); + + // Claim the 3 messages consumed by consumer2 for consumer1. + + // Get the pending messages for consumer2. + var pendingMessages = db.StreamPendingMessages( + key, + groupName, + 10, + consumer2); + + // Claim the messages for consumer1. 
+ var messages = db.StreamClaim( + key, + groupName, + consumer1, + 0, // Min message idle time + messageIds: pendingMessages.Select(pm => pm.MessageId).ToArray()); + + // Now see how many messages are pending for each consumer + var pendingSummary = db.StreamPending(key, groupName); + + Assert.NotNull(pendingSummary.Consumers); + Assert.Single(pendingSummary.Consumers); + Assert.Equal(4, pendingSummary.Consumers[0].PendingMessageCount); + Assert.Equal(pendingMessages.Length, messages.Length); + } + + [Fact] + public async Task StreamConsumerGroupClaimMessagesReturningIds() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + const string groupName = "test_group", + consumer1 = "test_consumer_1", + consumer2 = "test_consumer_2"; + + _ = db.StreamAdd(key, "field1", "value1"); + var id2 = db.StreamAdd(key, "field2", "value2"); + var id3 = db.StreamAdd(key, "field3", "value3"); + var id4 = db.StreamAdd(key, "field4", "value4"); + + db.StreamCreateConsumerGroup(key, groupName, StreamPosition.Beginning); + + // Read a single message into the first consumer. + _ = db.StreamReadGroup(key, groupName, consumer1, StreamPosition.NewMessages, 1); + + // Read the remaining messages into the second consumer. + _ = db.StreamReadGroup(key, groupName, consumer2); + + // Claim the 3 messages consumed by consumer2 for consumer1. + + // Get the pending messages for consumer2. + var pendingMessages = db.StreamPendingMessages( + key, + groupName, + 10, + consumer2); + + // Claim the messages for consumer1. + var messageIds = db.StreamClaimIdsOnly( + key, + groupName, + consumer1, + 0, // Min message idle time + messageIds: pendingMessages.Select(pm => pm.MessageId).ToArray()); + + // We should get an array of 3 message IDs. 
+ Assert.Equal(3, messageIds.Length); + Assert.Equal(id2, messageIds[0]); + Assert.Equal(id3, messageIds[1]); + Assert.Equal(id4, messageIds[2]); + } + + [Fact] + public async Task StreamConsumerGroupReadMultipleOneReadBeginningOneReadNew() + { + // Create a group for each stream. One set to read from the beginning of the + // stream and the other to begin reading only new messages. + + // Ask redis to read from the beginning of both stream, expect messages + // for only the stream set to read from the beginning. + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + const string groupName = "test_group"; + var stream1 = Me() + "a"; + var stream2 = Me() + "b"; + + db.StreamAdd(stream1, "field1-1", "value1-1"); + db.StreamAdd(stream1, "field1-2", "value1-2"); + + db.StreamAdd(stream2, "field2-1", "value2-1"); + db.StreamAdd(stream2, "field2-2", "value2-2"); + db.StreamAdd(stream2, "field2-3", "value2-3"); + + // stream1 set up to read only new messages. + db.StreamCreateConsumerGroup(stream1, groupName, StreamPosition.NewMessages); + + // stream2 set up to read from the beginning of the stream + db.StreamCreateConsumerGroup(stream2, groupName, StreamPosition.Beginning); + + // Read for both streams from the beginning. We shouldn't get anything back for stream1. + var pairs = new[] + { + // StreamPosition.NewMessages will send ">" which indicates "Undelivered" messages. 
+ new StreamPosition(stream1, StreamPosition.NewMessages), + new StreamPosition(stream2, StreamPosition.NewMessages), + }; + + var streams = db.StreamReadGroup(pairs, groupName, "test_consumer"); + + Assert.NotNull(streams); + Assert.Single(streams); + Assert.Equal(stream2, streams[0].Key); + Assert.Equal(3, streams[0].Entries.Length); + } + + [Fact] + public async Task StreamConsumerGroupReadMultipleOnlyNewMessagesExpectNoResult() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + const string groupName = "test_group"; + var stream1 = Me() + "a"; + var stream2 = Me() + "b"; + + db.StreamAdd(stream1, "field1-1", "value1-1"); + db.StreamAdd(stream2, "field2-1", "value2-1"); + + // set both streams to read only new messages (default behavior). + db.StreamCreateConsumerGroup(stream1, groupName); + db.StreamCreateConsumerGroup(stream2, groupName); + + // We shouldn't get anything for either stream. + var pairs = new[] + { + new StreamPosition(stream1, StreamPosition.Beginning), + new StreamPosition(stream2, StreamPosition.Beginning), + }; + + var streams = db.StreamReadGroup(pairs, groupName, "test_consumer"); + + Assert.NotNull(streams); + Assert.Equal(2, streams.Length); + Assert.Empty(streams[0].Entries); + Assert.Empty(streams[1].Entries); + } + + [Fact] + public async Task StreamConsumerGroupReadMultipleOnlyNewMessagesExpect1Result() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + const string groupName = "test_group"; + var stream1 = Me() + "a"; + var stream2 = Me() + "b"; + + // These messages won't be read. + db.StreamAdd(stream1, "field1-1", "value1-1"); + db.StreamAdd(stream2, "field2-1", "value2-1"); + + // set both streams to read only new messages (default behavior). + db.StreamCreateConsumerGroup(stream1, groupName); + db.StreamCreateConsumerGroup(stream2, groupName); + + // We should read these though. 
+ var id1 = db.StreamAdd(stream1, "field1-2", "value1-2"); + var id2 = db.StreamAdd(stream2, "field2-2", "value2-2"); + + // Read the new messages (messages created after the group was created). + var pairs = new[] + { + new StreamPosition(stream1, StreamPosition.NewMessages), + new StreamPosition(stream2, StreamPosition.NewMessages), + }; + + var streams = db.StreamReadGroup(pairs, groupName, "test_consumer"); + + Assert.NotNull(streams); + Assert.Equal(2, streams.Length); + Assert.Single(streams[0].Entries); + Assert.Single(streams[1].Entries); + Assert.Equal(id1, streams[0].Entries[0].Id); + Assert.Equal(id2, streams[1].Entries[0].Id); + } + + [Fact] + public async Task StreamConsumerGroupReadMultipleRestrictCount() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + const string groupName = "test_group"; + var stream1 = Me() + "a"; + var stream2 = Me() + "b"; + + var id1_1 = db.StreamAdd(stream1, "field1-1", "value1-1"); + var id1_2 = db.StreamAdd(stream1, "field1-2", "value1-2"); + + var id2_1 = db.StreamAdd(stream2, "field2-1", "value2-1"); + _ = db.StreamAdd(stream2, "field2-2", "value2-2"); + _ = db.StreamAdd(stream2, "field2-3", "value2-3"); + + // Set the initial read point in each stream, *after* the first ID in both streams. + db.StreamCreateConsumerGroup(stream1, groupName, id1_1); + db.StreamCreateConsumerGroup(stream2, groupName, id2_1); + + var pairs = new[] + { + // Read after the first id in both streams + new StreamPosition(stream1, StreamPosition.NewMessages), + new StreamPosition(stream2, StreamPosition.NewMessages), + }; + + // Restrict the count to 2 (expect only 1 message from first stream, 2 from the second). 
+ var streams = db.StreamReadGroup(pairs, groupName, "test_consumer", 2); + + Assert.NotNull(streams); + Assert.Equal(2, streams.Length); + Assert.Single(streams[0].Entries); + Assert.Equal(2, streams[1].Entries.Length); + Assert.Equal(id1_2, streams[0].Entries[0].Id); + } + + [Fact] + public async Task StreamConsumerGroupViewPendingInfoNoConsumers() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + const string groupName = "test_group"; + + db.StreamAdd(key, "field1", "value1"); + + db.StreamCreateConsumerGroup(key, groupName, StreamPosition.Beginning); + + var pendingInfo = db.StreamPending(key, groupName); + + Assert.Equal(0, pendingInfo.PendingMessageCount); + Assert.Equal(RedisValue.Null, pendingInfo.LowestPendingMessageId); + Assert.Equal(RedisValue.Null, pendingInfo.HighestPendingMessageId); + Assert.NotNull(pendingInfo.Consumers); + Assert.Empty(pendingInfo.Consumers); + } + + [Fact] + public async Task StreamConsumerGroupViewPendingInfoWhenNothingPending() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + const string groupName = "test_group"; + + db.StreamAdd(key, "field1", "value1"); + + db.StreamCreateConsumerGroup(key, groupName, "0-0"); + + var pendingMessages = db.StreamPendingMessages( + key, + groupName, + 10, + consumerName: RedisValue.Null); + + Assert.NotNull(pendingMessages); + Assert.Empty(pendingMessages); + } + + [Fact] + public async Task StreamConsumerGroupViewPendingInfoSummary() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + const string groupName = "test_group", + consumer1 = "test_consumer_1", + consumer2 = "test_consumer_2"; + + var id1 = db.StreamAdd(key, "field1", "value1"); + db.StreamAdd(key, "field2", "value2"); + db.StreamAdd(key, "field3", "value3"); + var id4 = db.StreamAdd(key, "field4", "value4"); + + 
db.StreamCreateConsumerGroup(key, groupName, StreamPosition.Beginning); + + // Read a single message into the first consumer. + db.StreamReadGroup(key, groupName, consumer1, StreamPosition.NewMessages, 1); + + // Read the remaining messages into the second consumer. + db.StreamReadGroup(key, groupName, consumer2); + + var pendingInfo = db.StreamPending(key, groupName); + + Assert.Equal(4, pendingInfo.PendingMessageCount); + Assert.Equal(id1, pendingInfo.LowestPendingMessageId); + Assert.Equal(id4, pendingInfo.HighestPendingMessageId); + Assert.Equal(2, pendingInfo.Consumers.Length); + + var consumer1Count = pendingInfo.Consumers.First(c => c.Name == consumer1).PendingMessageCount; + var consumer2Count = pendingInfo.Consumers.First(c => c.Name == consumer2).PendingMessageCount; + + Assert.Equal(1, consumer1Count); + Assert.Equal(3, consumer2Count); + } + + [Fact] + public async Task StreamConsumerGroupViewPendingMessageInfo() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + const string groupName = "test_group", + consumer1 = "test_consumer_1", + consumer2 = "test_consumer_2"; + + var id1 = db.StreamAdd(key, "field1", "value1"); + db.StreamAdd(key, "field2", "value2"); + db.StreamAdd(key, "field3", "value3"); + db.StreamAdd(key, "field4", "value4"); + + db.StreamCreateConsumerGroup(key, groupName, StreamPosition.Beginning); + + // Read a single message into the first consumer. + db.StreamReadGroup(key, groupName, consumer1, count: 1); + + // Read the remaining messages into the second consumer. + _ = db.StreamReadGroup(key, groupName, consumer2) ?? throw new ArgumentNullException(nameof(consumer2), "db.StreamReadGroup(key, groupName, consumer2)"); + + await Task.Delay(10).ForAwait(); + + // Get the pending info about the messages themselves. 
+ var pendingMessageInfoList = db.StreamPendingMessages(key, groupName, 10, RedisValue.Null); + + Assert.NotNull(pendingMessageInfoList); + Assert.Equal(4, pendingMessageInfoList.Length); + Assert.Equal(consumer1, pendingMessageInfoList[0].ConsumerName); + Assert.Equal(1, pendingMessageInfoList[0].DeliveryCount); + Assert.True((int)pendingMessageInfoList[0].IdleTimeInMilliseconds > 0); + Assert.Equal(id1, pendingMessageInfoList[0].MessageId); + } + + [Fact] + public async Task StreamConsumerGroupViewPendingMessageWithMinIdle() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var key = Me(); + const string groupName = "test_group", + consumer1 = "test_consumer_1"; + const int minIdleTimeInMs = 100; + + var id1 = db.StreamAdd(key, "field1", "value1"); + + db.StreamCreateConsumerGroup(key, groupName, StreamPosition.Beginning); + + // Read a single message into the first consumer. + db.StreamReadGroup(key, groupName, consumer1, count: 1); + + var preDelayPendingMessages = + db.StreamPendingMessages(key, groupName, 10, RedisValue.Null, minId: id1, maxId: id1, minIdleTimeInMs: minIdleTimeInMs); + + await Task.Delay(minIdleTimeInMs * 2).ForAwait(); + + var postDelayPendingMessages = + db.StreamPendingMessages(key, groupName, 10, RedisValue.Null, minId: id1, maxId: id1, minIdleTimeInMs: minIdleTimeInMs); + + Assert.NotNull(preDelayPendingMessages); + Assert.Empty(preDelayPendingMessages); + Assert.NotNull(postDelayPendingMessages); + Assert.Single(postDelayPendingMessages); + Assert.Equal(1, postDelayPendingMessages[0].DeliveryCount); + Assert.True((int)postDelayPendingMessages[0].IdleTimeInMilliseconds > minIdleTimeInMs); + Assert.Equal(id1, postDelayPendingMessages[0].MessageId); + } + + [Fact] + public async Task StreamConsumerGroupViewPendingMessageInfoForConsumer() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + const string groupName = 
"test_group", + consumer1 = "test_consumer_1", + consumer2 = "test_consumer_2"; + + db.StreamAdd(key, "field1", "value1"); + db.StreamAdd(key, "field2", "value2"); + db.StreamAdd(key, "field3", "value3"); + db.StreamAdd(key, "field4", "value4"); + + db.StreamCreateConsumerGroup(key, groupName, StreamPosition.Beginning); + + // Read a single message into the first consumer. + db.StreamReadGroup(key, groupName, consumer1, count: 1); + + // Read the remaining messages into the second consumer. + db.StreamReadGroup(key, groupName, consumer2); + + // Get the pending info about the messages themselves. + var pendingMessageInfoList = db.StreamPendingMessages( + key, + groupName, + 10, + consumer2); + + Assert.NotNull(pendingMessageInfoList); + Assert.Equal(3, pendingMessageInfoList.Length); + } + + [Fact] + public async Task StreamDeleteConsumer() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + const string groupName = "test_group", + consumer = "test_consumer"; + + // Add a message to create the stream. + db.StreamAdd(key, "field1", "value1"); + db.StreamAdd(key, "field2", "value2"); + + // Create a consumer group and read the message. + db.StreamCreateConsumerGroup(key, groupName, StreamPosition.Beginning); + db.StreamReadGroup(key, groupName, consumer, StreamPosition.NewMessages); + + var preDeleteConsumers = db.StreamConsumerInfo(key, groupName); + + // Delete the consumer. + var deleteResult = db.StreamDeleteConsumer(key, groupName, consumer); + + // Should get 2 messages in the deleteResult. 
+ var postDeleteConsumers = db.StreamConsumerInfo(key, groupName); + + Assert.Equal(2, deleteResult); + Assert.Single(preDeleteConsumers); + Assert.Empty(postDeleteConsumers); + } + + [Fact] + public async Task StreamDeleteConsumerGroup() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + const string groupName = "test_group", + consumer = "test_consumer"; + + // Add a message to create the stream. + db.StreamAdd(key, "field1", "value1"); + + // Create a consumer group and read the messages. + db.StreamCreateConsumerGroup(key, groupName, StreamPosition.Beginning); + db.StreamReadGroup(key, groupName, consumer, StreamPosition.Beginning); + + var preDeleteInfo = db.StreamInfo(key); + + // Now delete the group. + var deleteResult = db.StreamDeleteConsumerGroup(key, groupName); + + var postDeleteInfo = db.StreamInfo(key); + + Assert.True(deleteResult); + Assert.Equal(1, preDeleteInfo.ConsumerGroupCount); + Assert.Equal(0, postDeleteInfo.ConsumerGroupCount); + } + + [Fact] + public async Task StreamDeleteMessage() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + + db.StreamAdd(key, "field1", "value1"); + db.StreamAdd(key, "field2", "value2"); + var id3 = db.StreamAdd(key, "field3", "value3"); + db.StreamAdd(key, "field4", "value4"); + + var deletedCount = db.StreamDelete(key, [id3]); + var messages = db.StreamRange(key); + + Assert.Equal(1, deletedCount); + Assert.Equal(3, messages.Length); + } + + [Fact] + public async Task StreamDeleteMessages() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + + db.StreamAdd(key, "field1", "value1"); + var id2 = db.StreamAdd(key, "field2", "value2"); + var id3 = db.StreamAdd(key, "field3", "value3"); + db.StreamAdd(key, "field4", "value4"); + + var deletedCount = db.StreamDelete(key, [id2, id3], CommandFlags.None); + var 
messages = db.StreamRange(key); + + Assert.Equal(2, deletedCount); + Assert.Equal(2, messages.Length); + } + + [Theory] + [InlineData(StreamTrimMode.KeepReferences)] + [InlineData(StreamTrimMode.DeleteReferences)] + [InlineData(StreamTrimMode.Acknowledged)] + public void StreamDeleteExMessage(StreamTrimMode mode) + { + using var conn = Create(require: RedisFeatures.v8_2_0_rc1); // XDELEX + + var db = conn.GetDatabase(); + var key = Me() + ":" + mode; + + db.StreamAdd(key, "field1", "value1"); + db.StreamAdd(key, "field2", "value2"); + var id3 = db.StreamAdd(key, "field3", "value3"); + db.StreamAdd(key, "field4", "value4"); + + var deleted = db.StreamDelete(key, new[] { id3 }, mode: mode); + var messages = db.StreamRange(key); + + Assert.Equal(StreamTrimResult.Deleted, Assert.Single(deleted)); + Assert.Equal(3, messages.Length); + } + + [Theory] + [InlineData(StreamTrimMode.KeepReferences)] + [InlineData(StreamTrimMode.DeleteReferences)] + [InlineData(StreamTrimMode.Acknowledged)] + public void StreamDeleteExMessages(StreamTrimMode mode) + { + using var conn = Create(require: RedisFeatures.v8_2_0_rc1); // XDELEX + + var db = conn.GetDatabase(); + var key = Me() + ":" + mode; + + db.StreamAdd(key, "field1", "value1"); + var id2 = db.StreamAdd(key, "field2", "value2"); + var id3 = db.StreamAdd(key, "field3", "value3"); + db.StreamAdd(key, "field4", "value4"); + + var deleted = db.StreamDelete(key, new[] { id2, id3 }, mode: mode); + var messages = db.StreamRange(key); + + Assert.Equal(2, deleted.Length); + Assert.Equal(StreamTrimResult.Deleted, deleted[0]); + Assert.Equal(StreamTrimResult.Deleted, deleted[1]); + Assert.Equal(2, messages.Length); + } + + [Fact] + public async Task StreamGroupInfoGet() + { + var key = Me(); + const string group1 = "test_group_1", + group2 = "test_group_2", + consumer1 = "test_consumer_1", + consumer2 = "test_consumer_2"; + + await using (var conn = Create(require: RedisFeatures.v5_0_0)) + { + var db = conn.GetDatabase(); + 
db.KeyDelete(key); + + db.StreamAdd(key, "field1", "value1"); + db.StreamAdd(key, "field2", "value2"); + db.StreamAdd(key, "field3", "value3"); + db.StreamAdd(key, "field4", "value4"); + + db.StreamCreateConsumerGroup(key, group1, StreamPosition.Beginning); + db.StreamCreateConsumerGroup(key, group2, StreamPosition.Beginning); + + var groupInfoList = db.StreamGroupInfo(key); + Assert.Equal(0, groupInfoList[0].EntriesRead); + Assert.Equal(4, groupInfoList[0].Lag); + Assert.Equal(0, groupInfoList[0].EntriesRead); + Assert.Equal(4, groupInfoList[1].Lag); + + // Read a single message into the first consumer. + db.StreamReadGroup(key, group1, consumer1, count: 1); + + // Read the remaining messages into the second consumer. + db.StreamReadGroup(key, group2, consumer2); + + groupInfoList = db.StreamGroupInfo(key); + + Assert.NotNull(groupInfoList); + Assert.Equal(2, groupInfoList.Length); + + Assert.Equal(group1, groupInfoList[0].Name); + Assert.Equal(1, groupInfoList[0].PendingMessageCount); + Assert.True(IsMessageId(groupInfoList[0].LastDeliveredId)); // can't test actual - will vary + Assert.Equal(1, groupInfoList[0].EntriesRead); + Assert.Equal(3, groupInfoList[0].Lag); + + Assert.Equal(group2, groupInfoList[1].Name); + Assert.Equal(4, groupInfoList[1].PendingMessageCount); + Assert.True(IsMessageId(groupInfoList[1].LastDeliveredId)); // can't test actual - will vary + Assert.Equal(4, groupInfoList[1].EntriesRead); + Assert.Equal(0, groupInfoList[1].Lag); + } + + static bool IsMessageId(string? 
value) + { + if (value.IsNullOrWhiteSpace()) return false; + return value.Length >= 3 && value.Contains('-'); + } + } + + [Fact] + public async Task StreamGroupConsumerInfoGet() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + const string group = "test_group", + consumer1 = "test_consumer_1", + consumer2 = "test_consumer_2"; + + db.StreamAdd(key, "field1", "value1"); + db.StreamAdd(key, "field2", "value2"); + db.StreamAdd(key, "field3", "value3"); + db.StreamAdd(key, "field4", "value4"); + + db.StreamCreateConsumerGroup(key, group, StreamPosition.Beginning); + db.StreamReadGroup(key, group, consumer1, count: 1); + db.StreamReadGroup(key, group, consumer2); + + var consumerInfoList = db.StreamConsumerInfo(key, group); + + Assert.NotNull(consumerInfoList); + Assert.Equal(2, consumerInfoList.Length); + + Assert.Equal(consumer1, consumerInfoList[0].Name); + Assert.Equal(consumer2, consumerInfoList[1].Name); + + Assert.Equal(1, consumerInfoList[0].PendingMessageCount); + Assert.Equal(3, consumerInfoList[1].PendingMessageCount); + } + + [Fact] + public async Task StreamInfoGet() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + + var id1 = db.StreamAdd(key, "field1", "value1"); + db.StreamAdd(key, "field2", "value2"); + var id3 = db.StreamAdd(key, "field3", "value3"); + db.StreamAdd(key, "field4", "value4"); + var id5 = db.StreamAdd(key, "field5", "value5"); + db.StreamDelete(key, [id3]); + var streamInfo = db.StreamInfo(key); + + Assert.Equal(4, streamInfo.Length); + Assert.True(streamInfo.RadixTreeKeys > 0); + Assert.True(streamInfo.RadixTreeNodes > 0); + Assert.Equal(id1, streamInfo.FirstEntry.Id); + Assert.Equal(id5, streamInfo.LastEntry.Id); + + var server = conn.GetServer(conn.GetEndPoints().First()); + Log($"server version: {server.Version}"); + if (server.Version.IsAtLeast(RedisFeatures.v7_0_0_rc1)) + { + 
Assert.Equal(id3, streamInfo.MaxDeletedEntryId); + Assert.Equal(5, streamInfo.EntriesAdded); + Assert.False(streamInfo.RecordedFirstEntryId.IsNull); + } + else + { + Assert.True(streamInfo.MaxDeletedEntryId.IsNull); + Assert.Equal(-1, streamInfo.EntriesAdded); + Assert.True(streamInfo.RecordedFirstEntryId.IsNull); + } + + if (server.Version.IsAtLeast(RedisFeatures.v8_6_0)) + { + Assert.True(streamInfo.IdmpDuration > 0); + Assert.True(streamInfo.IdmpMaxSize > 0); + Assert.Equal(0, streamInfo.PidsTracked); + Assert.Equal(0, streamInfo.IidsTracked); + Assert.Equal(0, streamInfo.IidsDuplicates); + Assert.Equal(0, streamInfo.IidsAdded); + } + else + { + Assert.Equal(-1, streamInfo.IdmpDuration); + Assert.Equal(-1, streamInfo.IdmpMaxSize); + Assert.Equal(-1, streamInfo.PidsTracked); + Assert.Equal(-1, streamInfo.IidsTracked); + Assert.Equal(-1, streamInfo.IidsDuplicates); + Assert.Equal(-1, streamInfo.IidsAdded); + } + } + + [Fact] + public async Task StreamInfoGetWithEmptyStream() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + + // Add an entry and then delete it so the stream is empty, then run streaminfo + // to ensure it functions properly on an empty stream. Namely, the first-entry + // and last-entry messages should be null. 
+ var id = db.StreamAdd(key, "field1", "value1"); + db.StreamDelete(key, [id]); + + Assert.Equal(0, db.StreamLength(key)); + + var streamInfo = db.StreamInfo(key); + + Assert.True(streamInfo.FirstEntry.IsNull); + Assert.True(streamInfo.LastEntry.IsNull); + } + + [Fact] + public async Task StreamNoConsumerGroups() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + + db.StreamAdd(key, "field1", "value1"); + + var groups = db.StreamGroupInfo(key); + + Assert.NotNull(groups); + Assert.Empty(groups); + } + + [Fact] + public async Task StreamPendingNoMessagesOrConsumers() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + const string groupName = "test_group"; + + var id = db.StreamAdd(key, "field1", "value1"); + db.StreamDelete(key, [id]); + + db.StreamCreateConsumerGroup(key, groupName, "0-0"); + + var pendingInfo = db.StreamPending(key, "test_group"); + + Assert.Equal(0, pendingInfo.PendingMessageCount); + Assert.Equal(RedisValue.Null, pendingInfo.LowestPendingMessageId); + Assert.Equal(RedisValue.Null, pendingInfo.HighestPendingMessageId); + Assert.NotNull(pendingInfo.Consumers); + Assert.Empty(pendingInfo.Consumers); + } + + [Fact] + public void StreamPositionDefaultValueIsBeginning() + { + RedisValue position = StreamPosition.Beginning; + Assert.Equal(StreamConstants.AllMessages, StreamPosition.Resolve(position, RedisCommand.XREAD)); + Assert.Equal(StreamConstants.AllMessages, StreamPosition.Resolve(position, RedisCommand.XREADGROUP)); + Assert.Equal(StreamConstants.AllMessages, StreamPosition.Resolve(position, RedisCommand.XGROUP)); + } + + [Fact] + public void StreamPositionValidateBeginning() + { + var position = StreamPosition.Beginning; + + Assert.Equal(StreamConstants.AllMessages, StreamPosition.Resolve(position, RedisCommand.XREAD)); + } + + [Fact] + public void StreamPositionValidateExplicit() + { + const string 
explicitValue = "1-0"; + const string position = explicitValue; + + Assert.Equal(explicitValue, StreamPosition.Resolve(position, RedisCommand.XREAD)); + } + + [Fact] + public void StreamPositionValidateNew() + { + var position = StreamPosition.NewMessages; + + Assert.Equal(StreamConstants.NewMessages, StreamPosition.Resolve(position, RedisCommand.XGROUP)); + Assert.Equal(StreamConstants.UndeliveredMessages, StreamPosition.Resolve(position, RedisCommand.XREADGROUP)); + Assert.ThrowsAny(() => StreamPosition.Resolve(position, RedisCommand.XREAD)); + } + + [Fact] + public async Task StreamRead() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + + var id1 = db.StreamAdd(key, "field1", "value1"); + var id2 = db.StreamAdd(key, "field2", "value2"); + var id3 = db.StreamAdd(key, "field3", "value3"); + + // Read the entire stream from the beginning. + var entries = db.StreamRead(key, "0-0"); + + Assert.Equal(3, entries.Length); + Assert.Equal(id1, entries[0].Id); + Assert.Equal(id2, entries[1].Id); + Assert.Equal(id3, entries[2].Id); + } + + [Fact] + public async Task StreamReadEmptyStream() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + + // Write to a stream to create the key. + var id1 = db.StreamAdd(key, "field1", "value1"); + + // Delete the key to empty the stream. + db.StreamDelete(key, [id1]); + var len = db.StreamLength(key); + + // Read the entire stream from the beginning. + var entries = db.StreamRead(key, "0-0"); + + Assert.Empty(entries); + Assert.Equal(0, len); + } + + [Fact] + public async Task StreamReadEmptyStreams() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key1 = Me() + "a"; + var key2 = Me() + "b"; + + // Write to a stream to create the key. 
+ var id1 = db.StreamAdd(key1, "field1", "value1"); + var id2 = db.StreamAdd(key2, "field2", "value2"); + + // Delete the key to empty the stream. + db.StreamDelete(key1, [id1]); + db.StreamDelete(key2, [id2]); + + var len1 = db.StreamLength(key1); + var len2 = db.StreamLength(key2); + + // Read the entire stream from the beginning. + var entries1 = db.StreamRead(key1, "0-0"); + var entries2 = db.StreamRead(key2, "0-0"); + + Assert.Empty(entries1); + Assert.Empty(entries2); + + Assert.Equal(0, len1); + Assert.Equal(0, len2); + } + + [Fact] + public async Task StreamReadLastMessage() + { + await using var conn = Create(require: RedisFeatures.v7_4_0_rc1); + var db = conn.GetDatabase(); + var key1 = Me(); + + // Read the entire stream from the beginning. + db.StreamRead(key1, "0-0"); + db.StreamAdd(key1, "field2", "value2"); + db.StreamAdd(key1, "fieldLast", "valueLast"); + var entries = db.StreamRead(key1, "+"); + + Assert.NotNull(entries); + Assert.True(entries.Length > 0); + Assert.Equal(new[] { new NameValueEntry("fieldLast", "valueLast") }, entries[0].Values); + } + + [Fact] + public async Task StreamReadExpectedExceptionInvalidCountMultipleStream() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var streamPositions = new[] + { + new StreamPosition("key1", "0-0"), + new StreamPosition("key2", "0-0"), + }; + Assert.Throws(() => db.StreamRead(streamPositions, 0)); + } + + [Fact] + public async Task StreamReadExpectedExceptionInvalidCountSingleStream() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + Assert.Throws(() => db.StreamRead(key, "0-0", 0)); + } + + [Fact] + public async Task StreamReadExpectedExceptionNullStreamList() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + Assert.Throws(() => db.StreamRead(null!)); + } + + [Fact] + public async Task 
StreamReadExpectedExceptionEmptyStreamList() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var emptyList = Array.Empty(); + Assert.Throws(() => db.StreamRead(emptyList)); + } + + [Fact] + public async Task StreamReadMultipleStreams() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key1 = Me() + "a"; + var key2 = Me() + "b"; + + var id1 = db.StreamAdd(key1, "field1", "value1"); + var id2 = db.StreamAdd(key1, "field2", "value2"); + var id3 = db.StreamAdd(key2, "field3", "value3"); + var id4 = db.StreamAdd(key2, "field4", "value4"); + + // Read from both streams at the same time. + var streamList = new[] + { + new StreamPosition(key1, "0-0"), + new StreamPosition(key2, "0-0"), + }; + + var streams = db.StreamRead(streamList); + + Assert.Equal(2, streams.Length); + + Assert.Equal(key1, streams[0].Key); + Assert.Equal(2, streams[0].Entries.Length); + Assert.Equal(id1, streams[0].Entries[0].Id); + Assert.Equal(id2, streams[0].Entries[1].Id); + + Assert.Equal(key2, streams[1].Key); + Assert.Equal(2, streams[1].Entries.Length); + Assert.Equal(id3, streams[1].Entries[0].Id); + Assert.Equal(id4, streams[1].Entries[1].Id); + } + + [Fact] + public async Task StreamReadMultipleStreamsLastMessage() + { + await using var conn = Create(require: RedisFeatures.v7_4_0_rc1); + + var db = conn.GetDatabase(); + var key1 = Me() + "a"; + var key2 = Me() + "b"; + + var id1 = db.StreamAdd(key1, "field1", "value1"); + var id2 = db.StreamAdd(key1, "field2", "value2"); + var id3 = db.StreamAdd(key2, "field3", "value3"); + var id4 = db.StreamAdd(key2, "field4", "value4"); + + var streamList = new[] { new StreamPosition(key1, "0-0"), new StreamPosition(key2, "0-0") }; + db.StreamRead(streamList); + + var streams = db.StreamRead(streamList); + + db.StreamAdd(key1, "field5", "value5"); + db.StreamAdd(key1, "field6", "value6"); + db.StreamAdd(key2, "field7", "value7"); + 
db.StreamAdd(key2, "field8", "value8"); + + streamList = [new StreamPosition(key1, "+"), new StreamPosition(key2, "+")]; + + streams = db.StreamRead(streamList); + + Assert.NotNull(streams); + Assert.Equal(2, streams.Length); + + var stream1 = streams.Where(e => e.Key == key1).First(); + Assert.NotNull(stream1.Entries); + Assert.True(stream1.Entries.Length > 0); + Assert.Equal(new[] { new NameValueEntry("field6", "value6") }, stream1.Entries[0].Values); + + var stream2 = streams.Where(e => e.Key == key2).First(); + Assert.NotNull(stream2.Entries); + Assert.True(stream2.Entries.Length > 0); + Assert.Equal(new[] { new NameValueEntry("field8", "value8") }, stream2.Entries[0].Values); + } + + [Fact] + public async Task StreamReadMultipleStreamsWithCount() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key1 = Me() + "a"; + var key2 = Me() + "b"; + + var id1 = db.StreamAdd(key1, "field1", "value1"); + db.StreamAdd(key1, "field2", "value2"); + var id3 = db.StreamAdd(key2, "field3", "value3"); + db.StreamAdd(key2, "field4", "value4"); + + var streamList = new[] + { + new StreamPosition(key1, "0-0"), + new StreamPosition(key2, "0-0"), + }; + + var streams = db.StreamRead(streamList, countPerStream: 1); + + // We should get both streams back. + Assert.Equal(2, streams.Length); + + // Ensure we only got one message per stream. + Assert.Single(streams[0].Entries); + Assert.Single(streams[1].Entries); + + // Check the message IDs as well. 
+ Assert.Equal(id1, streams[0].Entries[0].Id); + Assert.Equal(id3, streams[1].Entries[0].Id); + } + + [Fact] + public async Task StreamReadMultipleStreamsWithReadPastSecondStream() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key1 = Me() + "a"; + var key2 = Me() + "b"; + + db.StreamAdd(key1, "field1", "value1"); + db.StreamAdd(key1, "field2", "value2"); + db.StreamAdd(key2, "field3", "value3"); + var id4 = db.StreamAdd(key2, "field4", "value4"); + + var streamList = new[] + { + new StreamPosition(key1, "0-0"), + + // read past the end of stream # 2 + new StreamPosition(key2, id4), + }; + + var streams = db.StreamRead(streamList); + + // We should only get the first stream back. + Assert.Single(streams); + + Assert.Equal(key1, streams[0].Key); + Assert.Equal(2, streams[0].Entries.Length); + } + + [Fact] + public async Task StreamReadMultipleStreamsWithEmptyResponse() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key1 = Me() + "a"; + var key2 = Me() + "b"; + + db.StreamAdd(key1, "field1", "value1"); + var id2 = db.StreamAdd(key1, "field2", "value2"); + db.StreamAdd(key2, "field3", "value3"); + var id4 = db.StreamAdd(key2, "field4", "value4"); + + var streamList = new[] + { + // Read past the end of both streams. + new StreamPosition(key1, id2), + new StreamPosition(key2, id4), + }; + + var streams = db.StreamRead(streamList); + + // We expect an empty response. + Assert.Empty(streams); + } + + [Fact] + public async Task StreamReadPastEndOfStream() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + + db.StreamAdd(key, "field1", "value1"); + var id2 = db.StreamAdd(key, "field2", "value2"); + + // Read after the final ID in the stream, we expect an empty array as a response. 
+ var entries = db.StreamRead(key, id2); + + Assert.Empty(entries); + } + + [Fact] + public async Task StreamReadRange() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + + var id1 = db.StreamAdd(key, "field1", "value1"); + var id2 = db.StreamAdd(key, "field2", "value2"); + + var entries = db.StreamRange(key); + + Assert.Equal(2, entries.Length); + Assert.Equal(id1, entries[0].Id); + Assert.Equal(id2, entries[1].Id); + } + + [Fact] + public async Task StreamReadRangeOfEmptyStream() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + + var id1 = db.StreamAdd(key, "field1", "value1"); + var id2 = db.StreamAdd(key, "field2", "value2"); + + var deleted = db.StreamDelete(key, [id1, id2]); + + var entries = db.StreamRange(key); + + Assert.Equal(2, deleted); + Assert.NotNull(entries); + Assert.Empty(entries); + } + + [Fact] + public async Task StreamReadRangeWithCount() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + + var id1 = db.StreamAdd(key, "field1", "value1"); + db.StreamAdd(key, "field2", "value2"); + + var entries = db.StreamRange(key, count: 1); + + Assert.Single(entries); + Assert.Equal(id1, entries[0].Id); + } + + [Fact] + public async Task StreamReadRangeReverse() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + + var id1 = db.StreamAdd(key, "field1", "value1"); + var id2 = db.StreamAdd(key, "field2", "value2"); + + var entries = db.StreamRange(key, messageOrder: Order.Descending); + + Assert.Equal(2, entries.Length); + Assert.Equal(id2, entries[0].Id); + Assert.Equal(id1, entries[1].Id); + } + + [Fact] + public async Task StreamReadRangeReverseWithCount() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + + 
var id1 = db.StreamAdd(key, "field1", "value1"); + var id2 = db.StreamAdd(key, "field2", "value2"); + + var entries = db.StreamRange(key, id1, id2, 1, Order.Descending); + + Assert.Single(entries); + Assert.Equal(id2, entries[0].Id); + } + + [Fact] + public async Task StreamReadWithAfterIdAndCount_1() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + + var id1 = db.StreamAdd(key, "field1", "value1"); + var id2 = db.StreamAdd(key, "field2", "value2"); + db.StreamAdd(key, "field3", "value3"); + + // Only read a single item from the stream. + var entries = db.StreamRead(key, id1, 1); + + Assert.Single(entries); + Assert.Equal(id2, entries[0].Id); + } + + [Fact] + public async Task StreamReadWithAfterIdAndCount_2() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + + var id1 = db.StreamAdd(key, "field1", "value1"); + var id2 = db.StreamAdd(key, "field2", "value2"); + var id3 = db.StreamAdd(key, "field3", "value3"); + db.StreamAdd(key, "field4", "value4"); + + // Read multiple items from the stream. + var entries = db.StreamRead(key, id1, 2); + + Assert.Equal(2, entries.Length); + Assert.Equal(id2, entries[0].Id); + Assert.Equal(id3, entries[1].Id); + } + + protected override string GetConfiguration() => "127.0.0.1:6379"; + + [Fact] + public async Task StreamTrimLength() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + + // Add a couple items and check length. + db.StreamAdd(key, "field1", "value1"); + db.StreamAdd(key, "field2", "value2"); + db.StreamAdd(key, "field3", "value3"); + db.StreamAdd(key, "field4", "value4"); + + var numRemoved = db.StreamTrim(key, 1); + var len = db.StreamLength(key); + + Assert.Equal(3, numRemoved); + Assert.Equal(1, len); + } + + private static Version ForMode(StreamTrimMode mode, Version? 
defaultVersion = null) => mode switch + { + StreamTrimMode.KeepReferences => defaultVersion ?? RedisFeatures.v5_0_0, + StreamTrimMode.Acknowledged => RedisFeatures.v8_2_0_rc1, + StreamTrimMode.DeleteReferences => RedisFeatures.v8_2_0_rc1, + _ => throw new ArgumentOutOfRangeException(nameof(mode)), + }; + + [Theory] + [InlineData(StreamTrimMode.KeepReferences)] + [InlineData(StreamTrimMode.DeleteReferences)] + [InlineData(StreamTrimMode.Acknowledged)] + public void StreamTrimByMinId(StreamTrimMode mode) + { + using var conn = Create(require: ForMode(mode, RedisFeatures.v6_2_0)); + + var db = conn.GetDatabase(); + var key = Me() + ":" + mode; + + // Add a couple items and check length. + db.StreamAdd(key, "field1", "value1", 1111111110); + db.StreamAdd(key, "field2", "value2", 1111111111); + db.StreamAdd(key, "field3", "value3", 1111111112); + + var numRemoved = db.StreamTrimByMinId(key, 1111111111, mode: mode); + var len = db.StreamLength(key); + + Assert.Equal(1, numRemoved); + Assert.Equal(2, len); + } + +#pragma warning disable xUnit1004 + [Theory(Skip = "Flaky")] +#pragma warning restore xUnit1004 + [InlineData(StreamTrimMode.KeepReferences)] + [InlineData(StreamTrimMode.DeleteReferences)] + [InlineData(StreamTrimMode.Acknowledged)] + public void StreamTrimByMinIdWithApproximateAndLimit(StreamTrimMode mode) + { + using var conn = Create(require: ForMode(mode, RedisFeatures.v6_2_0)); + + var db = conn.GetDatabase(); + var key = Me() + ":" + mode; + + const int maxLength = 100; + const int limit = 10; + + // The behavior of ACKED etc is undefined when there are no consumer groups; or rather, + // it *is* defined, but it is defined/implemented differently < and >= server 8.6 + // This *does* have the side-effect that the 3 modes behave the same in this test, + // but: we're trying to test the API, not the server. 
+ const string groupName = "test_group", consumer = "consumer"; + db.StreamCreateConsumerGroup(key, groupName, StreamPosition.NewMessages); + for (var i = 0; i < maxLength; i++) + { + db.StreamAdd(key, $"field", $"value", 1111111110 + i); + } + + var entries = db.StreamReadGroup( + key, + groupName, + consumer, + StreamPosition.NewMessages); + + Assert.Equal(maxLength, entries.Length); + + var numRemoved = db.StreamTrimByMinId(key, 1111111110 + maxLength, useApproximateMaxLength: true, limit: limit, mode: mode); + const int EXPECT_REMOVED = 0; + var len = db.StreamLength(key); + + Assert.Equal(EXPECT_REMOVED, numRemoved); + Assert.Equal(maxLength - EXPECT_REMOVED, len); + } + + [Fact] + public async Task StreamVerifyLength() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + + // Add a couple items and check length. + db.StreamAdd(key, "field1", "value1"); + db.StreamAdd(key, "field2", "value2"); + + var len = db.StreamLength(key); + + Assert.Equal(2, len); + } + + [Fact] + public async Task AddWithApproxCountAsync() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + await db.StreamAddAsync(key, "field", "value", maxLength: 10, useApproximateMaxLength: true, flags: CommandFlags.None).ConfigureAwait(false); + } + + [Theory] + [InlineData(StreamTrimMode.KeepReferences)] + [InlineData(StreamTrimMode.DeleteReferences)] + [InlineData(StreamTrimMode.Acknowledged)] + public async Task AddWithApproxCount(StreamTrimMode mode) + { + await using var conn = Create(require: ForMode(mode)); + + var db = conn.GetDatabase(); + var key = Me() + ":" + mode; + db.StreamAdd(key, "field", "value", maxLength: 10, useApproximateMaxLength: true, trimMode: mode, flags: CommandFlags.None); + } + + [Theory] + [InlineData(StreamTrimMode.KeepReferences, 1)] + [InlineData(StreamTrimMode.DeleteReferences, 1)] + [InlineData(StreamTrimMode.Acknowledged, 1)] + 
[InlineData(StreamTrimMode.KeepReferences, 2)] + [InlineData(StreamTrimMode.DeleteReferences, 2)] + [InlineData(StreamTrimMode.Acknowledged, 2)] + public async Task AddWithMultipleApproxCount(StreamTrimMode mode, int count) + { + await using var conn = Create(require: ForMode(mode)); + + var db = conn.GetDatabase(); + var key = Me() + ":" + mode; + + var pairs = new NameValueEntry[count]; + for (var i = 0; i < count; i++) + { + pairs[i] = new NameValueEntry($"field{i}", $"value{i}"); + } + db.StreamAdd(key, maxLength: 10, useApproximateMaxLength: true, trimMode: mode, flags: CommandFlags.None, streamPairs: pairs); + } + + [Fact] + public async Task StreamReadGroupWithNoAckShowsNoPendingMessages() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + const string groupName = "test_group", + consumer = "consumer"; + + db.StreamAdd(key, "field1", "value1"); + db.StreamAdd(key, "field2", "value2"); + + db.StreamCreateConsumerGroup(key, groupName, StreamPosition.NewMessages); + + db.StreamReadGroup( + key, + groupName, + consumer, + StreamPosition.NewMessages, + noAck: true); + + var pendingInfo = db.StreamPending(key, groupName); + + Assert.Equal(0, pendingInfo.PendingMessageCount); + } + + [Fact] + public async Task StreamReadGroupMultiStreamWithNoAckShowsNoPendingMessages() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key1 = Me() + "a"; + var key2 = Me() + "b"; + const string groupName = "test_group", + consumer = "consumer"; + + db.StreamAdd(key1, "field1", "value1"); + db.StreamAdd(key1, "field2", "value2"); + + db.StreamAdd(key2, "field3", "value3"); + db.StreamAdd(key2, "field4", "value4"); + + db.StreamCreateConsumerGroup(key1, groupName, StreamPosition.NewMessages); + db.StreamCreateConsumerGroup(key2, groupName, StreamPosition.NewMessages); + + db.StreamReadGroup( + [ + new StreamPosition(key1, StreamPosition.NewMessages), + new 
StreamPosition(key2, StreamPosition.NewMessages), + ], + groupName, + consumer, + noAck: true); + + var pending1 = db.StreamPending(key1, groupName); + var pending2 = db.StreamPending(key2, groupName); + + Assert.Equal(0, pending1.PendingMessageCount); + Assert.Equal(0, pending2.PendingMessageCount); + } + + [Fact] + public async Task StreamReadIndexerUsage() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var streamName = Me(); + + await db.StreamAddAsync( + streamName, + [ + new NameValueEntry("x", "blah"), + new NameValueEntry("msg", /*lang=json,strict*/ @"{""name"":""test"",""id"":123}"), + new NameValueEntry("y", "more blah"), + ]); + + var streamResult = await db.StreamRangeAsync(streamName, count: 1000); + var evntJson = streamResult + .Select(x => (dynamic?)JsonConvert.DeserializeObject(x["msg"]!)) + .ToList(); + var obj = Assert.Single(evntJson); + Assert.Equal(123, (int)obj!.id); + Assert.Equal("test", (string)obj.name); + } + + [Fact] + public async Task StreamConsumerGroupInfoLagIsNull() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + const string groupName = "test_group", + consumer = "consumer"; + + await db.StreamCreateConsumerGroupAsync(key, groupName); + await db.StreamReadGroupAsync(key, groupName, consumer, "0-0", 1); + await db.StreamAddAsync(key, "field1", "value1"); + await db.StreamAddAsync(key, "field1", "value1"); + + var streamInfo = await db.StreamInfoAsync(key); + await db.StreamDeleteAsync(key, new[] { streamInfo.LastEntry.Id }); + + Assert.Null((await db.StreamGroupInfoAsync(key))[0].Lag); + } + + [Fact] + public async Task StreamConsumerGroupInfoLagIsTwo() + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + var db = conn.GetDatabase(); + var key = Me(); + const string groupName = "test_group", + consumer = "consumer"; + + await db.StreamCreateConsumerGroupAsync(key, groupName); + await 
db.StreamReadGroupAsync(key, groupName, consumer, "0-0", 1); + await db.StreamAddAsync(key, "field1", "value1"); + await db.StreamAddAsync(key, "field1", "value1"); + + Assert.Equal(2, (await db.StreamGroupInfoAsync(key))[0].Lag); + } +} diff --git a/tests/StackExchange.Redis.Tests/Streams.cs b/tests/StackExchange.Redis.Tests/Streams.cs deleted file mode 100644 index 22a0fa944..000000000 --- a/tests/StackExchange.Redis.Tests/Streams.cs +++ /dev/null @@ -1,1826 +0,0 @@ -using System; -using System.Linq; -using System.Threading.Tasks; -using Newtonsoft.Json; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - [Collection(SharedConnectionFixture.Key)] - public class Streams : TestBase - { - public Streams(ITestOutputHelper output, SharedConnectionFixture fixture) : base(output, fixture) { } - - [Fact] - public void IsStreamType() - { - using (var conn = Create()) - { - var key = GetUniqueKey("type_check"); - - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - db.StreamAdd(key, "field1", "value1"); - - var keyType = db.KeyType(key); - - Assert.Equal(RedisType.Stream, keyType); - } - } - - [Fact] - public void StreamAddSinglePairWithAutoId() - { - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - var messageId = db.StreamAdd(GetUniqueKey("auto_id"), "field1", "value1"); - - Assert.True(messageId != RedisValue.Null && ((string)messageId).Length > 0); - } - } - - [Fact] - public void StreamAddMultipleValuePairsWithAutoId() - { - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var key = GetUniqueKey("multiple_value_pairs"); - - var fields = new [] - { - new NameValueEntry("field1", "value1"), - new NameValueEntry("field2", "value2") - }; - - var db = conn.GetDatabase(); - var messageId = db.StreamAdd(key, fields); - - var 
entries = db.StreamRange(key); - - Assert.Single(entries); - Assert.Equal(messageId, entries[0].Id); - Assert.Equal(2, entries[0].Values.Length); - Assert.Equal("field1", entries[0].Values[0].Name); - Assert.Equal("value1", entries[0].Values[0].Value); - Assert.Equal("field2", entries[0].Values[1].Name); - Assert.Equal("value2", entries[0].Values[1].Value); - } - } - - [Fact] - public void StreamAddWithManualId() - { - const string id = "42-0"; - var key = GetUniqueKey("manual_id"); - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - var messageId = db.StreamAdd(key, "field1", "value1", id); - - Assert.Equal(id, messageId); - } - } - - [Fact] - public void StreamAddMultipleValuePairsWithManualId() - { - const string id = "42-0"; - var key = GetUniqueKey("manual_id_multiple_values"); - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - var fields = new [] - { - new NameValueEntry("field1", "value1"), - new NameValueEntry("field2", "value2") - }; - - var messageId = db.StreamAdd(key, fields, id); - var entries = db.StreamRange(key); - - Assert.Equal(id, messageId); - Assert.NotNull(entries); - Assert.Single(entries); - Assert.Equal(id, entries[0].Id); - } - } - - [Fact] - public void StreamConsumerGroupSetId() - { - var key = GetUniqueKey("group_set_id"); - const string groupName = "test_group"; - const string consumer = "consumer"; - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - // Create a stream - db.StreamAdd(key, "field1", "value1"); - db.StreamAdd(key, "field2", "value2"); - - // Create a group and set the position to deliver new messages only. 
- db.StreamCreateConsumerGroup(key, groupName, StreamPosition.NewMessages); - - // Read into the group, expect nothing - var firstRead = db.StreamReadGroup(key, groupName, consumer, StreamPosition.NewMessages); - - // Reset the ID back to read from the beginning. - db.StreamConsumerGroupSetPosition(key, groupName, StreamPosition.Beginning); - - var secondRead = db.StreamReadGroup(key, groupName, consumer, StreamPosition.NewMessages); - - Assert.NotNull(firstRead); - Assert.NotNull(secondRead); - Assert.Empty(firstRead); - Assert.Equal(2, secondRead.Length); - } - } - - [Fact] - public void StreamConsumerGroupWithNoConsumers() - { - var key = GetUniqueKey("group_with_no_consumers"); - const string groupName = "test_group"; - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - // Create a stream - db.StreamAdd(key, "field1", "value1"); - - // Create a group - db.StreamCreateConsumerGroup(key, groupName, "0-0"); - - // Query redis for the group consumers, expect an empty list in response. 
- var consumers = db.StreamConsumerInfo(key, groupName); - - Assert.Empty(consumers); - } - } - - [Fact] - public void StreamCreateConsumerGroup() - { - var key = GetUniqueKey("group_create"); - const string groupName = "test_group"; - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - // Create a stream - db.StreamAdd(key, "field1", "value1"); - - // Create a group - var result = db.StreamCreateConsumerGroup(key, groupName, StreamPosition.Beginning); - - Assert.True(result); - } - } - - [Fact] - public void StreamCreateConsumerGroupBeforeCreatingStream() - { - var key = GetUniqueKey("group_create_before_stream"); - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - // Ensure the key doesn't exist. - var keyExistsBeforeCreate = db.KeyExists(key); - - // The 'createStream' parameter is 'true' by default. - var groupCreated = db.StreamCreateConsumerGroup(key, "consumerGroup", StreamPosition.NewMessages); - - var keyExistsAfterCreate = db.KeyExists(key); - - Assert.False(keyExistsBeforeCreate); - Assert.True(groupCreated); - Assert.True(keyExistsAfterCreate); - } - } - - [Fact] - public void StreamCreateConsumerGroupFailsIfKeyDoesntExist() - { - var key = GetUniqueKey("group_create_before_stream_should_fail"); - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - // Pass 'false' for 'createStream' to ensure that an - // execption is thrown when the stream doesn't exist. 
- Assert.ThrowsAny(() => - { - db.StreamCreateConsumerGroup( - key, - "consumerGroup", - StreamPosition.NewMessages, - createStream: false); - }); - } - } - - [Fact] - public void StreamCreateConsumerGroupSucceedsWhenKeyExists() - { - var key = GetUniqueKey("group_create_after_stream"); - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - db.StreamAdd(key, "f1", "v1"); - - // Pass 'false' for 'createStream', should create the consumer group - // without issue since the stream already exists. - var groupCreated = db.StreamCreateConsumerGroup( - key, - "consumerGroup", - StreamPosition.NewMessages, - createStream: false); - - Assert.True(groupCreated); - } - } - - [Fact] - public void StreamConsumerGroupReadOnlyNewMessagesWithEmptyResponse() - { - var key = GetUniqueKey("group_read"); - const string groupName = "test_group"; - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - // Create a stream - db.StreamAdd(key, "field1", "value1"); - db.StreamAdd(key, "field2", "value2"); - - // Create a group. 
- db.StreamCreateConsumerGroup(key, groupName); - - // Read, expect no messages - var entries = db.StreamReadGroup(key, groupName, "test_consumer", "0-0"); - - Assert.Empty(entries); - } - } - - [Fact] - public void StreamConsumerGroupReadFromStreamBeginning() - { - var key = GetUniqueKey("group_read_beginning"); - const string groupName = "test_group"; - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - var id1 = db.StreamAdd(key, "field1", "value1"); - var id2 = db.StreamAdd(key, "field2", "value2"); - - db.StreamCreateConsumerGroup(key, groupName, StreamPosition.Beginning); - - var entries = db.StreamReadGroup(key, groupName, "test_consumer", StreamPosition.NewMessages); - - Assert.Equal(2, entries.Length); - Assert.True(id1 == entries[0].Id); - Assert.True(id2 == entries[1].Id); - } - } - - [Fact] - public void StreamConsumerGroupReadFromStreamBeginningWithCount() - { - var key = GetUniqueKey("group_read_with_count"); - const string groupName = "test_group"; - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - var id1 = db.StreamAdd(key, "field1", "value1"); - var id2 = db.StreamAdd(key, "field2", "value2"); - var id3 = db.StreamAdd(key, "field3", "value3"); - _ = db.StreamAdd(key, "field4", "value4"); - - // Start reading after id1. - db.StreamCreateConsumerGroup(key, groupName, id1); - - var entries = db.StreamReadGroup(key, groupName, "test_consumer", StreamPosition.NewMessages, 2); - - // Ensure we only received the requested count and that the IDs match the expected values. 
- Assert.Equal(2, entries.Length); - Assert.True(id2 == entries[0].Id); - Assert.True(id3 == entries[1].Id); - } - } - - [Fact] - public void StreamConsumerGroupAcknowledgeMessage() - { - var key = GetUniqueKey("group_ack"); - const string groupName = "test_group"; - const string consumer = "test_consumer"; - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - var id1 = db.StreamAdd(key, "field1", "value1"); - var id2 = db.StreamAdd(key, "field2", "value2"); - var id3 = db.StreamAdd(key, "field3", "value3"); - var id4 = db.StreamAdd(key, "field4", "value4"); - - db.StreamCreateConsumerGroup(key, groupName, StreamPosition.Beginning); - - // Read all 4 messages, they will be assigned to the consumer - var entries = db.StreamReadGroup(key, groupName, consumer, StreamPosition.NewMessages); - - // Send XACK for 3 of the messages - - // Single message Id overload. - var oneAck = db.StreamAcknowledge(key, groupName, id1); - - // Multiple message Id overload. - var twoAck = db.StreamAcknowledge(key, groupName, new [] { id3, id4 }); - - // Read the group again, it should only return the unacknowledged message. 
- var notAcknowledged = db.StreamReadGroup(key, groupName, consumer, "0-0"); - - Assert.Equal(4, entries.Length); - Assert.Equal(1, oneAck); - Assert.Equal(2, twoAck); - Assert.Single(notAcknowledged); - Assert.Equal(id2, notAcknowledged[0].Id); - } - } - - [Fact] - public void StreamConsumerGroupClaimMessages() - { - var key = GetUniqueKey("group_claim"); - const string groupName = "test_group"; - const string consumer1 = "test_consumer_1"; - const string consumer2 = "test_consumer_2"; - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - _ = db.StreamAdd(key, "field1", "value1"); - _ = db.StreamAdd(key, "field2", "value2"); - _ = db.StreamAdd(key, "field3", "value3"); - _ = db.StreamAdd(key, "field4", "value4"); - - db.StreamCreateConsumerGroup(key, groupName, "0-0"); - - // Read a single message into the first consumer. - db.StreamReadGroup(key, groupName, consumer1, count: 1); - - // Read the remaining messages into the second consumer. - db.StreamReadGroup(key, groupName, consumer2); - - // Claim the 3 messages consumed by consumer2 for consumer1. - - // Get the pending messages for consumer2. - var pendingMessages = db.StreamPendingMessages(key, groupName, - 10, - consumer2); - - // Claim the messages for consumer1. 
- var messages = db.StreamClaim(key, - groupName, - consumer1, - 0, // Min message idle time - messageIds: pendingMessages.Select(pm => pm.MessageId).ToArray()); - - // Now see how many messages are pending for each consumer - var pendingSummary = db.StreamPending(key, groupName); - - Assert.NotNull(pendingSummary.Consumers); - Assert.Single(pendingSummary.Consumers); - Assert.Equal(4, pendingSummary.Consumers[0].PendingMessageCount); - Assert.Equal(pendingMessages.Length, messages.Length); - } - } - - [Fact] - public void StreamConsumerGroupClaimMessagesReturningIds() - { - var key = GetUniqueKey("group_claim_view_ids"); - const string groupName = "test_group"; - const string consumer1 = "test_consumer_1"; - const string consumer2 = "test_consumer_2"; - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - _ = db.StreamAdd(key, "field1", "value1"); - var id2 = db.StreamAdd(key, "field2", "value2"); - var id3 = db.StreamAdd(key, "field3", "value3"); - var id4 = db.StreamAdd(key, "field4", "value4"); - - db.StreamCreateConsumerGroup(key, groupName, StreamPosition.Beginning); - - // Read a single message into the first consumer. - _ = db.StreamReadGroup(key, groupName, consumer1, StreamPosition.NewMessages, 1); - - // Read the remaining messages into the second consumer. - _ = db.StreamReadGroup(key, groupName, consumer2); - - // Claim the 3 messages consumed by consumer2 for consumer1. - - // Get the pending messages for consumer2. - var pendingMessages = db.StreamPendingMessages(key, groupName, - 10, - consumer2); - - // Claim the messages for consumer1. - var messageIds = db.StreamClaimIdsOnly(key, - groupName, - consumer1, - 0, // Min message idle time - messageIds: pendingMessages.Select(pm => pm.MessageId).ToArray()); - - // We should get an array of 3 message IDs. 
- Assert.Equal(3, messageIds.Length); - Assert.Equal(id2, messageIds[0]); - Assert.Equal(id3, messageIds[1]); - Assert.Equal(id4, messageIds[2]); - } - } - - [Fact] - public void StreamConsumerGroupReadMultipleOneReadBeginningOneReadNew() - { - // Create a group for each stream. One set to read from the beginning of the - // stream and the other to begin reading only new messages. - - // Ask redis to read from the beginning of both stream, expect messages - // for only the stream set to read from the beginning. - - const string groupName = "test_group"; - var stream1 = GetUniqueKey("stream1a"); - var stream2 = GetUniqueKey("stream2a"); - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - db.StreamAdd(stream1, "field1-1", "value1-1"); - db.StreamAdd(stream1, "field1-2", "value1-2"); - - db.StreamAdd(stream2, "field2-1", "value2-1"); - db.StreamAdd(stream2, "field2-2", "value2-2"); - db.StreamAdd(stream2, "field2-3", "value2-3"); - - // stream1 set up to read only new messages. - db.StreamCreateConsumerGroup(stream1, groupName, StreamPosition.NewMessages); - - // stream2 set up to read from the beginning of the stream - db.StreamCreateConsumerGroup(stream2, groupName, StreamPosition.Beginning); - - // Read for both streams from the beginning. We shouldn't get anything back for stream1. - var pairs = new [] - { - // StreamPosition.NewMessages will send ">" which indicates "Undelivered" messages. 
- new StreamPosition(stream1, StreamPosition.NewMessages), - new StreamPosition(stream2, StreamPosition.NewMessages) - }; - - var streams = db.StreamReadGroup(pairs, groupName, "test_consumer"); - - Assert.NotNull(streams); - Assert.Single(streams); - Assert.Equal(stream2, streams[0].Key); - Assert.Equal(3, streams[0].Entries.Length); - } - } - - [Fact] - public void StreamConsumerGroupReadMultipleOnlyNewMessagesExpectNoResult() - { - const string groupName = "test_group"; - var stream1 = GetUniqueKey("stream1b"); - var stream2 = GetUniqueKey("stream2b"); - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - db.StreamAdd(stream1, "field1-1", "value1-1"); - db.StreamAdd(stream2, "field2-1", "value2-1"); - - // set both streams to read only new messages (default behavior). - db.StreamCreateConsumerGroup(stream1, groupName); - db.StreamCreateConsumerGroup(stream2, groupName); - - // We shouldn't get anything for either stream. - var pairs = new [] - { - new StreamPosition(stream1, StreamPosition.Beginning), - new StreamPosition(stream2, StreamPosition.Beginning) - }; - - var streams = db.StreamReadGroup(pairs, groupName, "test_consumer"); - - Assert.NotNull(streams); - Assert.Equal(2, streams.Length); - Assert.Empty(streams[0].Entries); - Assert.Empty(streams[1].Entries); - } - } - - [Fact] - public void StreamConsumerGroupReadMultipleOnlyNewMessagesExpect1Result() - { - const string groupName = "test_group"; - var stream1 = GetUniqueKey("stream1c"); - var stream2 = GetUniqueKey("stream2c"); - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - // These messages won't be read. - db.StreamAdd(stream1, "field1-1", "value1-1"); - db.StreamAdd(stream2, "field2-1", "value2-1"); - - // set both streams to read only new messages (default behavior). 
- db.StreamCreateConsumerGroup(stream1, groupName); - db.StreamCreateConsumerGroup(stream2, groupName); - - // We should read these though. - var id1 = db.StreamAdd(stream1, "field1-2", "value1-2"); - var id2 = db.StreamAdd(stream2, "field2-2", "value2-2"); - - // Read the new messages (messages created after the group was created). - var pairs = new [] - { - new StreamPosition(stream1, StreamPosition.NewMessages), - new StreamPosition(stream2, StreamPosition.NewMessages) - }; - - var streams = db.StreamReadGroup(pairs, groupName, "test_consumer"); - - Assert.NotNull(streams); - Assert.Equal(2, streams.Length); - Assert.Single(streams[0].Entries); - Assert.Single(streams[1].Entries); - Assert.Equal(id1, streams[0].Entries[0].Id); - Assert.Equal(id2, streams[1].Entries[0].Id); - } - } - - [Fact] - public void StreamConsumerGroupReadMultipleRestrictCount() - { - const string groupName = "test_group"; - var stream1 = GetUniqueKey("stream1d"); - var stream2 = GetUniqueKey("stream2d"); - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - var id1_1 = db.StreamAdd(stream1, "field1-1", "value1-1"); - var id1_2 = db.StreamAdd(stream1, "field1-2", "value1-2"); - - var id2_1 = db.StreamAdd(stream2, "field2-1", "value2-1"); - _ = db.StreamAdd(stream2, "field2-2", "value2-2"); - _ = db.StreamAdd(stream2, "field2-3", "value2-3"); - - // Set the initial read point in each stream, *after* the first ID in both streams. - db.StreamCreateConsumerGroup(stream1, groupName, id1_1); - db.StreamCreateConsumerGroup(stream2, groupName, id2_1); - - var pairs = new [] - { - // Read after the first id in both streams - new StreamPosition(stream1, StreamPosition.NewMessages), - new StreamPosition(stream2, StreamPosition.NewMessages) - }; - - // Restrict the count to 2 (expect only 1 message from first stream, 2 from the second). 
- var streams = db.StreamReadGroup(pairs, groupName, "test_consumer", 2); - - Assert.NotNull(streams); - Assert.Equal(2, streams.Length); - Assert.Single(streams[0].Entries); - Assert.Equal(2, streams[1].Entries.Length); - Assert.Equal(id1_2, streams[0].Entries[0].Id); - } - } - - [Fact] - public void StreamConsumerGroupViewPendingInfoNoConsumers() - { - var key = GetUniqueKey("group_pending_info_no_consumers"); - const string groupName = "test_group"; - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - db.StreamAdd(key, "field1", "value1"); - - db.StreamCreateConsumerGroup(key, groupName, StreamPosition.Beginning); - - var pendingInfo = db.StreamPending(key, groupName); - - Assert.Equal(0, pendingInfo.PendingMessageCount); - Assert.Equal(RedisValue.Null, pendingInfo.LowestPendingMessageId); - Assert.Equal(RedisValue.Null, pendingInfo.HighestPendingMessageId); - Assert.NotNull(pendingInfo.Consumers); - Assert.Empty(pendingInfo.Consumers); - } - } - - [Fact] - public void StreamConsumerGroupViewPendingInfoWhenNothingPending() - { - var key = GetUniqueKey("group_pending_info_nothing_pending"); - const string groupName = "test_group"; - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - db.StreamAdd(key, "field1", "value1"); - - db.StreamCreateConsumerGroup(key, groupName, "0-0"); - - var pendingMessages = db.StreamPendingMessages(key, - groupName, - 10, - consumerName: RedisValue.Null); - - Assert.NotNull(pendingMessages); - Assert.Empty(pendingMessages); - } - } - - [Fact] - public void StreamConsumerGroupViewPendingInfoSummary() - { - var key = GetUniqueKey("group_pending_info"); - const string groupName = "test_group"; - const string consumer1 = "test_consumer_1"; - const string consumer2 = "test_consumer_2"; - - using (var conn = Create()) - { - 
Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - var id1 = db.StreamAdd(key, "field1", "value1"); - db.StreamAdd(key, "field2", "value2"); - db.StreamAdd(key, "field3", "value3"); - var id4 = db.StreamAdd(key, "field4", "value4"); - - db.StreamCreateConsumerGroup(key, groupName, StreamPosition.Beginning); - - // Read a single message into the first consumer. - db.StreamReadGroup(key, groupName, consumer1, StreamPosition.NewMessages, 1); - - // Read the remaining messages into the second consumer. - db.StreamReadGroup(key, groupName, consumer2); - - var pendingInfo = db.StreamPending(key, groupName); - - Assert.Equal(4, pendingInfo.PendingMessageCount); - Assert.Equal(id1, pendingInfo.LowestPendingMessageId); - Assert.Equal(id4, pendingInfo.HighestPendingMessageId); - Assert.True(pendingInfo.Consumers.Length == 2); - - var consumer1Count = pendingInfo.Consumers.First(c => c.Name == consumer1).PendingMessageCount; - var consumer2Count = pendingInfo.Consumers.First(c => c.Name == consumer2).PendingMessageCount; - - Assert.Equal(1, consumer1Count); - Assert.Equal(3, consumer2Count); - } - } - - [Fact] - public async Task StreamConsumerGroupViewPendingMessageInfo() - { - var key = GetUniqueKey("group_pending_messages"); - const string groupName = "test_group"; - const string consumer1 = "test_consumer_1"; - const string consumer2 = "test_consumer_2"; - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - var id1 = db.StreamAdd(key, "field1", "value1"); - db.StreamAdd(key, "field2", "value2"); - db.StreamAdd(key, "field3", "value3"); - db.StreamAdd(key, "field4", "value4"); - - db.StreamCreateConsumerGroup(key, groupName, StreamPosition.Beginning); - - // Read a single message into the first consumer. 
- db.StreamReadGroup(key, groupName, consumer1, count: 1); - - // Read the remaining messages into the second consumer. - _ = db.StreamReadGroup(key, groupName, consumer2) ?? throw new ArgumentNullException(nameof(consumer2), "db.StreamReadGroup(key, groupName, consumer2)"); - - await Task.Delay(10).ForAwait(); - - // Get the pending info about the messages themselves. - var pendingMessageInfoList = db.StreamPendingMessages(key, groupName, 10, RedisValue.Null); - - Assert.NotNull(pendingMessageInfoList); - Assert.Equal(4, pendingMessageInfoList.Length); - Assert.Equal(consumer1, pendingMessageInfoList[0].ConsumerName); - Assert.Equal(1, pendingMessageInfoList[0].DeliveryCount); - Assert.True((int)pendingMessageInfoList[0].IdleTimeInMilliseconds > 0); - Assert.Equal(id1, pendingMessageInfoList[0].MessageId); - } - } - - [Fact] - public void StreamConsumerGroupViewPendingMessageInfoForConsumer() - { - var key = GetUniqueKey("group_pending_for_consumer"); - const string groupName = "test_group"; - const string consumer1 = "test_consumer_1"; - const string consumer2 = "test_consumer_2"; - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - db.StreamAdd(key, "field1", "value1"); - db.StreamAdd(key, "field2", "value2"); - db.StreamAdd(key, "field3", "value3"); - db.StreamAdd(key, "field4", "value4"); - - db.StreamCreateConsumerGroup(key, groupName, StreamPosition.Beginning); - - // Read a single message into the first consumer. - db.StreamReadGroup(key, groupName, consumer1, count: 1); - - // Read the remaining messages into the second consumer. - db.StreamReadGroup(key, groupName, consumer2); - - // Get the pending info about the messages themselves. 
- var pendingMessageInfoList = db.StreamPendingMessages(key, - groupName, - 10, - consumer2); - - Assert.NotNull(pendingMessageInfoList); - Assert.Equal(3, pendingMessageInfoList.Length); - } - } - - [Fact] - public void StreamDeleteConsumer() - { - var key = GetUniqueKey("delete_consumer"); - const string groupName = "test_group"; - const string consumer = "test_consumer"; - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - // Add a message to create the stream. - db.StreamAdd(key, "field1", "value1"); - db.StreamAdd(key, "field2", "value2"); - - // Create a consumer group and read the message. - db.StreamCreateConsumerGroup(key, groupName, StreamPosition.Beginning); - db.StreamReadGroup(key, groupName, consumer, StreamPosition.NewMessages); - - var preDeleteConsumers = db.StreamConsumerInfo(key, groupName); - - // Delete the consumer. - var deleteResult = db.StreamDeleteConsumer(key, groupName, consumer); - - // Should get 2 messages in the deleteResult. - var postDeleteConsumers = db.StreamConsumerInfo(key, groupName); - - Assert.Equal(2, deleteResult); - Assert.Single(preDeleteConsumers); - Assert.Empty(postDeleteConsumers); - } - } - - [Fact] - public void StreamDeleteConsumerGroup() - { - var key = GetUniqueKey("delete_consumer_group"); - const string groupName = "test_group"; - const string consumer = "test_consumer"; - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - // Add a message to create the stream. - db.StreamAdd(key, "field1", "value1"); - - // Create a consumer group and read the messages. - db.StreamCreateConsumerGroup(key, groupName, StreamPosition.Beginning); - db.StreamReadGroup(key, groupName, consumer, StreamPosition.Beginning); - - var preDeleteInfo = db.StreamInfo(key); - - // Now delete the group. 
- var deleteResult = db.StreamDeleteConsumerGroup(key, groupName); - - var postDeleteInfo = db.StreamInfo(key); - - Assert.True(deleteResult); - Assert.Equal(1, preDeleteInfo.ConsumerGroupCount); - Assert.Equal(0, postDeleteInfo.ConsumerGroupCount); - } - } - - [Fact] - public void StreamDeleteMessage() - { - var key = GetUniqueKey("delete_msg"); - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - db.StreamAdd(key, "field1", "value1"); - db.StreamAdd(key, "field2", "value2"); - var id3 = db.StreamAdd(key, "field3", "value3"); - db.StreamAdd(key, "field4", "value4"); - - var deletedCount = db.StreamDelete(key, new [] { id3 }); - var messages = db.StreamRange(key); - - Assert.Equal(1, deletedCount); - Assert.Equal(3, messages.Length); - } - } - - [Fact] - public void StreamDeleteMessages() - { - var key = GetUniqueKey("delete_msgs"); - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - db.StreamAdd(key, "field1", "value1"); - var id2 = db.StreamAdd(key, "field2", "value2"); - var id3 = db.StreamAdd(key, "field3", "value3"); - db.StreamAdd(key, "field4", "value4"); - - var deletedCount = db.StreamDelete(key, new [] { id2, id3 }, CommandFlags.None); - var messages = db.StreamRange(key); - - Assert.Equal(2, deletedCount); - Assert.Equal(2, messages.Length); - } - } - - [Fact] - public void StreamGroupInfoGet() - { - var key = GetUniqueKey("group_info"); - const string group1 = "test_group_1"; - const string group2 = "test_group_2"; - const string consumer1 = "test_consumer_1"; - const string consumer2 = "test_consumer_2"; - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - db.KeyDelete(key); - - db.StreamAdd(key, "field1", "value1"); - db.StreamAdd(key, "field2", "value2"); - 
db.StreamAdd(key, "field3", "value3"); - db.StreamAdd(key, "field4", "value4"); - - db.StreamCreateConsumerGroup(key, group1, StreamPosition.Beginning); - db.StreamCreateConsumerGroup(key, group2, StreamPosition.Beginning); - - // Read a single message into the first consumer. - db.StreamReadGroup(key, group1, consumer1, count: 1); - - // Read the remaining messages into the second consumer. - db.StreamReadGroup(key, group2, consumer2); - - var groupInfoList = db.StreamGroupInfo(key); - - Assert.NotNull(groupInfoList); - Assert.Equal(2, groupInfoList.Length); - - Assert.Equal(group1, groupInfoList[0].Name); - Assert.Equal(1, groupInfoList[0].PendingMessageCount); - Assert.True(IsMessageId(groupInfoList[0].LastDeliveredId)); // can't test actual - will vary - - Assert.Equal(group2, groupInfoList[1].Name); - Assert.Equal(4, groupInfoList[1].PendingMessageCount); - Assert.True(IsMessageId(groupInfoList[1].LastDeliveredId)); // can't test actual - will vary - } - - static bool IsMessageId(string value) - { - if (string.IsNullOrWhiteSpace(value)) return false; - return value.Length >= 3 && value.Contains("-"); - } - } - - [Fact] - public void StreamGroupConsumerInfoGet() - { - var key = GetUniqueKey("group_consumer_info"); - const string group = "test_group"; - const string consumer1 = "test_consumer_1"; - const string consumer2 = "test_consumer_2"; - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - db.StreamAdd(key, "field1", "value1"); - db.StreamAdd(key, "field2", "value2"); - db.StreamAdd(key, "field3", "value3"); - db.StreamAdd(key, "field4", "value4"); - - db.StreamCreateConsumerGroup(key, group, StreamPosition.Beginning); - db.StreamReadGroup(key, group, consumer1, count: 1); - db.StreamReadGroup(key, group, consumer2); - - var consumerInfoList = db.StreamConsumerInfo(key, group); - - Assert.NotNull(consumerInfoList); - Assert.Equal(2, consumerInfoList.Length); - - 
Assert.Equal(consumer1, consumerInfoList[0].Name); - Assert.Equal(consumer2, consumerInfoList[1].Name); - - Assert.Equal(1, consumerInfoList[0].PendingMessageCount); - Assert.Equal(3, consumerInfoList[1].PendingMessageCount); - } - } - - [Fact] - public void StreamInfoGet() - { - var key = GetUniqueKey("stream_info"); - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - var id1 = db.StreamAdd(key, "field1", "value1"); - db.StreamAdd(key, "field2", "value2"); - db.StreamAdd(key, "field3", "value3"); - var id4 = db.StreamAdd(key, "field4", "value4"); - - var streamInfo = db.StreamInfo(key); - - Assert.Equal(4, streamInfo.Length); - Assert.True(streamInfo.RadixTreeKeys > 0); - Assert.True(streamInfo.RadixTreeNodes > 0); - Assert.Equal(id1, streamInfo.FirstEntry.Id); - Assert.Equal(id4, streamInfo.LastEntry.Id); - } - } - - [Fact] - public void StreamInfoGetWithEmptyStream() - { - var key = GetUniqueKey("stream_info_empty"); - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - // Add an entry and then delete it so the stream is empty, then run streaminfo - // to ensure it functions properly on an empty stream. Namely, the first-entry - // and last-entry messages should be null. 
- - var id = db.StreamAdd(key, "field1", "value1"); - db.StreamDelete(key, new [] { id }); - - Assert.Equal(0, db.StreamLength(key)); - - var streamInfo = db.StreamInfo(key); - - Assert.True(streamInfo.FirstEntry.IsNull); - Assert.True(streamInfo.LastEntry.IsNull); - } - } - - [Fact] - public void StreamNoConsumerGroups() - { - var key = GetUniqueKey("stream_with_no_consumers"); - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - db.StreamAdd(key, "field1", "value1"); - - var groups = db.StreamGroupInfo(key); - - Assert.NotNull(groups); - Assert.Empty(groups); - } - } - - [Fact] - public void StreamPendingNoMessagesOrConsumers() - { - var key = GetUniqueKey("stream_pending_empty"); - const string groupName = "test_group"; - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - var id = db.StreamAdd(key, "field1", "value1"); - db.StreamDelete(key, new [] { id }); - - db.StreamCreateConsumerGroup(key, groupName, "0-0"); - - var pendingInfo = db.StreamPending(key, "test_group"); - - Assert.Equal(0, pendingInfo.PendingMessageCount); - Assert.Equal(RedisValue.Null, pendingInfo.LowestPendingMessageId); - Assert.Equal(RedisValue.Null, pendingInfo.HighestPendingMessageId); - Assert.NotNull(pendingInfo.Consumers); - Assert.Empty(pendingInfo.Consumers); - } - } - - [Fact] - public void StreamPositionDefaultValueIsBeginning() - { - RedisValue position = StreamPosition.Beginning; - Assert.Equal(StreamConstants.AllMessages, StreamPosition.Resolve(position, RedisCommand.XREAD)); - Assert.Equal(StreamConstants.AllMessages, StreamPosition.Resolve(position, RedisCommand.XREADGROUP)); - Assert.Equal(StreamConstants.AllMessages, StreamPosition.Resolve(position, RedisCommand.XGROUP)); - } - - [Fact] - public void StreamPositionValidateBeginning() - { - var position = StreamPosition.Beginning; - 
- Assert.Equal(StreamConstants.AllMessages, StreamPosition.Resolve(position, RedisCommand.XREAD)); - } - - [Fact] - public void StreamPositionValidateExplicit() - { - const string explicitValue = "1-0"; - const string position = explicitValue; - - Assert.Equal(explicitValue, StreamPosition.Resolve(position, RedisCommand.XREAD)); - } - - [Fact] - public void StreamPositionValidateNew() - { - var position = StreamPosition.NewMessages; - - Assert.Equal(StreamConstants.NewMessages, StreamPosition.Resolve(position, RedisCommand.XGROUP)); - Assert.Equal(StreamConstants.UndeliveredMessages, StreamPosition.Resolve(position, RedisCommand.XREADGROUP)); - Assert.ThrowsAny(() => StreamPosition.Resolve(position, RedisCommand.XREAD)); - } - - [Fact] - public void StreamRead() - { - var key = GetUniqueKey("read"); - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - var id1 = db.StreamAdd(key, "field1", "value1"); - var id2 = db.StreamAdd(key, "field2", "value2"); - var id3 = db.StreamAdd(key, "field3", "value3"); - - // Read the entire stream from the beginning. - var entries = db.StreamRead(key, "0-0"); - - Assert.Equal(3, entries.Length); - Assert.Equal(id1, entries[0].Id); - Assert.Equal(id2, entries[1].Id); - Assert.Equal(id3, entries[2].Id); - } - } - - [Fact] - public void StreamReadEmptyStream() - { - var key = GetUniqueKey("read_empty_stream"); - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - // Write to a stream to create the key. - var id1 = db.StreamAdd(key, "field1", "value1"); - - // Delete the key to empty the stream. - db.StreamDelete(key, new [] { id1 }); - var len = db.StreamLength(key); - - // Read the entire stream from the beginning. 
- var entries = db.StreamRead(key, "0-0"); - - Assert.Empty(entries); - Assert.Equal(0, len); - } - } - - [Fact] - public void StreamReadEmptyStreams() - { - var key1 = GetUniqueKey("read_empty_stream_1"); - var key2 = GetUniqueKey("read_empty_stream_2"); - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - // Write to a stream to create the key. - var id1 = db.StreamAdd(key1, "field1", "value1"); - var id2 = db.StreamAdd(key2, "field2", "value2"); - - // Delete the key to empty the stream. - db.StreamDelete(key1, new [] { id1 }); - db.StreamDelete(key2, new [] { id2 }); - - var len1 = db.StreamLength(key1); - var len2 = db.StreamLength(key2); - - // Read the entire stream from the beginning. - var entries1 = db.StreamRead(key1, "0-0"); - var entries2 = db.StreamRead(key2, "0-0"); - - Assert.Empty(entries1); - Assert.Empty(entries2); - - Assert.Equal(0, len1); - Assert.Equal(0, len2); - } - } - - [Fact] - public void StreamReadExpectedExceptionInvalidCountMultipleStream() - { - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var streamPositions = new [] - { - new StreamPosition("key1", "0-0"), - new StreamPosition("key2", "0-0") - }; - - var db = conn.GetDatabase(); - Assert.Throws(() => db.StreamRead(streamPositions, 0)); - } - } - - [Fact] - public void StreamReadExpectedExceptionInvalidCountSingleStream() - { - var key = GetUniqueKey("read_exception_invalid_count_single"); - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - Assert.Throws(() => db.StreamRead(key, "0-0", 0)); - } - } - - [Fact] - public void StreamReadExpectedExceptionNullStreamList() - { - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - 
Assert.Throws(() => db.StreamRead(null)); - } - } - - [Fact] - public void StreamReadExpectedExceptionEmptyStreamList() - { - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - var emptyList = new StreamPosition[0]; - - Assert.Throws(() => db.StreamRead(emptyList)); - } - } - - [Fact] - public void StreamReadMultipleStreams() - { - var key1 = GetUniqueKey("read_multi_1a"); - var key2 = GetUniqueKey("read_multi_2a"); - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - var id1 = db.StreamAdd(key1, "field1", "value1"); - var id2 = db.StreamAdd(key1, "field2", "value2"); - var id3 = db.StreamAdd(key2, "field3", "value3"); - var id4 = db.StreamAdd(key2, "field4", "value4"); - - // Read from both streams at the same time. - var streamList = new [] - { - new StreamPosition(key1, "0-0"), - new StreamPosition(key2, "0-0") - }; - - var streams = db.StreamRead(streamList); - - Assert.True(streams.Length == 2); - - Assert.Equal(key1, streams[0].Key); - Assert.Equal(2, streams[0].Entries.Length); - Assert.Equal(id1, streams[0].Entries[0].Id); - Assert.Equal(id2, streams[0].Entries[1].Id); - - Assert.Equal(key2, streams[1].Key); - Assert.Equal(2, streams[1].Entries.Length); - Assert.Equal(id3, streams[1].Entries[0].Id); - Assert.Equal(id4, streams[1].Entries[1].Id); - } - } - - [Fact] - public void StreamReadMultipleStreamsWithCount() - { - var key1 = GetUniqueKey("read_multi_count_1"); - var key2 = GetUniqueKey("read_multi_count_2"); - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - var id1 = db.StreamAdd(key1, "field1", "value1"); - db.StreamAdd(key1, "field2", "value2"); - var id3 = db.StreamAdd(key2, "field3", "value3"); - db.StreamAdd(key2, "field4", "value4"); - - var streamList = 
new [] - { - new StreamPosition(key1, "0-0"), - new StreamPosition(key2, "0-0") - }; - - var streams = db.StreamRead(streamList, countPerStream: 1); - - // We should get both streams back. - Assert.Equal(2, streams.Length); - - // Ensure we only got one message per stream. - Assert.Single(streams[0].Entries); - Assert.Single(streams[1].Entries); - - // Check the message IDs as well. - Assert.Equal(id1, streams[0].Entries[0].Id); - Assert.Equal(id3, streams[1].Entries[0].Id); - } - } - - [Fact] - public void StreamReadMultipleStreamsWithReadPastSecondStream() - { - var key1 = GetUniqueKey("read_multi_1b"); - var key2 = GetUniqueKey("read_multi_2b"); - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - db.StreamAdd(key1, "field1", "value1"); - db.StreamAdd(key1, "field2", "value2"); - db.StreamAdd(key2, "field3", "value3"); - var id4 = db.StreamAdd(key2, "field4", "value4"); - - var streamList = new [] - { - new StreamPosition(key1, "0-0"), - - // read past the end of stream # 2 - new StreamPosition(key2, id4) - }; - - var streams = db.StreamRead(streamList); - - // We should only get the first stream back. - Assert.Single(streams); - - Assert.Equal(key1, streams[0].Key); - Assert.Equal(2, streams[0].Entries.Length); - } - } - - [Fact] - public void StreamReadMultipleStreamsWithEmptyResponse() - { - var key1 = GetUniqueKey("read_multi_1c"); - var key2 = GetUniqueKey("read_multi_2c"); - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - db.StreamAdd(key1, "field1", "value1"); - var id2 = db.StreamAdd(key1, "field2", "value2"); - db.StreamAdd(key2, "field3", "value3"); - var id4 = db.StreamAdd(key2, "field4", "value4"); - - var streamList = new [] - { - // Read past the end of both streams. 
- new StreamPosition(key1, id2), - new StreamPosition(key2, id4) - }; - - var streams = db.StreamRead(streamList); - - // We expect an empty response. - Assert.Empty(streams); - } - } - - [Fact] - public void StreamReadPastEndOfStream() - { - var key = GetUniqueKey("read_empty"); - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - db.StreamAdd(key, "field1", "value1"); - var id2 = db.StreamAdd(key, "field2", "value2"); - - // Read after the final ID in the stream, we expect an empty array as a response. - - var entries = db.StreamRead(key, id2); - - Assert.Empty(entries); - } - } - - [Fact] - public void StreamReadRange() - { - var key = GetUniqueKey("range"); - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - var id1 = db.StreamAdd(key, "field1", "value1"); - var id2 = db.StreamAdd(key, "field2", "value2"); - - var entries = db.StreamRange(key); - - Assert.Equal(2, entries.Length); - Assert.Equal(id1, entries[0].Id); - Assert.Equal(id2, entries[1].Id); - } - } - - [Fact] - public void StreamReadRangeOfEmptyStream() - { - var key = GetUniqueKey("range_empty"); - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - var id1 = db.StreamAdd(key, "field1", "value1"); - var id2 = db.StreamAdd(key, "field2", "value2"); - - var deleted = db.StreamDelete(key, new [] { id1, id2 }); - - var entries = db.StreamRange(key); - - Assert.Equal(2, deleted); - Assert.NotNull(entries); - Assert.Empty(entries); - } - } - - [Fact] - public void StreamReadRangeWithCount() - { - var key = GetUniqueKey("range_count"); - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - var id1 = db.StreamAdd(key, 
"field1", "value1"); - db.StreamAdd(key, "field2", "value2"); - - var entries = db.StreamRange(key, count: 1); - - Assert.Single(entries); - Assert.Equal(id1, entries[0].Id); - } - } - - [Fact] - public void StreamReadRangeReverse() - { - var key = GetUniqueKey("rangerev"); - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - var id1 = db.StreamAdd(key, "field1", "value1"); - var id2 = db.StreamAdd(key, "field2", "value2"); - - var entries = db.StreamRange(key, messageOrder: Order.Descending); - - Assert.Equal(2, entries.Length); - Assert.Equal(id2, entries[0].Id); - Assert.Equal(id1, entries[1].Id); - } - } - - [Fact] - public void StreamReadRangeReverseWithCount() - { - var key = GetUniqueKey("rangerev_count"); - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - var id1 = db.StreamAdd(key, "field1", "value1"); - var id2 = db.StreamAdd(key, "field2", "value2"); - - var entries = db.StreamRange(key, id1, id2, 1, Order.Descending); - - Assert.Single(entries); - Assert.Equal(id2, entries[0].Id); - } - } - - [Fact] - public void StreamReadWithAfterIdAndCount_1() - { - var key = GetUniqueKey("read1"); - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - var id1 = db.StreamAdd(key, "field1", "value1"); - var id2 = db.StreamAdd(key, "field2", "value2"); - db.StreamAdd(key, "field3", "value3"); - - // Only read a single item from the stream. 
- var entries = db.StreamRead(key, id1, 1); - - Assert.Single(entries); - Assert.Equal(id2, entries[0].Id); - } - } - - [Fact] - public void StreamReadWithAfterIdAndCount_2() - { - var key = GetUniqueKey("read2"); - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - var id1 = db.StreamAdd(key, "field1", "value1"); - var id2 = db.StreamAdd(key, "field2", "value2"); - var id3 = db.StreamAdd(key, "field3", "value3"); - db.StreamAdd(key, "field4", "value4"); - - // Read multiple items from the stream. - var entries = db.StreamRead(key, id1, 2); - - Assert.Equal(2, entries.Length); - Assert.Equal(id2, entries[0].Id); - Assert.Equal(id3, entries[1].Id); - } - } - - [Fact] - public void StreamTrimLength() - { - var key = GetUniqueKey("trimlen"); - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - // Add a couple items and check length. - db.StreamAdd(key, "field1", "value1"); - db.StreamAdd(key, "field2", "value2"); - db.StreamAdd(key, "field3", "value3"); - db.StreamAdd(key, "field4", "value4"); - - var numRemoved = db.StreamTrim(key, 1); - var len = db.StreamLength(key); - - Assert.Equal(3, numRemoved); - Assert.Equal(1, len); - } - } - - [Fact] - public void StreamVerifyLength() - { - var key = GetUniqueKey("len"); - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - // Add a couple items and check length. 
- db.StreamAdd(key, "field1", "value1"); - db.StreamAdd(key, "field2", "value2"); - - var len = db.StreamLength(key); - - Assert.Equal(2, len); - } - } - - [Fact] - public async Task AddWithApproxCountAsync() - { - var key = GetUniqueKey("approx-async"); - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - await db.StreamAddAsync(key, "field", "value", maxLength: 10, useApproximateMaxLength: true, flags: CommandFlags.None).ConfigureAwait(false); - } - } - - [Fact] - public void AddWithApproxCount() - { - var key = GetUniqueKey("approx"); - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - db.StreamAdd(key, "field", "value", maxLength: 10, useApproximateMaxLength: true, flags: CommandFlags.None); - } - } - - [Fact] - public void StreamReadGroupWithNoAckShowsNoPendingMessages() - { - var key = GetUniqueKey("read_group_noack"); - const string groupName = "test_group"; - const string consumer = "consumer"; - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - db.StreamAdd(key, "field1", "value1"); - db.StreamAdd(key, "field2", "value2"); - - db.StreamCreateConsumerGroup(key, groupName, StreamPosition.NewMessages); - - db.StreamReadGroup(key, - groupName, - consumer, - StreamPosition.NewMessages, - noAck: true); - - var pendingInfo = db.StreamPending(key, groupName); - - Assert.Equal(0, pendingInfo.PendingMessageCount); - } - } - - [Fact] - public void StreamReadGroupMultiStreamWithNoAckShowsNoPendingMessages() - { - var key1 = GetUniqueKey("read_group_noack1"); - var key2 = GetUniqueKey("read_group_noack2"); - const string groupName = "test_group"; - const string consumer = "consumer"; - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r 
=> r.Streams); - - var db = conn.GetDatabase(); - - db.StreamAdd(key1, "field1", "value1"); - db.StreamAdd(key1, "field2", "value2"); - - db.StreamAdd(key2, "field3", "value3"); - db.StreamAdd(key2, "field4", "value4"); - - db.StreamCreateConsumerGroup(key1, groupName, StreamPosition.NewMessages); - db.StreamCreateConsumerGroup(key2, groupName, StreamPosition.NewMessages); - - db.StreamReadGroup( - new [] - { - new StreamPosition(key1, StreamPosition.NewMessages), - new StreamPosition(key2, StreamPosition.NewMessages) - }, - groupName, - consumer, - noAck: true); - - var pending1 = db.StreamPending(key1, groupName); - var pending2 = db.StreamPending(key2, groupName); - - Assert.Equal(0, pending1.PendingMessageCount); - Assert.Equal(0, pending2.PendingMessageCount); - } - } - - private RedisKey GetUniqueKey(string type) => $"{type}_stream_{DateTimeOffset.UtcNow.ToUnixTimeMilliseconds()}"; - - - [Fact] - public async Task StreamReadIndexerUsage() - { - var streamName = GetUniqueKey("read-group-indexer"); - - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.Streams), r => r.Streams); - - var db = conn.GetDatabase(); - - await db.StreamAddAsync(streamName, new[] { - new NameValueEntry("x", "blah"), - new NameValueEntry("msg", @"{""name"":""test"",""id"":123}"), - new NameValueEntry("y", "more blah"), - }); - - var streamResult = await db.StreamRangeAsync(streamName, count: 1000); - var evntJson = streamResult - .Select(x => (dynamic)JsonConvert.DeserializeObject(x["msg"])) - .ToList(); - var obj = Assert.Single(evntJson); - Assert.Equal(123, (int)obj.id); - Assert.Equal("test", (string)obj.name); - } - } - - } -} diff --git a/tests/StackExchange.Redis.Tests/StringTests.cs b/tests/StackExchange.Redis.Tests/StringTests.cs new file mode 100644 index 000000000..2dcf8f6fb --- /dev/null +++ b/tests/StackExchange.Redis.Tests/StringTests.cs @@ -0,0 +1,1034 @@ +using System; +using System.IO; +using System.Linq; +using System.Text; +using 
System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +/// +/// Tests for . +/// +[RunPerProtocol] +public class StringTests(ITestOutputHelper output, SharedConnectionFixture fixture) : TestBase(output, fixture) +{ + [Fact] + public async Task Append() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + var server = GetServer(conn); + var key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + var l0 = server.Features.StringLength ? db.StringLengthAsync(key) : null; + + var s0 = db.StringGetAsync(key); + + db.StringSet(key, "abc", flags: CommandFlags.FireAndForget); + var s1 = db.StringGetAsync(key); + var l1 = server.Features.StringLength ? db.StringLengthAsync(key) : null; + + var result = db.StringAppendAsync(key, Encode("defgh")); + var s3 = db.StringGetAsync(key); + var l2 = server.Features.StringLength ? db.StringLengthAsync(key) : null; + + Assert.Null((string?)await s0); + Assert.Equal("abc", await s1); + Assert.Equal(8, await result); + Assert.Equal("abcdefgh", await s3); + + if (server.Features.StringLength) + { + Assert.Equal(0, await l0!); + Assert.Equal(3, await l1!); + Assert.Equal(8, await l2!); + } + } + + [Fact] + public async Task Set() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + var key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + + db.StringSet(key, "abc", flags: CommandFlags.FireAndForget); + var v1 = db.StringGetAsync(key); + + db.StringSet(key, Encode("def"), flags: CommandFlags.FireAndForget); + var v2 = db.StringGetAsync(key); + + Assert.Equal("abc", await v1); + Assert.Equal("def", Decode(await v2)); + } + + [Fact] + public async Task SetEmpty() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + var key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + + db.StringSet(key, new byte[] { }); + var exists = await db.KeyExistsAsync(key); + var val = await db.StringGetAsync(key); + + Assert.True(exists); + 
Log("Value: " + val); + Assert.Equal(0, val.Length()); + } + + [Fact] + public async Task StringGetSetExpiryNoValue() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + + var emptyVal = await db.StringGetSetExpiryAsync(key, TimeSpan.FromHours(1)); + + Assert.Equal(RedisValue.Null, emptyVal); + } + + [Fact] + public async Task StringGetSetExpiryRelative() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + + db.StringSet(key, "abc", TimeSpan.FromHours(1)); + var relativeSec = db.StringGetSetExpiryAsync(key, TimeSpan.FromMinutes(30)); + var relativeSecTtl = db.KeyTimeToLiveAsync(key); + + Assert.Equal("abc", await relativeSec); + var time = await relativeSecTtl; + Assert.NotNull(time); + Assert.InRange(time.Value, TimeSpan.FromMinutes(29.8), TimeSpan.FromMinutes(30.2)); + } + + [Fact] + public async Task StringGetSetExpiryAbsolute() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + + db.StringSet(key, "abc", TimeSpan.FromHours(1)); + var newDate = DateTime.UtcNow.AddMinutes(30); + var val = db.StringGetSetExpiryAsync(key, newDate); + var valTtl = db.KeyTimeToLiveAsync(key); + + Assert.Equal("abc", await val); + var time = await valTtl; + Assert.NotNull(time); + Assert.InRange(time.Value, TimeSpan.FromMinutes(29.8), TimeSpan.FromMinutes(30.2)); + + // And ensure our type checking works + var ex = await Assert.ThrowsAsync(() => db.StringGetSetExpiryAsync(key, new DateTime(100, DateTimeKind.Unspecified))); + Assert.NotNull(ex); + } + + [Fact] + public async Task StringGetSetExpiryPersist() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var key = Me(); + 
db.KeyDelete(key, CommandFlags.FireAndForget); + + db.StringSet(key, "abc", TimeSpan.FromHours(1)); + var val = db.StringGetSetExpiryAsync(key, null); + var valTtl = db.KeyTimeToLiveAsync(key); + + Assert.Equal("abc", await val); + Assert.Null(await valTtl); + } + + [Fact] + public async Task GetLease() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + var key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + + db.StringSet(key, "abc", flags: CommandFlags.FireAndForget); + using (var v1 = await db.StringGetLeaseAsync(key).ConfigureAwait(false)) + { + string? s = v1?.DecodeString(); + Assert.Equal("abc", s); + } + } + + [Fact] + public async Task GetLeaseAsStream() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + var key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + + db.StringSet(key, "abc", flags: CommandFlags.FireAndForget); + var lease = await db.StringGetLeaseAsync(key).ConfigureAwait(false); + Assert.NotNull(lease); + using (var v1 = lease.AsStream()) + { + using (var sr = new StreamReader(v1)) + { + string s = sr.ReadToEnd(); + Assert.Equal("abc", s); + } + } + } + + [Fact] + public async Task GetDelete() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var prefix = Me(); + db.KeyDelete(prefix + "1", CommandFlags.FireAndForget); + db.KeyDelete(prefix + "2", CommandFlags.FireAndForget); + db.StringSet(prefix + "1", "abc", flags: CommandFlags.FireAndForget); + + Assert.True(db.KeyExists(prefix + "1")); + Assert.False(db.KeyExists(prefix + "2")); + + var s0 = db.StringGetDelete(prefix + "1"); + var s2 = db.StringGetDelete(prefix + "2"); + + Assert.False(db.KeyExists(prefix + "1")); + Assert.Equal("abc", s0); + Assert.Equal(RedisValue.Null, s2); + } + + [Fact] + public async Task GetDeleteAsync() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var prefix = Me(); + db.KeyDelete(prefix + 
"1", CommandFlags.FireAndForget); + db.KeyDelete(prefix + "2", CommandFlags.FireAndForget); + db.StringSet(prefix + "1", "abc", flags: CommandFlags.FireAndForget); + + Assert.True(db.KeyExists(prefix + "1")); + Assert.False(db.KeyExists(prefix + "2")); + + var s0 = db.StringGetDeleteAsync(prefix + "1"); + var s2 = db.StringGetDeleteAsync(prefix + "2"); + + Assert.False(db.KeyExists(prefix + "1")); + Assert.Equal("abc", await s0); + Assert.Equal(RedisValue.Null, await s2); + } + + [Fact] + public async Task SetNotExists() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + var prefix = Me(); + db.KeyDelete(prefix + "1", CommandFlags.FireAndForget); + db.KeyDelete(prefix + "2", CommandFlags.FireAndForget); + db.KeyDelete(prefix + "3", CommandFlags.FireAndForget); + db.KeyDelete(prefix + "4", CommandFlags.FireAndForget); + db.KeyDelete(prefix + "5", CommandFlags.FireAndForget); + db.StringSet(prefix + "1", "abc", flags: CommandFlags.FireAndForget); + + var x0 = db.StringSetAsync(prefix + "1", "def", when: When.NotExists); + var x1 = db.StringSetAsync(prefix + "1", Encode("def"), when: When.NotExists); + var x2 = db.StringSetAsync(prefix + "2", "def", when: When.NotExists); + var x3 = db.StringSetAsync(prefix + "3", Encode("def"), when: When.NotExists); + var x4 = db.StringSetAsync(prefix + "4", "def", expiry: TimeSpan.FromSeconds(4), when: When.NotExists); + var x5 = db.StringSetAsync(prefix + "5", "def", expiry: TimeSpan.FromMilliseconds(4001), when: When.NotExists); + + var s0 = db.StringGetAsync(prefix + "1"); + var s2 = db.StringGetAsync(prefix + "2"); + var s3 = db.StringGetAsync(prefix + "3"); + + Assert.False(await x0); + Assert.False(await x1); + Assert.True(await x2); + Assert.True(await x3); + Assert.True(await x4); + Assert.True(await x5); + Assert.Equal("abc", await s0); + Assert.Equal("def", await s2); + Assert.Equal("def", await s3); + } + + [Fact] + public async Task SetKeepTtl() + { + await using var conn = Create(require: 
RedisFeatures.v6_0_0); + + var db = conn.GetDatabase(); + var prefix = Me(); + db.KeyDelete(prefix + "1", CommandFlags.FireAndForget); + db.KeyDelete(prefix + "2", CommandFlags.FireAndForget); + db.KeyDelete(prefix + "3", CommandFlags.FireAndForget); + db.StringSet(prefix + "1", "abc", flags: CommandFlags.FireAndForget); + db.StringSet(prefix + "2", "abc", expiry: TimeSpan.FromMinutes(5), flags: CommandFlags.FireAndForget); + db.StringSet(prefix + "3", "abc", expiry: TimeSpan.FromMinutes(10), flags: CommandFlags.FireAndForget); + + var x0 = db.KeyTimeToLiveAsync(prefix + "1"); + var x1 = db.KeyTimeToLiveAsync(prefix + "2"); + var x2 = db.KeyTimeToLiveAsync(prefix + "3"); + + Assert.Null(await x0); + Assert.True(await x1 > TimeSpan.FromMinutes(4), "Over 4"); + Assert.True(await x1 <= TimeSpan.FromMinutes(5), "Under 5"); + Assert.True(await x2 > TimeSpan.FromMinutes(9), "Over 9"); + Assert.True(await x2 <= TimeSpan.FromMinutes(10), "Under 10"); + + db.StringSet(prefix + "1", "def", Expiration.KeepTtl, flags: CommandFlags.FireAndForget); + db.StringSet(prefix + "2", "def", flags: CommandFlags.FireAndForget); + db.StringSet(prefix + "3", "def", Expiration.KeepTtl, flags: CommandFlags.FireAndForget); + + var y0 = db.KeyTimeToLiveAsync(prefix + "1"); + var y1 = db.KeyTimeToLiveAsync(prefix + "2"); + var y2 = db.KeyTimeToLiveAsync(prefix + "3"); + + Assert.Null(await y0); + Assert.Null(await y1); + Assert.True(await y2 > TimeSpan.FromMinutes(9), "Over 9"); + Assert.True(await y2 <= TimeSpan.FromMinutes(10), "Under 10"); + } + + [Fact] + public async Task SetAndGet() + { + await using var conn = Create(require: RedisFeatures.v6_2_0); + + var db = conn.GetDatabase(); + var prefix = Me(); + db.KeyDelete(prefix + "1", CommandFlags.FireAndForget); + db.KeyDelete(prefix + "2", CommandFlags.FireAndForget); + db.KeyDelete(prefix + "3", CommandFlags.FireAndForget); + db.KeyDelete(prefix + "4", CommandFlags.FireAndForget); + db.KeyDelete(prefix + "5", CommandFlags.FireAndForget); + 
db.KeyDelete(prefix + "6", CommandFlags.FireAndForget); + db.KeyDelete(prefix + "7", CommandFlags.FireAndForget); + db.KeyDelete(prefix + "8", CommandFlags.FireAndForget); + db.KeyDelete(prefix + "9", CommandFlags.FireAndForget); + db.KeyDelete(prefix + "10", CommandFlags.FireAndForget); + db.StringSet(prefix + "1", "abc", flags: CommandFlags.FireAndForget); + db.StringSet(prefix + "2", "abc", flags: CommandFlags.FireAndForget); + db.StringSet(prefix + "4", "abc", flags: CommandFlags.FireAndForget); + db.StringSet(prefix + "6", "abc", flags: CommandFlags.FireAndForget); + db.StringSet(prefix + "7", "abc", flags: CommandFlags.FireAndForget); + db.StringSet(prefix + "8", "abc", flags: CommandFlags.FireAndForget); + db.StringSet(prefix + "9", "abc", flags: CommandFlags.FireAndForget); + db.StringSet(prefix + "10", "abc", expiry: TimeSpan.FromMinutes(10), flags: CommandFlags.FireAndForget); + + var x0 = db.StringSetAndGetAsync(prefix + "1", RedisValue.Null); + var x1 = db.StringSetAndGetAsync(prefix + "2", "def"); + var x2 = db.StringSetAndGetAsync(prefix + "3", "def"); + var x3 = db.StringSetAndGetAsync(prefix + "4", "def", when: When.Exists); + var x4 = db.StringSetAndGetAsync(prefix + "5", "def", when: When.Exists); + var x5 = db.StringSetAndGetAsync(prefix + "6", "def", expiry: TimeSpan.FromSeconds(4)); + var x6 = db.StringSetAndGetAsync(prefix + "7", "def", expiry: TimeSpan.FromMilliseconds(4001)); + var x7 = db.StringSetAndGetAsync(prefix + "8", "def", expiry: TimeSpan.FromSeconds(4), when: When.Exists); + var x8 = db.StringSetAndGetAsync(prefix + "9", "def", expiry: TimeSpan.FromMilliseconds(4001), when: When.Exists); + + var y0 = db.StringSetAndGetAsync(prefix + "10", "def", keepTtl: true); + var y1 = db.KeyTimeToLiveAsync(prefix + "10"); + var y2 = db.StringGetAsync(prefix + "10"); + + var s0 = db.StringGetAsync(prefix + "1"); + var s1 = db.StringGetAsync(prefix + "2"); + var s2 = db.StringGetAsync(prefix + "3"); + var s3 = db.StringGetAsync(prefix + "4"); + 
var s4 = db.StringGetAsync(prefix + "5"); + + Assert.Equal("abc", await x0); + Assert.Equal("abc", await x1); + Assert.Equal(RedisValue.Null, await x2); + Assert.Equal("abc", await x3); + Assert.Equal(RedisValue.Null, await x4); + Assert.Equal("abc", await x5); + Assert.Equal("abc", await x6); + Assert.Equal("abc", await x7); + Assert.Equal("abc", await x8); + + Assert.Equal("abc", await y0); + Assert.True(await y1 <= TimeSpan.FromMinutes(10), "Under 10 min"); + Assert.True(await y1 >= TimeSpan.FromMinutes(8), "Over 8 min"); + Assert.Equal("def", await y2); + + Assert.Equal(RedisValue.Null, await s0); + Assert.Equal("def", await s1); + Assert.Equal("def", await s2); + Assert.Equal("def", await s3); + Assert.Equal(RedisValue.Null, await s4); + } + + [Fact] + public async Task SetNotExistsAndGet() + { + await using var conn = Create(require: RedisFeatures.v7_0_0_rc1); + + var db = conn.GetDatabase(); + var prefix = Me(); + db.KeyDelete(prefix + "1", CommandFlags.FireAndForget); + db.KeyDelete(prefix + "2", CommandFlags.FireAndForget); + db.KeyDelete(prefix + "3", CommandFlags.FireAndForget); + db.KeyDelete(prefix + "4", CommandFlags.FireAndForget); + db.StringSet(prefix + "1", "abc", flags: CommandFlags.FireAndForget); + + var x0 = db.StringSetAndGetAsync(prefix + "1", "def", when: When.NotExists); + var x1 = db.StringSetAndGetAsync(prefix + "2", "def", when: When.NotExists); + var x2 = db.StringSetAndGetAsync(prefix + "3", "def", expiry: TimeSpan.FromSeconds(4), when: When.NotExists); + var x3 = db.StringSetAndGetAsync(prefix + "4", "def", expiry: TimeSpan.FromMilliseconds(4001), when: When.NotExists); + + var s0 = db.StringGetAsync(prefix + "1"); + var s1 = db.StringGetAsync(prefix + "2"); + + Assert.Equal("abc", await x0); + Assert.Equal(RedisValue.Null, await x1); + Assert.Equal(RedisValue.Null, await x2); + Assert.Equal(RedisValue.Null, await x3); + + Assert.Equal("abc", await s0); + Assert.Equal("def", await s1); + } + + [Fact] + public async Task Ranges() + { 
+ await using var conn = Create(require: RedisFeatures.v2_1_8); + + var db = conn.GetDatabase(); + var key = Me(); + + db.KeyDelete(key, CommandFlags.FireAndForget); + + db.StringSet(key, "abcdefghi", flags: CommandFlags.FireAndForget); + db.StringSetRange(key, 2, "xy", CommandFlags.FireAndForget); + db.StringSetRange(key, 4, Encode("z"), CommandFlags.FireAndForget); + + var val = db.StringGetAsync(key); + + Assert.Equal("abxyzfghi", await val); + } + + [Fact] + public async Task IncrDecr() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + var key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + + db.StringSet(key, "2", flags: CommandFlags.FireAndForget); + var v1 = db.StringIncrementAsync(key); + var v2 = db.StringIncrementAsync(key, 5); + var v3 = db.StringIncrementAsync(key, -2); + var v4 = db.StringDecrementAsync(key); + var v5 = db.StringDecrementAsync(key, 5); + var v6 = db.StringDecrementAsync(key, -2); + var s = db.StringGetAsync(key); + + Assert.Equal(3, await v1); + Assert.Equal(8, await v2); + Assert.Equal(6, await v3); + Assert.Equal(5, await v4); + Assert.Equal(0, await v5); + Assert.Equal(2, await v6); + Assert.Equal("2", await s); + } + + [Fact] + public async Task IncrDecrFloat() + { + await using var conn = Create(require: RedisFeatures.v2_6_0); + + var db = conn.GetDatabase(); + var key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + + db.StringSet(key, "2", flags: CommandFlags.FireAndForget); + var v1 = db.StringIncrementAsync(key, 1.1); + var v2 = db.StringIncrementAsync(key, 5.0); + var v3 = db.StringIncrementAsync(key, -2.0); + var v4 = db.StringIncrementAsync(key, -1.0); + var v5 = db.StringIncrementAsync(key, -5.0); + var v6 = db.StringIncrementAsync(key, 2.0); + + var s = db.StringGetAsync(key); + + Assert.Equal(3.1, await v1, 5); + Assert.Equal(8.1, await v2, 5); + Assert.Equal(6.1, await v3, 5); + Assert.Equal(5.1, await v4, 5); + Assert.Equal(0.1, await v5, 5); + Assert.Equal(2.1, await v6, 
5); + Assert.Equal(2.1, (double)await s, 5); + } + + [Fact] + public async Task GetRange() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + var key = Me(); + db.KeyDelete(key, CommandFlags.FireAndForget); + + db.StringSet(key, "abcdefghi", flags: CommandFlags.FireAndForget); + var s = db.StringGetRangeAsync(key, 2, 4); + var b = db.StringGetRangeAsync(key, 2, 4); + + Assert.Equal("cde", await s); + Assert.Equal("cde", Decode(await b)); + } + + [Fact] + public async Task BitCount() + { + await using var conn = Create(require: RedisFeatures.v2_6_0); + + var db = conn.GetDatabase(); + var key = Me(); + db.KeyDelete(key, flags: CommandFlags.FireAndForget); + db.StringSet(key, "foobar", flags: CommandFlags.FireAndForget); + + var r1 = db.StringBitCount(key); + var r2 = db.StringBitCount(key, 0, 0); + var r3 = db.StringBitCount(key, 1, 1); + + Assert.Equal(26, r1); + Assert.Equal(4, r2); + Assert.Equal(6, r3); + + // Async + r1 = await db.StringBitCountAsync(key); + r2 = await db.StringBitCountAsync(key, 0, 0); + r3 = await db.StringBitCountAsync(key, 1, 1); + + Assert.Equal(26, r1); + Assert.Equal(4, r2); + Assert.Equal(6, r3); + } + + [Fact] + public async Task BitCountWithBitUnit() + { + await using var conn = Create(require: RedisFeatures.v7_0_0_rc1); + + var db = conn.GetDatabase(); + var key = Me(); + db.KeyDelete(key, flags: CommandFlags.FireAndForget); + db.StringSet(key, "foobar", flags: CommandFlags.FireAndForget); + + var r1 = db.StringBitCount(key, 1, 1); // Using default byte + var r2 = db.StringBitCount(key, 1, 1, StringIndexType.Bit); + + Assert.Equal(6, r1); + Assert.Equal(1, r2); + + // Async + r1 = await db.StringBitCountAsync(key, 1, 1); // Using default byte + r2 = await db.StringBitCountAsync(key, 1, 1, StringIndexType.Bit); + + Assert.Equal(6, r1); + Assert.Equal(1, r2); + } + + [Fact] + public async Task BitOp() + { + await using var conn = Create(require: RedisFeatures.v2_6_0); + + var db = conn.GetDatabase(); + var prefix 
= Me(); + var key1 = prefix + "1"; + var key2 = prefix + "2"; + var key3 = prefix + "3"; + db.StringSet(key1, new byte[] { 3 }, flags: CommandFlags.FireAndForget); + db.StringSet(key2, new byte[] { 6 }, flags: CommandFlags.FireAndForget); + db.StringSet(key3, new byte[] { 12 }, flags: CommandFlags.FireAndForget); + + var len_and = db.StringBitOperationAsync(Bitwise.And, "and", [key1, key2, key3]); + var len_or = db.StringBitOperationAsync(Bitwise.Or, "or", [key1, key2, key3]); + var len_xor = db.StringBitOperationAsync(Bitwise.Xor, "xor", [key1, key2, key3]); + var len_not = db.StringBitOperationAsync(Bitwise.Not, "not", key1); + + Assert.Equal(1, await len_and); + Assert.Equal(1, await len_or); + Assert.Equal(1, await len_xor); + Assert.Equal(1, await len_not); + + var r_and = ((byte[]?)(await db.StringGetAsync("and").ForAwait()))?.Single(); + var r_or = ((byte[]?)(await db.StringGetAsync("or").ForAwait()))?.Single(); + var r_xor = ((byte[]?)(await db.StringGetAsync("xor").ForAwait()))?.Single(); + var r_not = ((byte[]?)(await db.StringGetAsync("not").ForAwait()))?.Single(); + + Assert.Equal((byte)(3 & 6 & 12), r_and); + Assert.Equal((byte)(3 | 6 | 12), r_or); + Assert.Equal((byte)(3 ^ 6 ^ 12), r_xor); + Assert.Equal(unchecked((byte)(~3)), r_not); + } + + [Fact] + public async Task BitOpExtended() + { + await using var conn = Create(require: RedisFeatures.v8_2_0_rc1); + var db = conn.GetDatabase(); + var prefix = Me(); + var keyX = prefix + "X"; + var keyY1 = prefix + "Y1"; + var keyY2 = prefix + "Y2"; + var keyY3 = prefix + "Y3"; + + // Clean up keys + db.KeyDelete([keyX, keyY1, keyY2, keyY3], CommandFlags.FireAndForget); + + // Set up test data with more complex patterns + // X = 11110000 (240) + // Y1 = 10101010 (170) + // Y2 = 01010101 (85) + // Y3 = 11001100 (204) + db.StringSet(keyX, new byte[] { 240 }, flags: CommandFlags.FireAndForget); + db.StringSet(keyY1, new byte[] { 170 }, flags: CommandFlags.FireAndForget); + db.StringSet(keyY2, new byte[] { 85 }, 
flags: CommandFlags.FireAndForget); + db.StringSet(keyY3, new byte[] { 204 }, flags: CommandFlags.FireAndForget); + + // Test DIFF: X ∧ ¬(Y1 ∨ Y2 ∨ Y3) + // Y1 ∨ Y2 ∨ Y3 = 170 | 85 | 204 = 255 + // X ∧ ¬(Y1 ∨ Y2 ∨ Y3) = 240 & ~255 = 240 & 0 = 0 + var len_diff = await db.StringBitOperationAsync(Bitwise.Diff, "diff", [keyX, keyY1, keyY2, keyY3]); + Assert.Equal(1, len_diff); + var r_diff = ((byte[]?)(await db.StringGetAsync("diff")))?.Single(); + Assert.Equal((byte)0, r_diff); + + // Test DIFF1: ¬X ∧ (Y1 ∨ Y2 ∨ Y3) + // ¬X = ~240 = 15 + // Y1 ∨ Y2 ∨ Y3 = 255 + // ¬X ∧ (Y1 ∨ Y2 ∨ Y3) = 15 & 255 = 15 + var len_diff1 = await db.StringBitOperationAsync(Bitwise.Diff1, "diff1", [keyX, keyY1, keyY2, keyY3]); + Assert.Equal(1, len_diff1); + var r_diff1 = ((byte[]?)(await db.StringGetAsync("diff1")))?.Single(); + Assert.Equal((byte)15, r_diff1); + + // Test ANDOR: X ∧ (Y1 ∨ Y2 ∨ Y3) + // Y1 ∨ Y2 ∨ Y3 = 255 + // X ∧ (Y1 ∨ Y2 ∨ Y3) = 240 & 255 = 240 + var len_andor = await db.StringBitOperationAsync(Bitwise.AndOr, "andor", [keyX, keyY1, keyY2, keyY3]); + Assert.Equal(1, len_andor); + var r_andor = ((byte[]?)(await db.StringGetAsync("andor")))?.Single(); + Assert.Equal((byte)240, r_andor); + + // Test ONE: bits set in exactly one bitmap + // For X=240, Y1=170, Y2=85, Y3=204 + // We need to count bits that appear in exactly one of these values + var len_one = await db.StringBitOperationAsync(Bitwise.One, "one", [keyX, keyY1, keyY2, keyY3]); + Assert.Equal(1, len_one); + var r_one = ((byte[]?)(await db.StringGetAsync("one")))?.Single(); + + // Calculate expected ONE result manually + // Bit 7: X=1, Y1=1, Y2=0, Y3=1 -> count=3, not exactly 1 + // Bit 6: X=1, Y1=0, Y2=1, Y3=1 -> count=3, not exactly 1 + // Bit 5: X=1, Y1=1, Y2=0, Y3=0 -> count=2, not exactly 1 + // Bit 4: X=1, Y1=0, Y2=1, Y3=0 -> count=2, not exactly 1 + // Bit 3: X=0, Y1=1, Y2=0, Y3=1 -> count=2, not exactly 1 + // Bit 2: X=0, Y1=0, Y2=1, Y3=1 -> count=2, not exactly 1 + // Bit 1: X=0, Y1=1, Y2=0, Y3=0 -> count=1, 
exactly 1! -> bit should be set + // Bit 0: X=0, Y1=0, Y2=1, Y3=0 -> count=1, exactly 1! -> bit should be set + // Expected result: 00000011 = 3 + Assert.Equal((byte)3, r_one); + } + + [Fact] + public async Task BitOpTwoOperands() + { + await using var conn = Create(require: RedisFeatures.v8_2_0_rc1); + var db = conn.GetDatabase(); + var prefix = Me(); + var key1 = prefix + "1"; + var key2 = prefix + "2"; + + // Clean up keys + db.KeyDelete([key1, key2], CommandFlags.FireAndForget); + + // Test with two operands: key1=10101010 (170), key2=11001100 (204) + db.StringSet(key1, new byte[] { 170 }, flags: CommandFlags.FireAndForget); + db.StringSet(key2, new byte[] { 204 }, flags: CommandFlags.FireAndForget); + + // Test DIFF: key1 ∧ ¬key2 = 170 & ~204 = 170 & 51 = 34 + var len_diff = await db.StringBitOperationAsync(Bitwise.Diff, "diff2", [key1, key2]); + Assert.Equal(1, len_diff); + var r_diff = ((byte[]?)(await db.StringGetAsync("diff2")))?.Single(); + Assert.Equal((byte)(170 & ~204), r_diff); + + // Test ONE with two operands (should be equivalent to XOR) + var len_one = await db.StringBitOperationAsync(Bitwise.One, "one2", [key1, key2]); + Assert.Equal(1, len_one); + var r_one = ((byte[]?)(await db.StringGetAsync("one2")))?.Single(); + Assert.Equal((byte)(170 ^ 204), r_one); + + // Verify ONE equals XOR for two operands + var len_xor = await db.StringBitOperationAsync(Bitwise.Xor, "xor2", [key1, key2]); + Assert.Equal(1, len_xor); + var r_xor = ((byte[]?)(await db.StringGetAsync("xor2")))?.Single(); + Assert.Equal(r_one, r_xor); + } + + [Fact] + public async Task BitOpDiff() + { + await using var conn = Create(require: RedisFeatures.v8_2_0_rc1); + var db = conn.GetDatabase(); + var prefix = Me(); + var keyX = prefix + "X"; + var keyY1 = prefix + "Y1"; + var keyY2 = prefix + "Y2"; + var keyResult = prefix + "result"; + + // Clean up keys + db.KeyDelete([keyX, keyY1, keyY2, keyResult], CommandFlags.FireAndForget); + + // Set up test data: X=11110000, Y1=10100000, 
Y2=01010000 + // Expected DIFF result: X ∧ ¬(Y1 ∨ Y2) = 11110000 ∧ ¬(11110000) = 00000000 + db.StringSet(keyX, new byte[] { 0b11110000 }, flags: CommandFlags.FireAndForget); + db.StringSet(keyY1, new byte[] { 0b10100000 }, flags: CommandFlags.FireAndForget); + db.StringSet(keyY2, new byte[] { 0b01010000 }, flags: CommandFlags.FireAndForget); + + var length = db.StringBitOperation(Bitwise.Diff, keyResult, [keyX, keyY1, keyY2]); + Assert.Equal(1, length); + + var result = ((byte[]?)db.StringGet(keyResult))?.Single(); + // X ∧ ¬(Y1 ∨ Y2) = 11110000 ∧ ¬(11110000) = 11110000 ∧ 00001111 = 00000000 + Assert.Equal((byte)0b00000000, result); + } + + [Fact] + public async Task BitOpDiff1() + { + await using var conn = Create(require: RedisFeatures.v8_2_0_rc1); + var db = conn.GetDatabase(); + var prefix = Me(); + var keyX = prefix + "X"; + var keyY1 = prefix + "Y1"; + var keyY2 = prefix + "Y2"; + var keyResult = prefix + "result"; + + // Clean up keys + db.KeyDelete([keyX, keyY1, keyY2, keyResult], CommandFlags.FireAndForget); + + // Set up test data: X=11000000, Y1=10100000, Y2=01010000 + // Expected DIFF1 result: ¬X ∧ (Y1 ∨ Y2) = ¬11000000 ∧ (10100000 ∨ 01010000) = 00111111 ∧ 11110000 = 00110000 + db.StringSet(keyX, new byte[] { 0b11000000 }, flags: CommandFlags.FireAndForget); + db.StringSet(keyY1, new byte[] { 0b10100000 }, flags: CommandFlags.FireAndForget); + db.StringSet(keyY2, new byte[] { 0b01010000 }, flags: CommandFlags.FireAndForget); + + var length = db.StringBitOperation(Bitwise.Diff1, keyResult, [keyX, keyY1, keyY2]); + Assert.Equal(1, length); + + var result = ((byte[]?)db.StringGet(keyResult))?.Single(); + // ¬X ∧ (Y1 ∨ Y2) = 00111111 ∧ 11110000 = 00110000 + Assert.Equal((byte)0b00110000, result); + } + + [Fact] + public async Task BitOpAndOr() + { + await using var conn = Create(require: RedisFeatures.v8_2_0_rc1); + var db = conn.GetDatabase(); + var prefix = Me(); + var keyX = prefix + "X"; + var keyY1 = prefix + "Y1"; + var keyY2 = prefix + "Y2"; + var 
keyResult = prefix + "result"; + + // Clean up keys + db.KeyDelete([keyX, keyY1, keyY2, keyResult], CommandFlags.FireAndForget); + + // Set up test data: X=11110000, Y1=10100000, Y2=01010000 + // Expected ANDOR result: X ∧ (Y1 ∨ Y2) = 11110000 ∧ (10100000 ∨ 01010000) = 11110000 ∧ 11110000 = 11110000 + db.StringSet(keyX, new byte[] { 0b11110000 }, flags: CommandFlags.FireAndForget); + db.StringSet(keyY1, new byte[] { 0b10100000 }, flags: CommandFlags.FireAndForget); + db.StringSet(keyY2, new byte[] { 0b01010000 }, flags: CommandFlags.FireAndForget); + + var length = db.StringBitOperation(Bitwise.AndOr, keyResult, [keyX, keyY1, keyY2]); + Assert.Equal(1, length); + + var result = ((byte[]?)db.StringGet(keyResult))?.Single(); + // X ∧ (Y1 ∨ Y2) = 11110000 ∧ 11110000 = 11110000 + Assert.Equal((byte)0b11110000, result); + } + + [Fact] + public async Task BitOpOne() + { + await using var conn = Create(require: RedisFeatures.v8_2_0_rc1); + var db = conn.GetDatabase(); + var prefix = Me(); + var key1 = prefix + "1"; + var key2 = prefix + "2"; + var key3 = prefix + "3"; + var keyResult = prefix + "result"; + + // Clean up keys + db.KeyDelete([key1, key2, key3, keyResult], CommandFlags.FireAndForget); + + // Set up test data: key1=10100000, key2=01010000, key3=00110000 + // Expected ONE result: bits set in exactly one bitmap = 11000000 + db.StringSet(key1, new byte[] { 0b10100000 }, flags: CommandFlags.FireAndForget); + db.StringSet(key2, new byte[] { 0b01010000 }, flags: CommandFlags.FireAndForget); + db.StringSet(key3, new byte[] { 0b00110000 }, flags: CommandFlags.FireAndForget); + + var length = db.StringBitOperation(Bitwise.One, keyResult, [key1, key2, key3]); + Assert.Equal(1, length); + + var result = ((byte[]?)db.StringGet(keyResult))?.Single(); + // Bits set in exactly one: position 7 (key1 only), position 6 (key2 only) = 11000000 + Assert.Equal((byte)0b11000000, result); + } + + [Fact] + public async Task BitOpDiffAsync() + { + await using var conn = 
Create(require: RedisFeatures.v8_2_0_rc1); + var db = conn.GetDatabase(); + var prefix = Me(); + var keyX = prefix + "X"; + var keyY1 = prefix + "Y1"; + var keyResult = prefix + "result"; + + // Clean up keys + db.KeyDelete([keyX, keyY1, keyResult], CommandFlags.FireAndForget); + + // Set up test data: X=11110000, Y1=10100000 + // Expected DIFF result: X ∧ ¬Y1 = 11110000 ∧ 01011111 = 01010000 + db.StringSet(keyX, new byte[] { 0b11110000 }, flags: CommandFlags.FireAndForget); + db.StringSet(keyY1, new byte[] { 0b10100000 }, flags: CommandFlags.FireAndForget); + + var length = await db.StringBitOperationAsync(Bitwise.Diff, keyResult, [keyX, keyY1]); + Assert.Equal(1, length); + + var result = ((byte[]?)await db.StringGetAsync(keyResult))?.Single(); + // X ∧ ¬Y1 = 11110000 ∧ 01011111 = 01010000 + Assert.Equal((byte)0b01010000, result); + } + + [Fact] + public async Task BitOpEdgeCases() + { + await using var conn = Create(require: RedisFeatures.v8_2_0_rc1); + var db = conn.GetDatabase(); + var prefix = Me(); + var keyEmpty = prefix + "empty"; + var keyNonEmpty = prefix + "nonempty"; + var keyResult = prefix + "result"; + + // Clean up keys + db.KeyDelete([keyEmpty, keyNonEmpty, keyResult], CommandFlags.FireAndForget); + + // Test with empty bitmap + db.StringSet(keyNonEmpty, new byte[] { 0b11110000 }, flags: CommandFlags.FireAndForget); + + // DIFF with empty key should return the first key + var length = db.StringBitOperation(Bitwise.Diff, keyResult, [keyNonEmpty, keyEmpty]); + Assert.Equal(1, length); + + var result = ((byte[]?)db.StringGet(keyResult))?.Single(); + Assert.Equal((byte)0b11110000, result); + + // ONE with single key should return that key + length = db.StringBitOperation(Bitwise.One, keyResult, [keyNonEmpty]); + Assert.Equal(1, length); + + result = ((byte[]?)db.StringGet(keyResult))?.Single(); + Assert.Equal((byte)0b11110000, result); + } + + [Fact] + public async Task BitPosition() + { + await using var conn = Create(require: RedisFeatures.v2_6_0); 
+ + var db = conn.GetDatabase(); + var key = Me(); + db.KeyDelete(key, flags: CommandFlags.FireAndForget); + db.StringSet(key, "foo", flags: CommandFlags.FireAndForget); + + var r1 = db.StringBitPosition(key, true); + var r2 = db.StringBitPosition(key, true, 10, 10); + var r3 = db.StringBitPosition(key, true, 1, 3); + + Assert.Equal(1, r1); + Assert.Equal(-1, r2); + Assert.Equal(9, r3); + + // Async + r1 = await db.StringBitPositionAsync(key, true); + r2 = await db.StringBitPositionAsync(key, true, 10, 10); + r3 = await db.StringBitPositionAsync(key, true, 1, 3); + + Assert.Equal(1, r1); + Assert.Equal(-1, r2); + Assert.Equal(9, r3); + } + + [Fact] + public async Task BitPositionWithBitUnit() + { + await using var conn = Create(require: RedisFeatures.v7_0_0_rc1); + + var db = conn.GetDatabase(); + var key = Me(); + db.KeyDelete(key, flags: CommandFlags.FireAndForget); + db.StringSet(key, "foo", flags: CommandFlags.FireAndForget); + + var r1 = db.StringBitPositionAsync(key, true, 1, 3); // Using default byte + var r2 = db.StringBitPositionAsync(key, true, 1, 3, StringIndexType.Bit); + + Assert.Equal(9, await r1); + Assert.Equal(1, await r2); + } + + [Fact] + public async Task RangeString() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + var key = Me(); + db.StringSet(key, "hello world", flags: CommandFlags.FireAndForget); + var result = db.StringGetRangeAsync(key, 2, 6); + Assert.Equal("llo w", await result); + } + + [Fact] + public async Task HashStringLengthAsync() + { + await using var conn = Create(require: RedisFeatures.v3_2_0); + + var db = conn.GetDatabase(); + var key = Me(); + const string value = "hello world"; + db.HashSet(key, "field", value); + var resAsync = db.HashStringLengthAsync(key, "field"); + var resNonExistingAsync = db.HashStringLengthAsync(key, "non-existing-field"); + Assert.Equal(value.Length, await resAsync); + Assert.Equal(0, await resNonExistingAsync); + } + + [Fact] + public async Task HashStringLength() + { + 
await using var conn = Create(require: RedisFeatures.v3_2_0); + + var db = conn.GetDatabase(); + var key = Me(); + const string value = "hello world"; + db.HashSet(key, "field", value); + Assert.Equal(value.Length, db.HashStringLength(key, "field")); + Assert.Equal(0, db.HashStringLength(key, "non-existing-field")); + } + + [Fact] + public async Task LongestCommonSubsequence() + { + await using var conn = Create(require: RedisFeatures.v7_0_0_rc1); + + var db = conn.GetDatabase(); + var key1 = Me() + "1"; + var key2 = Me() + "2"; + db.KeyDelete(key1); + db.KeyDelete(key2); + db.StringSet(key1, "ohmytext"); + db.StringSet(key2, "mynewtext"); + + Assert.Equal("mytext", db.StringLongestCommonSubsequence(key1, key2)); + Assert.Equal(6, db.StringLongestCommonSubsequenceLength(key1, key2)); + + var stringMatchResult = db.StringLongestCommonSubsequenceWithMatches(key1, key2); + Assert.Equal(2, stringMatchResult.Matches.Length); // "my" and "text" are the two matches of the result + Assert.Equivalent(new LCSMatchResult.LCSMatch(new(4, 7), new(5, 8), length: 4), stringMatchResult.Matches[0]); // the string "text" starts at index 4 in the first string and at index 5 in the second string + Assert.Equivalent(new LCSMatchResult.LCSMatch(new(2, 3), new(0, 1), length: 2), stringMatchResult.Matches[1]); // the string "my" starts at index 2 in the first string and at index 0 in the second string + + stringMatchResult = db.StringLongestCommonSubsequenceWithMatches(key1, key2, 5); + Assert.Empty(stringMatchResult.Matches); // no matches longer than 5 characters + Assert.Equal(6, stringMatchResult.LongestMatchLength); + + // Missing keys + db.KeyDelete(key1); + Assert.Equal(string.Empty, db.StringLongestCommonSubsequence(key1, key2)); + db.KeyDelete(key2); + Assert.Equal(string.Empty, db.StringLongestCommonSubsequence(key1, key2)); + stringMatchResult = db.StringLongestCommonSubsequenceWithMatches(key1, key2); + Assert.NotNull(stringMatchResult.Matches); + 
Assert.Empty(stringMatchResult.Matches); + Assert.Equal(0, stringMatchResult.LongestMatchLength); + + // Default value + stringMatchResult = db.StringLongestCommonSubsequenceWithMatches(key1, key2, flags: CommandFlags.FireAndForget); + Assert.True(stringMatchResult.IsEmpty); + } + + [Fact] + public async Task LongestCommonSubsequenceAsync() + { + await using var conn = Create(require: RedisFeatures.v7_0_0_rc1); + + var db = conn.GetDatabase(); + var key1 = Me() + "1"; + var key2 = Me() + "2"; + db.KeyDelete(key1); + db.KeyDelete(key2); + db.StringSet(key1, "ohmytext"); + db.StringSet(key2, "mynewtext"); + + Assert.Equal("mytext", await db.StringLongestCommonSubsequenceAsync(key1, key2)); + Assert.Equal(6, await db.StringLongestCommonSubsequenceLengthAsync(key1, key2)); + + var stringMatchResult = await db.StringLongestCommonSubsequenceWithMatchesAsync(key1, key2); + Assert.Equal(2, stringMatchResult.Matches.Length); // "my" and "text" are the two matches of the result + Assert.Equivalent(new LCSMatchResult.LCSMatch(new(4, 7), new(5, 8), length: 4), stringMatchResult.Matches[0]); // the string "text" starts at index 4 in the first string and at index 5 in the second string + Assert.Equivalent(new LCSMatchResult.LCSMatch(new(2, 3), new(0, 1), length: 2), stringMatchResult.Matches[1]); // the string "my" starts at index 2 in the first string and at index 0 in the second string + + stringMatchResult = await db.StringLongestCommonSubsequenceWithMatchesAsync(key1, key2, 5); + Assert.Empty(stringMatchResult.Matches); // no matches longer than 5 characters + Assert.Equal(6, stringMatchResult.LongestMatchLength); + + // Missing keys + db.KeyDelete(key1); + Assert.Equal(string.Empty, await db.StringLongestCommonSubsequenceAsync(key1, key2)); + db.KeyDelete(key2); + Assert.Equal(string.Empty, await db.StringLongestCommonSubsequenceAsync(key1, key2)); + stringMatchResult = await db.StringLongestCommonSubsequenceWithMatchesAsync(key1, key2); + 
Assert.NotNull(stringMatchResult.Matches); + Assert.Empty(stringMatchResult.Matches); + Assert.Equal(0, stringMatchResult.LongestMatchLength); + + // Default value + stringMatchResult = await db.StringLongestCommonSubsequenceWithMatchesAsync(key1, key2, flags: CommandFlags.FireAndForget); + Assert.True(stringMatchResult.IsEmpty); + } + + private static byte[] Encode(string value) => Encoding.UTF8.GetBytes(value); + private static string? Decode(byte[]? value) => value is null ? null : Encoding.UTF8.GetString(value); +} diff --git a/tests/StackExchange.Redis.Tests/Strings.cs b/tests/StackExchange.Redis.Tests/Strings.cs deleted file mode 100644 index 678e2b544..000000000 --- a/tests/StackExchange.Redis.Tests/Strings.cs +++ /dev/null @@ -1,342 +0,0 @@ -using System.IO; -using System.Linq; -using System.Text; -using System.Threading.Tasks; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - [Collection(SharedConnectionFixture.Key)] - public class Strings : TestBase // https://redis.io/commands#string - { - public Strings(ITestOutputHelper output, SharedConnectionFixture fixture) : base(output, fixture) { } - - [Fact] - public async Task Append() - { - using (var muxer = Create()) - { - var conn = muxer.GetDatabase(); - var server = GetServer(muxer); - var key = Me(); - conn.KeyDelete(key, CommandFlags.FireAndForget); - var l0 = server.Features.StringLength ? conn.StringLengthAsync(key) : null; - - var s0 = conn.StringGetAsync(key); - - conn.StringSet(key, "abc", flags: CommandFlags.FireAndForget); - var s1 = conn.StringGetAsync(key); - var l1 = server.Features.StringLength ? conn.StringLengthAsync(key) : null; - - var result = conn.StringAppendAsync(key, Encode("defgh")); - var s3 = conn.StringGetAsync(key); - var l2 = server.Features.StringLength ? 
conn.StringLengthAsync(key) : null; - - Assert.Null((string)await s0); - Assert.Equal("abc", await s1); - Assert.Equal(8, await result); - Assert.Equal("abcdefgh", await s3); - - if (server.Features.StringLength) - { - Assert.Equal(0, await l0); - Assert.Equal(3, await l1); - Assert.Equal(8, await l2); - } - } - } - - [Fact] - public async Task Set() - { - using (var muxer = Create()) - { - var conn = muxer.GetDatabase(); - var key = Me(); - conn.KeyDelete(key, CommandFlags.FireAndForget); - - conn.StringSet(key, "abc", flags: CommandFlags.FireAndForget); - var v1 = conn.StringGetAsync(key); - - conn.StringSet(key, Encode("def"), flags: CommandFlags.FireAndForget); - var v2 = conn.StringGetAsync(key); - - Assert.Equal("abc", await v1); - Assert.Equal("def", Decode(await v2)); - } - } - - [Fact] - public async Task GetLease() - { - using (var muxer = Create()) - { - var conn = muxer.GetDatabase(); - var key = Me(); - conn.KeyDelete(key, CommandFlags.FireAndForget); - - conn.StringSet(key, "abc", flags: CommandFlags.FireAndForget); - using (var v1 = await conn.StringGetLeaseAsync(key).ConfigureAwait(false)) - { - string s = v1.DecodeString(); - Assert.Equal("abc", s); - } - } - } - - [Fact] - public async Task GetLeaseAsStream() - { - using (var muxer = Create()) - { - var conn = muxer.GetDatabase(); - var key = Me(); - conn.KeyDelete(key, CommandFlags.FireAndForget); - - conn.StringSet(key, "abc", flags: CommandFlags.FireAndForget); - using (var v1 = (await conn.StringGetLeaseAsync(key).ConfigureAwait(false)).AsStream()) - { - using (var sr = new StreamReader(v1)) - { - string s = sr.ReadToEnd(); - Assert.Equal("abc", s); - } - } - } - } - - [Fact] - public async Task SetNotExists() - { - using (var muxer = Create()) - { - var conn = muxer.GetDatabase(); - var prefix = Me(); - conn.KeyDelete(prefix + "1", CommandFlags.FireAndForget); - conn.KeyDelete(prefix + "2", CommandFlags.FireAndForget); - conn.KeyDelete(prefix + "3", CommandFlags.FireAndForget); - 
conn.StringSet(prefix + "1", "abc", flags: CommandFlags.FireAndForget); - - var x0 = conn.StringSetAsync(prefix + "1", "def", when: When.NotExists); - var x1 = conn.StringSetAsync(prefix + "1", Encode("def"), when: When.NotExists); - var x2 = conn.StringSetAsync(prefix + "2", "def", when: When.NotExists); - var x3 = conn.StringSetAsync(prefix + "3", Encode("def"), when: When.NotExists); - - var s0 = conn.StringGetAsync(prefix + "1"); - var s2 = conn.StringGetAsync(prefix + "2"); - var s3 = conn.StringGetAsync(prefix + "3"); - - Assert.False(await x0); - Assert.False(await x1); - Assert.True(await x2); - Assert.True(await x3); - Assert.Equal("abc", await s0); - Assert.Equal("def", await s2); - Assert.Equal("def", await s3); - } - } - - [Fact] - public async Task Ranges() - { - using (var muxer = Create()) - { - Skip.IfMissingFeature(muxer, nameof(RedisFeatures.StringSetRange), r => r.StringSetRange); - var conn = muxer.GetDatabase(); - var key = Me(); - - conn.KeyDelete(key, CommandFlags.FireAndForget); - - conn.StringSet(key, "abcdefghi", flags: CommandFlags.FireAndForget); - conn.StringSetRange(key, 2, "xy", CommandFlags.FireAndForget); - conn.StringSetRange(key, 4, Encode("z"), CommandFlags.FireAndForget); - - var val = conn.StringGetAsync(key); - - Assert.Equal("abxyzfghi", await val); - } - } - - [Fact] - public async Task IncrDecr() - { - using (var muxer = Create()) - { - var conn = muxer.GetDatabase(); - var key = Me(); - conn.KeyDelete(key, CommandFlags.FireAndForget); - - conn.StringSet(key, "2", flags: CommandFlags.FireAndForget); - var v1 = conn.StringIncrementAsync(key); - var v2 = conn.StringIncrementAsync(key, 5); - var v3 = conn.StringIncrementAsync(key, -2); - var v4 = conn.StringDecrementAsync(key); - var v5 = conn.StringDecrementAsync(key, 5); - var v6 = conn.StringDecrementAsync(key, -2); - var s = conn.StringGetAsync(key); - - Assert.Equal(3, await v1); - Assert.Equal(8, await v2); - Assert.Equal(6, await v3); - Assert.Equal(5, await v4); - 
Assert.Equal(0, await v5); - Assert.Equal(2, await v6); - Assert.Equal("2", await s); - } - } - - [Fact] - public async Task IncrDecrFloat() - { - using (var muxer = Create()) - { - Skip.IfMissingFeature(muxer, nameof(RedisFeatures.IncrementFloat), r => r.IncrementFloat); - var conn = muxer.GetDatabase(); - var key = Me(); - conn.KeyDelete(key, CommandFlags.FireAndForget); - - conn.StringSet(key, "2", flags: CommandFlags.FireAndForget); - var v1 = conn.StringIncrementAsync(key, 1.1); - var v2 = conn.StringIncrementAsync(key, 5.0); - var v3 = conn.StringIncrementAsync(key, -2.0); - var v4 = conn.StringIncrementAsync(key, -1.0); - var v5 = conn.StringIncrementAsync(key, -5.0); - var v6 = conn.StringIncrementAsync(key, 2.0); - - var s = conn.StringGetAsync(key); - - Assert.Equal(3.1, await v1, 5); - Assert.Equal(8.1, await v2, 5); - Assert.Equal(6.1, await v3, 5); - Assert.Equal(5.1, await v4, 5); - Assert.Equal(0.1, await v5, 5); - Assert.Equal(2.1, await v6, 5); - Assert.Equal(2.1, (double)await s, 5); - } - } - - [Fact] - public async Task GetRange() - { - using (var muxer = Create()) - { - var conn = muxer.GetDatabase(); - var key = Me(); - conn.KeyDelete(key, CommandFlags.FireAndForget); - - conn.StringSet(key, "abcdefghi", flags: CommandFlags.FireAndForget); - var s = conn.StringGetRangeAsync(key, 2, 4); - var b = conn.StringGetRangeAsync(key, 2, 4); - - Assert.Equal("cde", await s); - Assert.Equal("cde", Decode(await b)); - } - } - - [Fact] - public async Task BitCount() - { - using (var muxer = Create()) - { - Skip.IfMissingFeature(muxer, nameof(RedisFeatures.BitwiseOperations), r => r.BitwiseOperations); - - var conn = muxer.GetDatabase(); - var key = Me(); - conn.StringSet(key, "foobar", flags: CommandFlags.FireAndForget); - var r1 = conn.StringBitCountAsync(key); - var r2 = conn.StringBitCountAsync(key, 0, 0); - var r3 = conn.StringBitCountAsync(key, 1, 1); - - Assert.Equal(26, await r1); - Assert.Equal(4, await r2); - Assert.Equal(6, await r3); - } - } - - 
[Fact] - public async Task BitOp() - { - using (var muxer = Create()) - { - Skip.IfMissingFeature(muxer, nameof(RedisFeatures.BitwiseOperations), r => r.BitwiseOperations); - var conn = muxer.GetDatabase(); - var prefix = Me(); - var key1 = prefix + "1"; - var key2 = prefix + "2"; - var key3 = prefix + "3"; - conn.StringSet(key1, new byte[] { 3 }, flags: CommandFlags.FireAndForget); - conn.StringSet(key2, new byte[] { 6 }, flags: CommandFlags.FireAndForget); - conn.StringSet(key3, new byte[] { 12 }, flags: CommandFlags.FireAndForget); - - var len_and = conn.StringBitOperationAsync(Bitwise.And, "and", new RedisKey[] { key1, key2, key3 }); - var len_or = conn.StringBitOperationAsync(Bitwise.Or, "or", new RedisKey[] { key1, key2, key3 }); - var len_xor = conn.StringBitOperationAsync(Bitwise.Xor, "xor", new RedisKey[] { key1, key2, key3 }); - var len_not = conn.StringBitOperationAsync(Bitwise.Not, "not", key1); - - Assert.Equal(1, await len_and); - Assert.Equal(1, await len_or); - Assert.Equal(1, await len_xor); - Assert.Equal(1, await len_not); - - var r_and = ((byte[])(await conn.StringGetAsync("and").ForAwait())).Single(); - var r_or = ((byte[])(await conn.StringGetAsync("or").ForAwait())).Single(); - var r_xor = ((byte[])(await conn.StringGetAsync("xor").ForAwait())).Single(); - var r_not = ((byte[])(await conn.StringGetAsync("not").ForAwait())).Single(); - - Assert.Equal((byte)(3 & 6 & 12), r_and); - Assert.Equal((byte)(3 | 6 | 12), r_or); - Assert.Equal((byte)(3 ^ 6 ^ 12), r_xor); - Assert.Equal(unchecked((byte)(~3)), r_not); - } - } - - [Fact] - public async Task RangeString() - { - using (var muxer = Create()) - { - var conn = muxer.GetDatabase(); - var key = Me(); - conn.StringSet(key, "hello world", flags: CommandFlags.FireAndForget); - var result = conn.StringGetRangeAsync(key, 2, 6); - Assert.Equal("llo w", await result); - } - } - - [Fact] - public async Task HashStringLengthAsync() - { - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, 
nameof(RedisFeatures.HashStringLength), r => r.HashStringLength); - var database = conn.GetDatabase(); - var key = Me(); - var value = "hello world"; - database.HashSet(key, "field", value); - var resAsync = database.HashStringLengthAsync(key, "field"); - var resNonExistingAsync = database.HashStringLengthAsync(key, "non-existing-field"); - Assert.Equal(value.Length, await resAsync); - Assert.Equal(0, await resNonExistingAsync); - } - } - - [Fact] - public void HashStringLength() - { - using (var conn = Create()) - { - Skip.IfMissingFeature(conn, nameof(RedisFeatures.HashStringLength), r => r.HashStringLength); - var database = conn.GetDatabase(); - var key = Me(); - var value = "hello world"; - database.HashSet(key, "field", value); - Assert.Equal(value.Length, database.HashStringLength(key, "field")); - Assert.Equal(0, database.HashStringLength(key, "non-existing-field")); - } - } - - private static byte[] Encode(string value) => Encoding.UTF8.GetBytes(value); - private static string Decode(byte[] value) => Encoding.UTF8.GetString(value); - } -} diff --git a/tests/StackExchange.Redis.Tests/SyncContextTests.cs b/tests/StackExchange.Redis.Tests/SyncContextTests.cs new file mode 100644 index 000000000..5feb37e3d --- /dev/null +++ b/tests/StackExchange.Redis.Tests/SyncContextTests.cs @@ -0,0 +1,177 @@ +using System; +using System.IO; +using System.Threading; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests +{ + public class SyncContextTests(ITestOutputHelper testOutput) : TestBase(testOutput) + { + /* Note A (referenced below) + * + * When sync-context is *enabled*, we don't validate OpCount > 0 - this is because *with the additional checks*, + * it can genuinely happen that by the time we actually await it, it has completed - which results in a brittle test. 
+ */ + [Theory] + [InlineData(true)] + [InlineData(false)] + public async Task DetectSyncContextUnsafe(bool continueOnCapturedContext) + { + using var ctx = new MySyncContext(Writer); + Assert.Equal(0, ctx.OpCount); + await Task.Delay(100).ConfigureAwait(continueOnCapturedContext); + + AssertState(continueOnCapturedContext, ctx); + } + + private void AssertState(bool continueOnCapturedContext, MySyncContext ctx) + { + Log($"Context in AssertState: {ctx}"); + if (continueOnCapturedContext) + { + Assert.True(ctx.IsCurrent, nameof(ctx.IsCurrent)); + // see note A re OpCount + } + else + { + // no guarantees on sync-context still being current; depends on sync vs async + Assert.Equal(0, ctx.OpCount); + } + } + + [Fact] + public async Task SyncPing() + { + using var ctx = new MySyncContext(Writer); + await using var conn = Create(); + Assert.Equal(0, ctx.OpCount); + var db = conn.GetDatabase(); + db.Ping(); + Assert.Equal(0, ctx.OpCount); + } + + [Theory] + [InlineData(true)] + [InlineData(false)] + public async Task AsyncPing(bool continueOnCapturedContext) + { + using var ctx = new MySyncContext(Writer); + await using var conn = Create(); + Assert.Equal(0, ctx.OpCount); + var db = conn.GetDatabase(); + Log($"Context before await: {ctx}"); + await db.PingAsync().ConfigureAwait(continueOnCapturedContext); + + AssertState(continueOnCapturedContext, ctx); + } + + [Fact] + public async Task SyncConfigure() + { + using var ctx = new MySyncContext(Writer); + await using var conn = Create(); + Assert.Equal(0, ctx.OpCount); + Assert.True(conn.Configure()); + Assert.Equal(0, ctx.OpCount); + } + + [Theory] + [InlineData(true)] // fail: Expected: Not RanToCompletion, Actual: RanToCompletion + [InlineData(false)] // pass + public async Task AsyncConfigure(bool continueOnCapturedContext) + { + using var ctx = new MySyncContext(Writer); + await using var conn = Create(); + + Log($"Context initial: {ctx}"); + await Task.Delay(500); + await conn.GetDatabase().PingAsync(); // ensure 
we're all ready + ctx.Reset(); + Log($"Context before: {ctx}"); + + Assert.Equal(0, ctx.OpCount); + Assert.True(await conn.ConfigureAsync(Writer).ConfigureAwait(continueOnCapturedContext), "config ran"); + + AssertState(continueOnCapturedContext, ctx); + } + + [Theory] + [InlineData(true)] + [InlineData(false)] + public async Task ConnectAsync(bool continueOnCapturedContext) + { + using var ctx = new MySyncContext(Writer); + var config = GetConfiguration(); // not ideal, but sufficient + await ConnectionMultiplexer.ConnectAsync(config, Writer).ConfigureAwait(continueOnCapturedContext); + + AssertState(continueOnCapturedContext, ctx); + } + + public sealed class MySyncContext : SynchronizationContext, IDisposable + { + private readonly SynchronizationContext? _previousContext; + private readonly TextWriter _log; + public MySyncContext(TextWriter log) + { + _previousContext = Current; + _log = log; + SetSynchronizationContext(this); + } + public int OpCount => Volatile.Read(ref _opCount); + private int _opCount; + private void Incr() => Interlocked.Increment(ref _opCount); + + public void Reset() => Volatile.Write(ref _opCount, 0); + + public override string ToString() => $"Sync context ({(IsCurrent ? "active" : "inactive")}): {OpCount}"; + + void IDisposable.Dispose() => SetSynchronizationContext(_previousContext); + + public override void Post(SendOrPostCallback d, object? state) + { + Log(_log, "sync-ctx: Post"); + Incr(); + ThreadPool.QueueUserWorkItem( + static state => + { + var tuple = (Tuple)state!; + tuple.Item1.Invoke(tuple.Item2, tuple.Item3); + }, + Tuple.Create(this, d, state)); + } + + private void Invoke(SendOrPostCallback d, object? state) + { + Log(_log, "sync-ctx: Invoke"); + if (!IsCurrent) SetSynchronizationContext(this); + d(state); + } + + public override void Send(SendOrPostCallback d, object? 
state) + { + Log(_log, "sync-ctx: Send"); + Incr(); + Invoke(d, state); + } + + public bool IsCurrent => ReferenceEquals(this, Current); + + public override int Wait(IntPtr[] waitHandles, bool waitAll, int millisecondsTimeout) + { + Incr(); + return base.Wait(waitHandles, waitAll, millisecondsTimeout); + } + public override void OperationStarted() + { + Incr(); + base.OperationStarted(); + } + public override void OperationCompleted() + { + Incr(); + base.OperationCompleted(); + } + } + } +} diff --git a/tests/StackExchange.Redis.Tests/TaskExtensions.cs b/tests/StackExchange.Redis.Tests/TaskExtensions.cs new file mode 100644 index 000000000..19db48f7c --- /dev/null +++ b/tests/StackExchange.Redis.Tests/TaskExtensions.cs @@ -0,0 +1,49 @@ +#if !NET +using System; +using System.Threading; +using System.Threading.Tasks; + +namespace StackExchange.Redis.Tests; + +internal static class TaskExtensions +{ + // suboptimal polyfill version of the .NET 6+ API; I'm not recommending this for production use, + // but it's good enough for tests + public static Task WaitAsync(this Task task, CancellationToken cancellationToken) + { + if (task.IsCompleted || !cancellationToken.CanBeCanceled) return task; + return Wrap(task, cancellationToken); + + static async Task Wrap(Task task, CancellationToken cancellationToken) + { + var tcs = new TaskCompletionSource(); + using var reg = cancellationToken.Register(() => tcs.TrySetCanceled(cancellationToken)); + _ = task.ContinueWith(t => + { + if (t.IsCanceled) tcs.TrySetCanceled(); + else if (t.IsFaulted) tcs.TrySetException(t.Exception!); + else tcs.TrySetResult(t.Result); + }); + return await tcs.Task; + } + } + + public static Task WaitAsync(this Task task, TimeSpan timeout) + { + if (task.IsCompleted) return task; + return Wrap(task, timeout); + + static async Task Wrap(Task task, TimeSpan timeout) + { + Task other = Task.Delay(timeout); + var first = await Task.WhenAny(task, other); + if (ReferenceEquals(first, other)) + { + throw new 
TimeoutException(); + } + return await task; + } + } +} + +#endif diff --git a/tests/StackExchange.Redis.Tests/TestBase.cs b/tests/StackExchange.Redis.Tests/TestBase.cs index 3ca698425..62b841f08 100644 --- a/tests/StackExchange.Redis.Tests/TestBase.cs +++ b/tests/StackExchange.Redis.Tests/TestBase.cs @@ -3,440 +3,665 @@ using System.Diagnostics; using System.IO; using System.Linq; -using System.Net; using System.Runtime; using System.Runtime.CompilerServices; using System.Threading; using System.Threading.Tasks; +using StackExchange.Redis.Configuration; +using StackExchange.Redis.Profiling; using StackExchange.Redis.Tests.Helpers; using Xunit; -using Xunit.Abstractions; -namespace StackExchange.Redis.Tests +namespace StackExchange.Redis.Tests; + +public abstract class TestBase : IDisposable { - public abstract class TestBase : IDisposable + protected ITestOutputHelper Output { get; } + protected TextWriterOutputHelper Writer { get; } + protected virtual string GetConfiguration() { - private ITestOutputHelper Output { get; } - protected TextWriterOutputHelper Writer { get; } - protected static bool RunningInCI { get; } = Environment.GetEnvironmentVariable("APPVEYOR") != null; - protected virtual string GetConfiguration() => GetDefaultConfiguration(); - internal static string GetDefaultConfiguration() => TestConfig.Current.MasterServerAndPort; + if (_inProcServerFixture != null) + { + return _inProcServerFixture.Configuration; + } + return GetDefaultConfiguration(); + } + internal static string GetDefaultConfiguration() => TestConfig.Current.PrimaryServerAndPort; - private readonly SharedConnectionFixture _fixture; + private readonly SharedConnectionFixture? _sharedConnectionFixture; + private readonly InProcServerFixture? 
_inProcServerFixture; - protected bool SharedFixtureAvailable => _fixture != null && _fixture.IsEnabled; + protected bool SharedFixtureAvailable => _sharedConnectionFixture != null && _sharedConnectionFixture.IsEnabled && !HighIntegrity; - protected TestBase(ITestOutputHelper output, SharedConnectionFixture fixture = null) - { - Output = output; - Output.WriteFrameworkVersion(); - Writer = new TextWriterOutputHelper(output, TestConfig.Current.LogToConsole); - _fixture = fixture; - ClearAmbientFailures(); - } + protected TestBase(ITestOutputHelper output, SharedConnectionFixture? connection = null, InProcServerFixture? server = null) + { + Output = output; + Output.WriteFrameworkVersion(); + Writer = new TextWriterOutputHelper(output); + _sharedConnectionFixture = connection; + _inProcServerFixture = server; + ClearAmbientFailures(); + } + + protected TestBase(ITestOutputHelper output, InProcServerFixture fixture) : this(output, null, fixture) + { + } - /// Useful to temporarily get extra worker threads for an otherwise synchronous test case which will 'block' the thread, on a synchronous API like Task.Wait() or Task.Result - /// Must NOT be used for test cases which *goes async*, as then the inferred return type will become 'async void', and we will fail to observe the result of the async part - /// See 'ConnectFailTimeout' class for example usage. - protected Task RunBlockingSynchronousWithExtraThreadAsync(Action testScenario) => Task.Factory.StartNew(testScenario, CancellationToken.None, TaskCreationOptions.LongRunning | TaskCreationOptions.DenyChildAttach, TaskScheduler.Default); + /// + /// Useful to temporarily get extra worker threads for an otherwise synchronous test case which will 'block' the thread, + /// on a synchronous API like or . + /// + /// + /// Must NOT be used for test cases which *goes async*, as then the inferred return type will become 'async void', + /// and we will fail to observe the result of the async part. 
+ /// + /// See 'ConnectFailTimeout' class for example usage. + protected static Task RunBlockingSynchronousWithExtraThreadAsync(Action testScenario) => Task.Factory.StartNew(testScenario, CancellationToken.None, TaskCreationOptions.LongRunning | TaskCreationOptions.DenyChildAttach, TaskScheduler.Default); - protected void LogNoTime(string message) => LogNoTime(Writer, message); - internal static void LogNoTime(TextWriter output, string message) + public static void Log(TextWriter output, string message) + { + lock (output) { - lock (output) - { - output.WriteLine(message); - } - if (TestConfig.Current.LogToConsole) - { - Console.WriteLine(message); - } + output?.WriteLine(Time() + ": " + message); } - protected void Log(string message) => LogNoTime(Writer, message); - public static void Log(TextWriter output, string message) + } + + protected void Log(string? message, params object[] args) + { + if (args is { Length: > 0 }) { - lock (output) - { - output?.WriteLine(Time() + ": " + message); - } - if (TestConfig.Current.LogToConsole) - { - Console.WriteLine(message); - } + Output.WriteLine(Time() + ": " + message, args); } - protected void Log(string message, params object[] args) + else { - lock (Output) - { - Output.WriteLine(Time() + ": " + message, args); - } - if (TestConfig.Current.LogToConsole) - { - Console.WriteLine(message, args); - } + // avoid "not intended as a format specifier" scenarios + Output.WriteLine(Time() + ": " + message); } + } - protected void CollectGarbage() + protected ProfiledCommandEnumerable Log(ProfilingSession session) + { + var profile = session.FinishProfiling(); + foreach (var command in profile) { - GC.Collect(GC.MaxGeneration, GCCollectionMode.Forced); - GC.WaitForPendingFinalizers(); - GC.Collect(GC.MaxGeneration, GCCollectionMode.Forced); + Writer.WriteLineNoTime(command.ToString()); } + return profile; + } - [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Design", "CA1063:ImplementIDisposableCorrectly")] - public 
void Dispose() - { - _fixture?.Teardown(Writer); - Teardown(); - } + protected static void CollectGarbage() + { + GC.Collect(GC.MaxGeneration, GCCollectionMode.Forced); + GC.WaitForPendingFinalizers(); + GC.Collect(GC.MaxGeneration, GCCollectionMode.Forced); + } + + [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Design", "CA1063:ImplementIDisposableCorrectly", Justification = "Trust me yo")] + public void Dispose() + { + _sharedConnectionFixture?.Teardown(Writer); + Teardown(); + Writer.Dispose(); + GC.SuppressFinalize(this); + } #if VERBOSE - protected const int AsyncOpsQty = 100, SyncOpsQty = 10; + protected const int AsyncOpsQty = 100, SyncOpsQty = 10; #else - protected const int AsyncOpsQty = 2000, SyncOpsQty = 2000; + protected const int AsyncOpsQty = 2000, SyncOpsQty = 2000; #endif - static TestBase() + static TestBase() + { + TaskScheduler.UnobservedTaskException += (sender, args) => { - TaskScheduler.UnobservedTaskException += (sender, args) => + Console.WriteLine("Unobserved: " + args.Exception); + args.SetObserved(); + lock (sharedFailCount) { - Console.WriteLine("Unobserved: " + args.Exception); - args.SetObserved(); - lock (sharedFailCount) - { - if (sharedFailCount != null) - { - sharedFailCount.Value++; - } - } - lock (backgroundExceptions) + if (sharedFailCount != null) { - backgroundExceptions.Add(args.Exception.ToString()); + sharedFailCount.Value++; } - }; - Console.WriteLine("Setup information:"); - Console.WriteLine(" GC IsServer: " + GCSettings.IsServerGC); - Console.WriteLine(" GC LOH Mode: " + GCSettings.LargeObjectHeapCompactionMode); - Console.WriteLine(" GC Latency Mode: " + GCSettings.LatencyMode); - } - internal static string Time() => DateTime.UtcNow.ToString("HH:mm:ss.fff"); - protected void OnConnectionFailed(object sender, ConnectionFailedEventArgs e) - { - Interlocked.Increment(ref privateFailCount); - lock (privateExceptions) + } + lock (backgroundExceptions) { - privateExceptions.Add($"{Time()}: Connection failed 
({e.FailureType}): {EndPointCollection.ToString(e.EndPoint)}/{e.ConnectionType}: {e.Exception}"); + backgroundExceptions.Add(args.Exception.ToString()); } + }; + Console.WriteLine("Setup information:"); + Console.WriteLine(" GC IsServer: " + GCSettings.IsServerGC); + Console.WriteLine(" GC LOH Mode: " + GCSettings.LargeObjectHeapCompactionMode); + Console.WriteLine(" GC Latency Mode: " + GCSettings.LatencyMode); + } + + internal static string Time() => DateTime.UtcNow.ToString("HH:mm:ss.ffff"); + protected void OnConnectionFailed(object? sender, ConnectionFailedEventArgs e) + { + Interlocked.Increment(ref privateFailCount); + lock (privateExceptions) + { + privateExceptions.Add($"{Time()}: Connection failed ({e.FailureType}): {EndPointCollection.ToString(e.EndPoint)}/{e.ConnectionType}: {e.Exception}"); } + Log($"Connection Failed ({e.ConnectionType},{e.FailureType}): {e.Exception}"); + } - protected void OnInternalError(object sender, InternalErrorEventArgs e) + protected void OnInternalError(object? 
sender, InternalErrorEventArgs e) + { + Interlocked.Increment(ref privateFailCount); + lock (privateExceptions) { - Interlocked.Increment(ref privateFailCount); - lock (privateExceptions) - { - privateExceptions.Add(Time() + ": Internal error: " + e.Origin + ", " + EndPointCollection.ToString(e.EndPoint) + "/" + e.ConnectionType); - } + privateExceptions.Add(Time() + ": Internal error: " + e.Origin + ", " + EndPointCollection.ToString(e.EndPoint) + "/" + e.ConnectionType); } + } - private int privateFailCount; - private static readonly AsyncLocal sharedFailCount = new AsyncLocal(); - private volatile int expectedFailCount; + private int privateFailCount; + private static readonly AsyncLocal sharedFailCount = new AsyncLocal(); + private volatile int expectedFailCount; - private readonly List privateExceptions = new List(); - private static readonly List backgroundExceptions = new List(); + private readonly List privateExceptions = []; + private static readonly List backgroundExceptions = []; - public void ClearAmbientFailures() + public void ClearAmbientFailures() + { + Interlocked.Exchange(ref privateFailCount, 0); + lock (sharedFailCount) + { + sharedFailCount.Value = 0; + } + expectedFailCount = 0; + lock (privateExceptions) + { + privateExceptions.Clear(); + } + lock (backgroundExceptions) + { + backgroundExceptions.Clear(); + } + } + + public void SetExpectedAmbientFailureCount(int count) + { + expectedFailCount = count; + } + + public void Teardown() + { + int sharedFails; + lock (sharedFailCount) + { + sharedFails = sharedFailCount.Value; + sharedFailCount.Value = 0; + } + if (expectedFailCount >= 0 && (sharedFails + privateFailCount) != expectedFailCount) { - Interlocked.Exchange(ref privateFailCount, 0); - lock (sharedFailCount) - { - sharedFailCount.Value = 0; - } - expectedFailCount = 0; lock (privateExceptions) { - privateExceptions.Clear(); + foreach (var item in privateExceptions.Take(5)) + { + Log(item); + } } lock (backgroundExceptions) { - 
backgroundExceptions.Clear(); + foreach (var item in backgroundExceptions.Take(5)) + { + Log(item); + } } + Assert.Skip($"There were {privateFailCount} private and {sharedFailCount.Value} ambient exceptions; expected {expectedFailCount}."); } + } - public void SetExpectedAmbientFailureCount(int count) + protected static IServer GetServer(IConnectionMultiplexer muxer) + { + IServer? result = null; + foreach (var server in muxer.GetServers()) { - expectedFailCount = count; + if (server.IsReplica || !server.IsConnected) continue; + if (result != null) throw new InvalidOperationException("Requires exactly one primary endpoint (found " + server.EndPoint + " and " + result.EndPoint + ")"); + result = server; } + if (result == null) throw new InvalidOperationException("Requires exactly one primary endpoint (found none)"); + return result; + } - public void Teardown() + protected static IServer GetAnyPrimary(IConnectionMultiplexer muxer) + { + foreach (var endpoint in muxer.GetEndPoints()) { - int sharedFails; - lock (sharedFailCount) - { - sharedFails = sharedFailCount.Value; - sharedFailCount.Value = 0; - } - if (expectedFailCount >= 0 && (sharedFails + privateFailCount) != expectedFailCount) - { - lock (privateExceptions) - { - foreach (var item in privateExceptions.Take(5)) - { - LogNoTime(item); - } - } - lock (backgroundExceptions) - { - foreach (var item in backgroundExceptions.Take(5)) - { - LogNoTime(item); - } - } - Skip.Inconclusive($"There were {privateFailCount} private and {sharedFailCount.Value} ambient exceptions; expected {expectedFailCount}."); - } - Log($"Service Counts: (Scheduler) Queue: {SocketManager.Shared?.SchedulerPool?.TotalServicedByQueue.ToString()}, Pool: {SocketManager.Shared?.SchedulerPool?.TotalServicedByPool.ToString()}"); + var server = muxer.GetServer(endpoint); + if (!server.IsReplica) return server; } + throw new InvalidOperationException("Requires a primary endpoint (found none)"); + } + + internal virtual bool HighIntegrity => false; 
+ + internal virtual Tunnel? Tunnel => _inProcServerFixture?.Tunnel; - protected IServer GetServer(IConnectionMultiplexer muxer) + internal virtual IInternalConnectionMultiplexer Create( + string? clientName = null, + int? syncTimeout = null, + int? asyncTimeout = null, + bool? allowAdmin = null, + int? keepAlive = null, + int? connectTimeout = null, + string? password = null, + string? tieBreaker = null, + TextWriter? log = null, + bool fail = true, + string[]? disabledCommands = null, + string[]? enabledCommands = null, + bool checkConnect = true, + string? failMessage = null, + string? channelPrefix = null, + Proxy? proxy = null, + string? configuration = null, + bool logTransactionData = true, + bool shared = true, + int? defaultDatabase = null, + BacklogPolicy? backlogPolicy = null, + Version? require = null, + RedisProtocol? protocol = null, + [CallerMemberName] string caller = "") + { + if (Output == null) { - EndPoint[] endpoints = muxer.GetEndPoints(); - IServer result = null; - foreach (var endpoint in endpoints) - { - var server = muxer.GetServer(endpoint); - if (server.IsReplica || !server.IsConnected) continue; - if (result != null) throw new InvalidOperationException("Requires exactly one master endpoint (found " + server.EndPoint + " and " + result.EndPoint + ")"); - result = server; - } - if (result == null) throw new InvalidOperationException("Requires exactly one master endpoint (found none)"); - return result; + Assert.Fail("Failure: Be sure to call the TestBase constructor like this: BasicOpsTests(ITestOutputHelper output) : base(output) { }"); } - protected IServer GetAnyMaster(IConnectionMultiplexer muxer) + // Default to protocol context if not explicitly passed in + protocol ??= TestContext.Current.GetProtocol(); + + // Share a connection if instructed to and we can - many specifics mean no sharing + bool highIntegrity = HighIntegrity; + var tunnel = Tunnel; + if (tunnel is null && shared && expectedFailCount == 0 + && 
_sharedConnectionFixture != null && _sharedConnectionFixture.IsEnabled + && GetConfiguration() == GetDefaultConfiguration() + && CanShare(allowAdmin, password, tieBreaker, fail, disabledCommands, enabledCommands, channelPrefix, proxy, configuration, defaultDatabase, backlogPolicy, highIntegrity)) { - foreach (var endpoint in muxer.GetEndPoints()) + configuration = GetConfiguration(); + var fixtureConn = _sharedConnectionFixture.GetConnection(this, protocol.Value, caller: caller); + // Only return if we match + TestBase.ThrowIfIncorrectProtocol(fixtureConn, protocol); + + if (configuration == _sharedConnectionFixture.Configuration) { - var server = muxer.GetServer(endpoint); - if (!server.IsReplica) return server; + TestBase.ThrowIfBelowMinVersion(fixtureConn, require); + return fixtureConn; } - throw new InvalidOperationException("Requires a master endpoint (found none)"); } - internal virtual IInternalConnectionMultiplexer Create( - string clientName = null, int? syncTimeout = null, bool? allowAdmin = null, int? keepAlive = null, - int? connectTimeout = null, string password = null, string tieBreaker = null, TextWriter log = null, - bool fail = true, string[] disabledCommands = null, string[] enabledCommands = null, - bool checkConnect = true, string failMessage = null, - string channelPrefix = null, Proxy? proxy = null, - string configuration = null, bool logTransactionData = true, - bool shared = true, int? defaultDatabase = null, - [CallerMemberName] string caller = null) + var conn = CreateDefault( + Writer, + configuration ?? 
GetConfiguration(), + clientName, + syncTimeout, + asyncTimeout, + allowAdmin, + keepAlive, + connectTimeout, + password, + tieBreaker, + log, + fail, + disabledCommands, + enabledCommands, + checkConnect, + failMessage, + channelPrefix, + proxy, + logTransactionData, + defaultDatabase, + backlogPolicy, + protocol, + highIntegrity, + tunnel, + caller); + + TestBase.ThrowIfIncorrectProtocol(conn, protocol); + TestBase.ThrowIfBelowMinVersion(conn, require); + + conn.InternalError += OnInternalError; + conn.ConnectionFailed += OnConnectionFailed; + conn.ConnectionRestored += (s, e) => Log($"Connection Restored ({e.ConnectionType},{e.FailureType}): {e.Exception}"); + return conn; + } + + internal static bool CanShare( + bool? allowAdmin, + string? password, + string? tieBreaker, + bool fail, + string[]? disabledCommands, + string[]? enabledCommands, + string? channelPrefix, + Proxy? proxy, + string? configuration, + int? defaultDatabase, + BacklogPolicy? backlogPolicy, + bool highIntegrity) + => enabledCommands == null + && disabledCommands == null + && fail + && channelPrefix == null + && proxy == null + && configuration == null + && password == null + && tieBreaker == null + && defaultDatabase == null + && (allowAdmin == null || allowAdmin == true) + && backlogPolicy == null + && !highIntegrity; + + internal static void ThrowIfIncorrectProtocol(IInternalConnectionMultiplexer conn, RedisProtocol? requiredProtocol) + { + if (requiredProtocol is null) + { + return; + } + + var serverProtocol = conn.GetServerEndPoint(conn.GetEndPoints()[0]).Protocol ?? RedisProtocol.Resp2; + if (serverProtocol != requiredProtocol) + { + Assert.Skip($"Requires protocol {requiredProtocol}, but connection is {serverProtocol}."); + } + } + + internal static void ThrowIfBelowMinVersion(IInternalConnectionMultiplexer conn, Version? 
requiredVersion) + { + if (requiredVersion is null) + { + return; + } + + var serverVersion = conn.GetServerEndPoint(conn.GetEndPoints()[0]).Version; + if (!serverVersion.IsAtLeast(requiredVersion)) + { + Assert.Skip($"Requires server version {requiredVersion}, but server is only {serverVersion}."); + } + } + + public static ConnectionMultiplexer CreateDefault( + TextWriter? output, + string configuration, + string? clientName = null, + int? syncTimeout = null, + int? asyncTimeout = null, + bool? allowAdmin = null, + int? keepAlive = null, + int? connectTimeout = null, + string? password = null, + string? tieBreaker = null, + TextWriter? log = null, + bool fail = true, + string[]? disabledCommands = null, + string[]? enabledCommands = null, + bool checkConnect = true, + string? failMessage = null, + string? channelPrefix = null, + Proxy? proxy = null, + bool logTransactionData = true, + int? defaultDatabase = null, + BacklogPolicy? backlogPolicy = null, + RedisProtocol? protocol = null, + bool highIntegrity = false, + Tunnel? tunnel = null, + [CallerMemberName] string caller = "") + { + StringWriter? localLog = null; + log ??= localLog = new StringWriter(); + try { - if (Output == null) + var config = ConfigurationOptions.Parse(configuration); + if (disabledCommands != null && disabledCommands.Length != 0) { - Assert.True(false, "Failure: Be sure to call the TestBase constuctor like this: BasicOpsTests(ITestOutputHelper output) : base(output) { }"); + config.CommandMap = CommandMap.Create([.. 
disabledCommands], false); } - - if (shared && _fixture != null && _fixture.IsEnabled && enabledCommands == null && disabledCommands == null && fail && channelPrefix == null && proxy == null - && configuration == null && password == null && tieBreaker == null && defaultDatabase == null && (allowAdmin == null || allowAdmin == true) && expectedFailCount == 0) + else if (enabledCommands != null && enabledCommands.Length != 0) { - configuration = GetConfiguration(); - if (configuration == _fixture.Configuration) - { // only if the - return _fixture.Connection; - } + config.CommandMap = CommandMap.Create([.. enabledCommands], true); } - var muxer = CreateDefault( - Writer, - clientName, syncTimeout, allowAdmin, keepAlive, - connectTimeout, password, tieBreaker, log, - fail, disabledCommands, enabledCommands, - checkConnect, failMessage, - channelPrefix, proxy, - configuration ?? GetConfiguration(), - logTransactionData, defaultDatabase, caller); - muxer.InternalError += OnInternalError; - muxer.ConnectionFailed += OnConnectionFailed; - return muxer; - } - - public static ConnectionMultiplexer CreateDefault( - TextWriter output, - string clientName = null, int? syncTimeout = null, bool? allowAdmin = null, int? keepAlive = null, - int? connectTimeout = null, string password = null, string tieBreaker = null, TextWriter log = null, - bool fail = true, string[] disabledCommands = null, string[] enabledCommands = null, - bool checkConnect = true, string failMessage = null, - string channelPrefix = null, Proxy? proxy = null, - string configuration = null, bool logTransactionData = true, - int? 
defaultDatabase = null, - - [CallerMemberName] string caller = null) - { - StringWriter localLog = null; - if(log == null) + if (Debugger.IsAttached) { - log = localLog = new StringWriter(); + syncTimeout = int.MaxValue; } - try - { - var config = ConfigurationOptions.Parse(configuration); - if (disabledCommands != null && disabledCommands.Length != 0) - { - config.CommandMap = CommandMap.Create(new HashSet(disabledCommands), false); - } - else if (enabledCommands != null && enabledCommands.Length != 0) - { - config.CommandMap = CommandMap.Create(new HashSet(enabledCommands), true); - } - if (Debugger.IsAttached) - { - syncTimeout = int.MaxValue; - } - - if (channelPrefix != null) config.ChannelPrefix = channelPrefix; - if (tieBreaker != null) config.TieBreaker = tieBreaker; - if (password != null) config.Password = string.IsNullOrEmpty(password) ? null : password; - if (clientName != null) config.ClientName = clientName; - else if (caller != null) config.ClientName = caller; - if (syncTimeout != null) config.SyncTimeout = syncTimeout.Value; - if (allowAdmin != null) config.AllowAdmin = allowAdmin.Value; - if (keepAlive != null) config.KeepAlive = keepAlive.Value; - if (connectTimeout != null) config.ConnectTimeout = connectTimeout.Value; - if (proxy != null) config.Proxy = proxy.Value; - if (defaultDatabase != null) config.DefaultDatabase = defaultDatabase.Value; - var watch = Stopwatch.StartNew(); - var task = ConnectionMultiplexer.ConnectAsync(config, log); - if (!task.Wait(config.ConnectTimeout >= (int.MaxValue / 2) ? int.MaxValue : config.ConnectTimeout * 2)) - { - task.ContinueWith(x => + config.Tunnel = tunnel; + if (channelPrefix is not null) config.ChannelPrefix = RedisChannel.Literal(channelPrefix); + if (tieBreaker is not null) config.TieBreaker = tieBreaker; + if (password is not null) config.Password = string.IsNullOrEmpty(password) ? 
null : password; + if (clientName is not null) config.ClientName = clientName; + else if (!string.IsNullOrEmpty(caller)) config.ClientName = caller; + if (syncTimeout is not null) config.SyncTimeout = syncTimeout.Value; + if (asyncTimeout is not null) config.AsyncTimeout = asyncTimeout.Value; + if (allowAdmin is not null) config.AllowAdmin = allowAdmin.Value; + if (keepAlive is not null) config.KeepAlive = keepAlive.Value; + if (connectTimeout is not null) config.ConnectTimeout = connectTimeout.Value; + if (proxy is not null) config.Proxy = proxy.Value; + if (defaultDatabase is not null) config.DefaultDatabase = defaultDatabase.Value; + if (backlogPolicy is not null) config.BacklogPolicy = backlogPolicy; + if (protocol is not null) config.Protocol = protocol; + if (highIntegrity) config.HighIntegrity = highIntegrity; + var watch = Stopwatch.StartNew(); + var task = ConnectionMultiplexer.ConnectAsync(config, log); + if (!task.Wait(config.ConnectTimeout >= (int.MaxValue / 2) ? int.MaxValue : config.ConnectTimeout * 2)) + { + task.ContinueWith( + x => { try { GC.KeepAlive(x.Exception); } catch { /* No boom */ } - }, TaskContinuationOptions.OnlyOnFaulted); - throw new TimeoutException("Connect timeout"); - } - watch.Stop(); - if (output != null) - { - Log(output, "Connect took: " + watch.ElapsedMilliseconds + "ms"); - } - var muxer = task.Result; - if (checkConnect && (muxer == null || !muxer.IsConnected)) - { - // If fail is true, we throw. - Assert.False(fail, failMessage + "Server is not available"); - Skip.Inconclusive(failMessage + "Server is not available"); - } - if (output != null) + }, + TaskContinuationOptions.OnlyOnFaulted); + throw new TimeoutException("Connect timeout"); + } + watch.Stop(); + if (output != null) + { + Log(output, "Connect took: " + watch.ElapsedMilliseconds + "ms"); + } + var conn = task.Result; + if (checkConnect && !conn.IsConnected) + { + // If fail is true, we throw. 
+ Assert.False(fail, failMessage + "Server is not available"); + Assert.Skip(failMessage + "Server is not available"); + } + if (output != null) + { + conn.MessageFaulted += (msg, ex, origin) => { - muxer.MessageFaulted += (msg, ex, origin) => - { - output?.WriteLine($"Faulted from '{origin}': '{msg}' - '{(ex == null ? "(null)" : ex.Message)}'"); - if (ex != null && ex.Data.Contains("got")) - { - output?.WriteLine($"Got: '{ex.Data["got"]}'"); - } - }; - muxer.Connecting += (e, t) => output?.WriteLine($"Connecting to {Format.ToString(e)} as {t}"); - if (logTransactionData) + output?.WriteLine($"Faulted from '{origin}': '{msg}' - '{(ex == null ? "(null)" : ex.Message)}'"); + if (ex != null && ex.Data.Contains("got")) { - muxer.TransactionLog += msg => output?.WriteLine("tran: " + msg); + output?.WriteLine($"Got: '{ex.Data["got"]}'"); } - muxer.InfoMessage += msg => output?.WriteLine(msg); - muxer.Resurrecting += (e, t) => output?.WriteLine($"Resurrecting {Format.ToString(e)} as {t}"); - muxer.Closing += complete => output?.WriteLine(complete ? "Closed" : "Closing..."); + }; + conn.Connecting += (e, t) => output?.WriteLine($"Connecting to {Format.ToString(e)} as {t}"); + if (logTransactionData) + { + conn.TransactionLog += msg => output?.WriteLine("tran: " + msg); } - return muxer; - } - catch - { - if (localLog != null) output?.WriteLine(localLog.ToString()); - throw; + conn.InfoMessage += msg => output?.WriteLine(msg); + conn.Resurrecting += (e, t) => output?.WriteLine($"Resurrecting {Format.ToString(e)} as {t}"); + conn.Closing += complete => output?.WriteLine(complete ? 
"Closed" : "Closing..."); } + return conn; + } + catch + { + if (localLog != null) output?.WriteLine(localLog.ToString()); + throw; } + } - public static string Me([CallerFilePath] string filePath = null, [CallerMemberName] string caller = null) => -#if NET462 - "net462-" -#elif NETCOREAPP2_1 - "netcoreapp2.1-" -#else - "unknown-" -#endif - + Path.GetFileNameWithoutExtension(filePath) + "-" + caller; - - protected static TimeSpan RunConcurrent(Action work, int threads, int timeout = 10000, [CallerMemberName] string caller = null) - { - if (work == null) throw new ArgumentNullException(nameof(work)); - if (threads < 1) throw new ArgumentOutOfRangeException(nameof(threads)); - if (string.IsNullOrWhiteSpace(caller)) caller = Me(); - Stopwatch watch = null; - ManualResetEvent allDone = new ManualResetEvent(false); - object token = new object(); - int active = 0; - void callback() + public virtual string Me([CallerFilePath] string? filePath = null, [CallerMemberName] string? caller = null) => + Environment.Version.ToString() + "-" + GetType().Name + "-" + Path.GetFileNameWithoutExtension(filePath) + "-" + caller + TestContext.Current.KeySuffix(); + + protected TimeSpan RunConcurrent(Action work, int threads, int timeout = 10000, [CallerMemberName] string? caller = null) + { + if (work == null) + { + throw new ArgumentNullException(nameof(work)); + } + if (threads < 1) + { + throw new ArgumentOutOfRangeException(nameof(threads)); + } + if (string.IsNullOrWhiteSpace(caller)) + { + caller = Me(); + } + + Stopwatch? 
watch = null; + ManualResetEvent allDone = new ManualResetEvent(false); + object token = new object(); + int active = 0; + void Callback() + { + lock (token) { - lock (token) + int nowActive = Interlocked.Increment(ref active); + if (nowActive == threads) { - int nowActive = Interlocked.Increment(ref active); - if (nowActive == threads) - { - watch = Stopwatch.StartNew(); - Monitor.PulseAll(token); - } - else - { - Monitor.Wait(token); - } + watch = Stopwatch.StartNew(); + Monitor.PulseAll(token); } - work(); - if (Interlocked.Decrement(ref active) == 0) + else { - watch.Stop(); - allDone.Set(); + Monitor.Wait(token); } } + work(); + if (Interlocked.Decrement(ref active) == 0) + { + watch?.Stop(); + allDone.Set(); + } + } - var threadArr = new Thread[threads]; + var threadArr = new Thread[threads]; + for (int i = 0; i < threads; i++) + { + var thd = new Thread(Callback) + { + Name = caller, + }; + threadArr[i] = thd; + thd.Start(); + } + if (!allDone.WaitOne(timeout)) + { for (int i = 0; i < threads; i++) { - var thd = new Thread(callback) - { - Name = caller - }; - threadArr[i] = thd; - thd.Start(); + var thd = threadArr[i]; +#if !NET + if (thd.IsAlive) thd.Abort(); +#endif } - if (!allDone.WaitOne(timeout)) + throw new TimeoutException(); + } + + return watch?.Elapsed ?? TimeSpan.Zero; + } + + private static readonly TimeSpan DefaultWaitPerLoop = TimeSpan.FromMilliseconds(50); + protected static async Task UntilConditionAsync(TimeSpan maxWaitTime, Func predicate, TimeSpan? waitPerLoop = null) + { + TimeSpan spent = TimeSpan.Zero; + while (spent < maxWaitTime && !predicate()) + { + var wait = waitPerLoop ?? 
DefaultWaitPerLoop; + await Task.Delay(wait).ForAwait(); + spent += wait; + } + } + + // simplified usage to get an interchangeable dedicated vs shared in-process server, useful for debugging + protected virtual bool UseDedicatedInProcessServer => false; // use the shared server by default + + internal ClientFactory ConnectFactory(bool allowAdmin = false, string? channelPrefix = null, bool shared = true) + { + if (UseDedicatedInProcessServer) + { + var server = new InProcessTestServer(Output); + return new ClientFactory(this, allowAdmin, channelPrefix, shared, server); + } + return new ClientFactory(this, allowAdmin, channelPrefix, shared, null); + } + + protected void SkipIfWouldUseInProcessServer(string? reason = null) + { + Assert.SkipWhen(_inProcServerFixture != null || UseDedicatedInProcessServer, reason ?? "In-process server is in use."); + } + + protected void SkipIfWouldUseRealServer(string? reason = null) + { + Assert.SkipUnless(_inProcServerFixture != null || UseDedicatedInProcessServer, reason ?? "Real server is in use."); + } + + internal sealed class ClientFactory : IDisposable, IAsyncDisposable + { + private readonly TestBase _testBase; + private readonly bool _allowAdmin; + private readonly string? _channelPrefix; + private readonly bool _shared; + private readonly InProcessTestServer? _server; + private IInternalConnectionMultiplexer? _defaultClient; + + internal ClientFactory(TestBase testBase, bool allowAdmin, string? channelPrefix, bool shared, InProcessTestServer? server) + { + _testBase = testBase; + _allowAdmin = allowAdmin; + _channelPrefix = channelPrefix; + _shared = shared; + _server = server; + } + + public IInternalConnectionMultiplexer DefaultClient => _defaultClient ??= CreateClient(); + + public InProcessTestServer? 
Server => _server; + + public IInternalConnectionMultiplexer CreateClient() + { + if (_server is not null) { - for (int i = 0; i < threads; i++) + var config = _server.GetClientConfig(); + config.AllowAdmin = _allowAdmin; + if (_channelPrefix is not null) { - var thd = threadArr[i]; -#pragma warning disable SYSLIB0006 // yes, we know - if (thd.IsAlive) thd.Abort(); -#pragma warning restore SYSLIB0006 // yes, we know + config.ChannelPrefix = RedisChannel.Literal(_channelPrefix); } - throw new TimeoutException(); + return ConnectionMultiplexer.ConnectAsync(config).Result; } + return _testBase.Create(allowAdmin: _allowAdmin, channelPrefix: _channelPrefix, shared: _shared); + } + + public IDatabase GetDatabase(int db = -1) => DefaultClient.GetDatabase(db); - return watch.Elapsed; + public ISubscriber GetSubscriber() => DefaultClient.GetSubscriber(); + + public void Dispose() + { + _server?.Dispose(); + _defaultClient?.Dispose(); } - private static readonly TimeSpan DefaultWaitPerLoop = TimeSpan.FromMilliseconds(50); - protected async Task UntilCondition(TimeSpan maxWaitTime, Func predicate, TimeSpan? waitPerLoop = null) + public ValueTask DisposeAsync() { - TimeSpan spent = TimeSpan.Zero; - while (spent < maxWaitTime && !predicate()) + _server?.Dispose(); + if (_defaultClient is not null) { - var wait = waitPerLoop ?? 
DefaultWaitPerLoop; - await Task.Delay(wait).ForAwait(); - spent += wait; + return _defaultClient.DisposeAsync(); } + return default; } } } diff --git a/tests/StackExchange.Redis.Tests/TestInfoReplicationChecks.cs b/tests/StackExchange.Redis.Tests/TestInfoReplicationChecks.cs deleted file mode 100644 index 169246894..000000000 --- a/tests/StackExchange.Redis.Tests/TestInfoReplicationChecks.cs +++ /dev/null @@ -1,29 +0,0 @@ -using System.Threading.Tasks; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - public class TestInfoReplicationChecks : TestBase - { - protected override string GetConfiguration() => base.GetConfiguration() + ",configCheckSeconds=2"; - public TestInfoReplicationChecks(ITestOutputHelper output) : base (output) { } - - [Fact] - public async Task Exec() - { - Skip.Inconclusive("need to think about CompletedSynchronously"); - - using(var conn = Create()) - { - var parsed = ConfigurationOptions.Parse(conn.Configuration); - Assert.Equal(2, parsed.ConfigCheckSeconds); - var before = conn.GetCounters(); - await Task.Delay(7000).ForAwait(); - var after = conn.GetCounters(); - int done = (int)(after.Interactive.CompletedSynchronously - before.Interactive.CompletedSynchronously); - Assert.True(done >= 2, $"expected >=2, got {done}"); - } - } - } -} diff --git a/tests/StackExchange.Redis.Tests/TransactionTests.cs b/tests/StackExchange.Redis.Tests/TransactionTests.cs new file mode 100644 index 000000000..3a0f1e40e --- /dev/null +++ b/tests/StackExchange.Redis.Tests/TransactionTests.cs @@ -0,0 +1,1434 @@ +using System; +using System.Threading.Tasks; +using Xunit; + +namespace StackExchange.Redis.Tests; + +[RunPerProtocol] +public class TransactionTests(ITestOutputHelper output, SharedConnectionFixture fixture) : TestBase(output, fixture) +{ + [Fact] + public async Task BasicEmptyTran() + { + await using var conn = Create(); + + RedisKey key = Me(); + var db = conn.GetDatabase(); + db.KeyDelete(key, 
CommandFlags.FireAndForget); + Assert.False(db.KeyExists(key)); + + var tran = db.CreateTransaction(); + + var result = tran.Execute(); + Assert.True(result); + } + + [Fact] + public async Task NestedTransactionThrows() + { + await using var conn = Create(); + + var db = conn.GetDatabase(); + var tran = db.CreateTransaction(); + var redisTransaction = Assert.IsType(tran); + Assert.Throws(() => redisTransaction.CreateTransaction(null)); + } + + [Theory] + [InlineData(false, false, true)] + [InlineData(false, true, false)] + [InlineData(true, false, false)] + [InlineData(true, true, true)] + public async Task BasicTranWithExistsCondition(bool demandKeyExists, bool keyExists, bool expectTranResult) + { + await using var conn = Create(disabledCommands: ["info", "config"]); + + RedisKey key = Me(), key2 = Me() + "2"; + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + db.KeyDelete(key2, CommandFlags.FireAndForget); + if (keyExists) db.StringSet(key2, "any value", flags: CommandFlags.FireAndForget); + Assert.False(db.KeyExists(key)); + Assert.Equal(keyExists, db.KeyExists(key2)); + + var tran = db.CreateTransaction(); + var cond = tran.AddCondition(demandKeyExists ? 
Condition.KeyExists(key2) : Condition.KeyNotExists(key2)); + var incr = tran.StringIncrementAsync(key); + var exec = tran.ExecuteAsync(); + var get = db.StringGet(key); + + Assert.Equal(expectTranResult, await exec); + if (demandKeyExists == keyExists) + { + Assert.True(await exec, "eq: exec"); + Assert.True(cond.WasSatisfied, "eq: was satisfied"); + Assert.Equal(1, await incr); // eq: incr + Assert.Equal(1, (long)get); // eq: get + } + else + { + Assert.False(await exec, "neq: exec"); + Assert.False(cond.WasSatisfied, "neq: was satisfied"); + Assert.Equal(TaskStatus.Canceled, SafeStatus(incr)); // neq: incr + Assert.Equal(0, (long)get); // neq: get + } + } + + [Theory] + [InlineData("same", "same", true, true)] + [InlineData("x", "y", true, false)] + [InlineData("x", null, true, false)] + [InlineData(null, "y", true, false)] + [InlineData(null, null, true, true)] + + [InlineData("same", "same", false, false)] + [InlineData("x", "y", false, true)] + [InlineData("x", null, false, true)] + [InlineData(null, "y", false, true)] + [InlineData(null, null, false, false)] + public async Task BasicTranWithEqualsCondition(string? expected, string? value, bool expectEqual, bool expectTranResult) + { + await using var conn = Create(); + + RedisKey key = Me(), key2 = Me() + "2"; + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + db.KeyDelete(key2, CommandFlags.FireAndForget); + + if (value != null) db.StringSet(key2, value, flags: CommandFlags.FireAndForget); + Assert.False(db.KeyExists(key)); + Assert.Equal(value, db.StringGet(key2)); + + var tran = db.CreateTransaction(); + var cond = tran.AddCondition(expectEqual ? 
Condition.StringEqual(key2, expected) : Condition.StringNotEqual(key2, expected)); + var incr = tran.StringIncrementAsync(key); + var exec = tran.ExecuteAsync(); + var get = db.StringGet(key); + + Assert.Equal(expectTranResult, await exec); + if (expectEqual == (value == expected)) + { + Assert.True(await exec, "eq: exec"); + Assert.True(cond.WasSatisfied, "eq: was satisfied"); + Assert.Equal(1, await incr); // eq: incr + Assert.Equal(1, (long)get); // eq: get + } + else + { + Assert.False(await exec, "neq: exec"); + Assert.False(cond.WasSatisfied, "neq: was satisfied"); + Assert.Equal(TaskStatus.Canceled, SafeStatus(incr)); // neq: incr + Assert.Equal(0, (long)get); // neq: get + } + } + + [Theory] + [InlineData(false, false, true)] + [InlineData(false, true, false)] + [InlineData(true, false, false)] + [InlineData(true, true, true)] + public async Task BasicTranWithHashExistsCondition(bool demandKeyExists, bool keyExists, bool expectTranResult) + { + await using var conn = Create(disabledCommands: ["info", "config"]); + + RedisKey key = Me(), key2 = Me() + "2"; + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + db.KeyDelete(key2, CommandFlags.FireAndForget); + RedisValue hashField = "field"; + if (keyExists) db.HashSet(key2, hashField, "any value", flags: CommandFlags.FireAndForget); + Assert.False(db.KeyExists(key)); + Assert.Equal(keyExists, db.HashExists(key2, hashField)); + + var tran = db.CreateTransaction(); + var cond = tran.AddCondition(demandKeyExists ? 
Condition.HashExists(key2, hashField) : Condition.HashNotExists(key2, hashField)); + var incr = tran.StringIncrementAsync(key); + var exec = tran.ExecuteAsync(); + var get = db.StringGet(key); + + Assert.Equal(expectTranResult, await exec); + if (demandKeyExists == keyExists) + { + Assert.True(await exec, "eq: exec"); + Assert.True(cond.WasSatisfied, "eq: was satisfied"); + Assert.Equal(1, await incr); // eq: incr + Assert.Equal(1, (long)get); // eq: get + } + else + { + Assert.False(await exec, "neq: exec"); + Assert.False(cond.WasSatisfied, "neq: was satisfied"); + Assert.Equal(TaskStatus.Canceled, SafeStatus(incr)); // neq: incr + Assert.Equal(0, (long)get); // neq: get + } + } + + [Theory] + [InlineData("same", "same", true, true)] + [InlineData("x", "y", true, false)] + [InlineData("x", null, true, false)] + [InlineData(null, "y", true, false)] + [InlineData(null, null, true, true)] + + [InlineData("same", "same", false, false)] + [InlineData("x", "y", false, true)] + [InlineData("x", null, false, true)] + [InlineData(null, "y", false, true)] + [InlineData(null, null, false, false)] + public async Task BasicTranWithHashEqualsCondition(string? expected, string? value, bool expectEqual, bool expectedTranResult) + { + await using var conn = Create(); + + RedisKey key = Me(), key2 = Me() + "2"; + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + db.KeyDelete(key2, CommandFlags.FireAndForget); + + RedisValue hashField = "field"; + if (value != null) db.HashSet(key2, hashField, value, flags: CommandFlags.FireAndForget); + Assert.False(db.KeyExists(key)); + Assert.Equal(value, db.HashGet(key2, hashField)); + + var tran = db.CreateTransaction(); + var cond = tran.AddCondition(expectEqual ? 
Condition.HashEqual(key2, hashField, expected) : Condition.HashNotEqual(key2, hashField, expected)); + var incr = tran.StringIncrementAsync(key); + var exec = tran.ExecuteAsync(); + var get = db.StringGet(key); + + Assert.Equal(expectedTranResult, await exec); + if (expectEqual == (value == expected)) + { + Assert.True(await exec, "eq: exec"); + Assert.True(cond.WasSatisfied, "eq: was satisfied"); + Assert.Equal(1, await incr); // eq: incr + Assert.Equal(1, (long)get); // eq: get + } + else + { + Assert.False(await exec, "neq: exec"); + Assert.False(cond.WasSatisfied, "neq: was satisfied"); + Assert.Equal(TaskStatus.Canceled, SafeStatus(incr)); // neq: incr + Assert.Equal(0, (long)get); // neq: get + } + } + + private static TaskStatus SafeStatus(Task task) + { + if (task.Status == TaskStatus.WaitingForActivation) + { + try + { + if (!task.Wait(1000)) throw new TimeoutException("timeout waiting for task to complete"); + } + catch (AggregateException ex) + when (ex.InnerException is TaskCanceledException + || (ex.InnerExceptions.Count == 1 && ex.InnerException is TaskCanceledException)) + { + return TaskStatus.Canceled; + } + catch (TaskCanceledException) + { + return TaskStatus.Canceled; + } + } + return task.Status; + } + + [Theory] + [InlineData(false, false, true)] + [InlineData(false, true, false)] + [InlineData(true, false, false)] + [InlineData(true, true, true)] + public async Task BasicTranWithListExistsCondition(bool demandKeyExists, bool keyExists, bool expectTranResult) + { + await using var conn = Create(disabledCommands: ["info", "config"]); + + RedisKey key = Me(), key2 = Me() + "2"; + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + db.KeyDelete(key2, CommandFlags.FireAndForget); + if (keyExists) db.ListRightPush(key2, "any value", flags: CommandFlags.FireAndForget); + Assert.False(db.KeyExists(key)); + Assert.Equal(keyExists, db.KeyExists(key2)); + + var tran = db.CreateTransaction(); + var cond = 
tran.AddCondition(demandKeyExists ? Condition.ListIndexExists(key2, 0) : Condition.ListIndexNotExists(key2, 0)); + var push = tran.ListRightPushAsync(key, "any value"); + var exec = tran.ExecuteAsync(); + var get = db.ListGetByIndex(key, 0); + + Assert.Equal(expectTranResult, await exec); + if (demandKeyExists == keyExists) + { + Assert.True(await exec, "eq: exec"); + Assert.True(cond.WasSatisfied, "eq: was satisfied"); + Assert.Equal(1, await push); // eq: push + Assert.Equal("any value", get); // eq: get + } + else + { + Assert.False(await exec, "neq: exec"); + Assert.False(cond.WasSatisfied, "neq: was satisfied"); + Assert.Equal(TaskStatus.Canceled, SafeStatus(push)); // neq: push + Assert.Null((string?)get); // neq: get + } + } + + [Theory] + [InlineData("same", "same", true, true)] + [InlineData("x", "y", true, false)] + [InlineData("x", null, true, false)] + [InlineData(null, "y", true, false)] + [InlineData(null, null, true, true)] + + [InlineData("same", "same", false, false)] + [InlineData("x", "y", false, true)] + [InlineData("x", null, false, true)] + [InlineData(null, "y", false, true)] + [InlineData(null, null, false, false)] + public async Task BasicTranWithListEqualsCondition(string? expected, string? value, bool expectEqual, bool expectTranResult) + { + await using var conn = Create(); + + RedisKey key = Me(), key2 = Me() + "2"; + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + db.KeyDelete(key2, CommandFlags.FireAndForget); + + if (value != null) db.ListRightPush(key2, value, flags: CommandFlags.FireAndForget); + Assert.False(db.KeyExists(key)); + Assert.Equal(value, db.ListGetByIndex(key2, 0)); + + var tran = db.CreateTransaction(); + var cond = tran.AddCondition(expectEqual ? 
Condition.ListIndexEqual(key2, 0, expected) : Condition.ListIndexNotEqual(key2, 0, expected)); + var push = tran.ListRightPushAsync(key, "any value"); + var exec = tran.ExecuteAsync(); + var get = db.ListGetByIndex(key, 0); + + Assert.Equal(expectTranResult, await exec); + if (expectEqual == (value == expected)) + { + Assert.True(await exec, "eq: exec"); + Assert.True(cond.WasSatisfied, "eq: was satisfied"); + Assert.Equal(1, await push); // eq: push + Assert.Equal("any value", get); // eq: get + } + else + { + Assert.False(await exec, "neq: exec"); + Assert.False(cond.WasSatisfied, "neq: was satisfied"); + Assert.Equal(TaskStatus.Canceled, SafeStatus(push)); // neq: push + Assert.Null((string?)get); // neq: get + } + } + + public enum ComparisonType + { + Equal, + LessThan, + GreaterThan, + } + + [Theory] + [InlineData("five", ComparisonType.Equal, 5L, false)] + [InlineData("four", ComparisonType.Equal, 4L, true)] + [InlineData("three", ComparisonType.Equal, 3L, false)] + [InlineData("", ComparisonType.Equal, 2L, false)] + [InlineData("", ComparisonType.Equal, 0L, true)] + [InlineData(null, ComparisonType.Equal, 1L, false)] + [InlineData(null, ComparisonType.Equal, 0L, true)] + + [InlineData("five", ComparisonType.LessThan, 5L, true)] + [InlineData("four", ComparisonType.LessThan, 4L, false)] + [InlineData("three", ComparisonType.LessThan, 3L, false)] + [InlineData("", ComparisonType.LessThan, 2L, true)] + [InlineData("", ComparisonType.LessThan, 0L, false)] + [InlineData(null, ComparisonType.LessThan, 1L, true)] + [InlineData(null, ComparisonType.LessThan, 0L, false)] + + [InlineData("five", ComparisonType.GreaterThan, 5L, false)] + [InlineData("four", ComparisonType.GreaterThan, 4L, false)] + [InlineData("three", ComparisonType.GreaterThan, 3L, true)] + [InlineData("", ComparisonType.GreaterThan, 2L, false)] + [InlineData("", ComparisonType.GreaterThan, 0L, false)] + [InlineData(null, ComparisonType.GreaterThan, 1L, false)] + [InlineData(null, 
ComparisonType.GreaterThan, 0L, false)] + public async Task BasicTranWithStringLengthCondition(string? value, ComparisonType type, long length, bool expectTranResult) + { + await using var conn = Create(); + + RedisKey key = Me(), key2 = Me() + "2"; + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + db.KeyDelete(key2, CommandFlags.FireAndForget); + + bool expectSuccess; + Condition? condition; + var valueLength = value?.Length ?? 0; + switch (type) + { + case ComparisonType.Equal: + expectSuccess = valueLength == length; + condition = Condition.StringLengthEqual(key2, length); + Assert.Contains("String length == " + length, condition.ToString()); + break; + case ComparisonType.GreaterThan: + expectSuccess = valueLength > length; + condition = Condition.StringLengthGreaterThan(key2, length); + Assert.Contains("String length > " + length, condition.ToString()); + break; + case ComparisonType.LessThan: + expectSuccess = valueLength < length; + condition = Condition.StringLengthLessThan(key2, length); + Assert.Contains("String length < " + length, condition.ToString()); + break; + default: + throw new ArgumentOutOfRangeException(nameof(type)); + } + + if (value != null) db.StringSet(key2, value, flags: CommandFlags.FireAndForget); + Assert.False(db.KeyExists(key)); + Assert.Equal(value, db.StringGet(key2)); + + var tran = db.CreateTransaction(); + var cond = tran.AddCondition(condition); + var push = tran.StringSetAsync(key, "any value"); + var exec = tran.ExecuteAsync(); + var get = db.StringLength(key); + + Assert.Equal(expectTranResult, await exec); + + if (expectSuccess) + { + Assert.True(await exec, "eq: exec"); + Assert.True(cond.WasSatisfied, "eq: was satisfied"); + Assert.True(await push); // eq: push + Assert.Equal("any value".Length, get); // eq: get + } + else + { + Assert.False(await exec, "neq: exec"); + Assert.False(cond.WasSatisfied, "neq: was satisfied"); + Assert.Equal(TaskStatus.Canceled, SafeStatus(push)); // neq: push 
+ Assert.Equal(0, get); // neq: get + } + } + + [Theory] + [InlineData("five", ComparisonType.Equal, 5L, false)] + [InlineData("four", ComparisonType.Equal, 4L, true)] + [InlineData("three", ComparisonType.Equal, 3L, false)] + [InlineData("", ComparisonType.Equal, 2L, false)] + [InlineData("", ComparisonType.Equal, 0L, true)] + + [InlineData("five", ComparisonType.LessThan, 5L, true)] + [InlineData("four", ComparisonType.LessThan, 4L, false)] + [InlineData("three", ComparisonType.LessThan, 3L, false)] + [InlineData("", ComparisonType.LessThan, 2L, true)] + [InlineData("", ComparisonType.LessThan, 0L, false)] + + [InlineData("five", ComparisonType.GreaterThan, 5L, false)] + [InlineData("four", ComparisonType.GreaterThan, 4L, false)] + [InlineData("three", ComparisonType.GreaterThan, 3L, true)] + [InlineData("", ComparisonType.GreaterThan, 2L, false)] + [InlineData("", ComparisonType.GreaterThan, 0L, false)] + public async Task BasicTranWithHashLengthCondition(string value, ComparisonType type, long length, bool expectTranResult) + { + await using var conn = Create(); + + RedisKey key = Me(), key2 = Me() + "2"; + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + db.KeyDelete(key2, CommandFlags.FireAndForget); + + bool expectSuccess; + Condition? condition; + var valueLength = value?.Length ?? 
0; + switch (type) + { + case ComparisonType.Equal: + expectSuccess = valueLength == length; + condition = Condition.HashLengthEqual(key2, length); + break; + case ComparisonType.GreaterThan: + expectSuccess = valueLength > length; + condition = Condition.HashLengthGreaterThan(key2, length); + break; + case ComparisonType.LessThan: + expectSuccess = valueLength < length; + condition = Condition.HashLengthLessThan(key2, length); + break; + default: + throw new ArgumentOutOfRangeException(nameof(type)); + } + + for (var i = 0; i < valueLength; i++) + { + db.HashSet(key2, i, value![i].ToString(), flags: CommandFlags.FireAndForget); + } + Assert.False(db.KeyExists(key)); + Assert.Equal(valueLength, db.HashLength(key2)); + + var tran = db.CreateTransaction(); + var cond = tran.AddCondition(condition); + var push = tran.StringSetAsync(key, "any value"); + var exec = tran.ExecuteAsync(); + var get = db.StringLength(key); + + Assert.Equal(expectTranResult, await exec); + + if (expectSuccess) + { + Assert.True(await exec, "eq: exec"); + Assert.True(cond.WasSatisfied, "eq: was satisfied"); + Assert.True(await push); // eq: push + Assert.Equal("any value".Length, get); // eq: get + } + else + { + Assert.False(await exec, "neq: exec"); + Assert.False(cond.WasSatisfied, "neq: was satisfied"); + Assert.Equal(TaskStatus.Canceled, SafeStatus(push)); // neq: push + Assert.Equal(0, get); // neq: get + } + } + + [Theory] + [InlineData("five", ComparisonType.Equal, 5L, false)] + [InlineData("four", ComparisonType.Equal, 4L, true)] + [InlineData("three", ComparisonType.Equal, 3L, false)] + [InlineData("", ComparisonType.Equal, 2L, false)] + [InlineData("", ComparisonType.Equal, 0L, true)] + + [InlineData("five", ComparisonType.LessThan, 5L, true)] + [InlineData("four", ComparisonType.LessThan, 4L, false)] + [InlineData("three", ComparisonType.LessThan, 3L, false)] + [InlineData("", ComparisonType.LessThan, 2L, true)] + [InlineData("", ComparisonType.LessThan, 0L, false)] + + 
[InlineData("five", ComparisonType.GreaterThan, 5L, false)] + [InlineData("four", ComparisonType.GreaterThan, 4L, false)] + [InlineData("three", ComparisonType.GreaterThan, 3L, true)] + [InlineData("", ComparisonType.GreaterThan, 2L, false)] + [InlineData("", ComparisonType.GreaterThan, 0L, false)] + public async Task BasicTranWithSetCardinalityCondition(string value, ComparisonType type, long length, bool expectTranResult) + { + await using var conn = Create(); + + RedisKey key = Me(), key2 = Me() + "2"; + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + db.KeyDelete(key2, CommandFlags.FireAndForget); + + bool expectSuccess; + Condition? condition; + var valueLength = value?.Length ?? 0; + switch (type) + { + case ComparisonType.Equal: + expectSuccess = valueLength == length; + condition = Condition.SetLengthEqual(key2, length); + break; + case ComparisonType.GreaterThan: + expectSuccess = valueLength > length; + condition = Condition.SetLengthGreaterThan(key2, length); + break; + case ComparisonType.LessThan: + expectSuccess = valueLength < length; + condition = Condition.SetLengthLessThan(key2, length); + break; + default: + throw new ArgumentOutOfRangeException(nameof(type)); + } + + for (var i = 0; i < valueLength; i++) + { + db.SetAdd(key2, i, flags: CommandFlags.FireAndForget); + } + Assert.False(db.KeyExists(key)); + Assert.Equal(valueLength, db.SetLength(key2)); + + var tran = db.CreateTransaction(); + var cond = tran.AddCondition(condition); + var push = tran.StringSetAsync(key, "any value"); + var exec = tran.ExecuteAsync(); + var get = db.StringLength(key); + + Assert.Equal(expectTranResult, await exec); + + if (expectSuccess) + { + Assert.True(await exec, "eq: exec"); + Assert.True(cond.WasSatisfied, "eq: was satisfied"); + Assert.True(await push); // eq: push + Assert.Equal("any value".Length, get); // eq: get + } + else + { + Assert.False(await exec, "neq: exec"); + Assert.False(cond.WasSatisfied, "neq: was satisfied"); 
+ Assert.Equal(TaskStatus.Canceled, SafeStatus(push)); // neq: push + Assert.Equal(0, get); // neq: get + } + } + + [Theory] + [InlineData(false, false, true)] + [InlineData(false, true, false)] + [InlineData(true, false, false)] + [InlineData(true, true, true)] + public async Task BasicTranWithSetContainsCondition(bool demandKeyExists, bool keyExists, bool expectTranResult) + { + await using var conn = Create(disabledCommands: ["info", "config"]); + + RedisKey key = Me(), key2 = Me() + "2"; + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + db.KeyDelete(key2, CommandFlags.FireAndForget); + RedisValue member = "value"; + if (keyExists) db.SetAdd(key2, member, flags: CommandFlags.FireAndForget); + Assert.False(db.KeyExists(key)); + Assert.Equal(keyExists, db.SetContains(key2, member)); + + var tran = db.CreateTransaction(); + var cond = tran.AddCondition(demandKeyExists ? Condition.SetContains(key2, member) : Condition.SetNotContains(key2, member)); + var incr = tran.StringIncrementAsync(key); + var exec = tran.ExecuteAsync(); + var get = db.StringGet(key); + + Assert.Equal(expectTranResult, await exec); + if (demandKeyExists == keyExists) + { + Assert.True(await exec, "eq: exec"); + Assert.True(cond.WasSatisfied, "eq: was satisfied"); + Assert.Equal(1, await incr); // eq: incr + Assert.Equal(1, (long)get); // eq: get + } + else + { + Assert.False(await exec, "neq: exec"); + Assert.False(cond.WasSatisfied, "neq: was satisfied"); + Assert.Equal(TaskStatus.Canceled, SafeStatus(incr)); // neq: incr + Assert.Equal(0, (long)get); // neq: get + } + } + + [Theory] + [InlineData("five", ComparisonType.Equal, 5L, false)] + [InlineData("four", ComparisonType.Equal, 4L, true)] + [InlineData("three", ComparisonType.Equal, 3L, false)] + [InlineData("", ComparisonType.Equal, 2L, false)] + [InlineData("", ComparisonType.Equal, 0L, true)] + + [InlineData("five", ComparisonType.LessThan, 5L, true)] + [InlineData("four", ComparisonType.LessThan, 4L, 
false)] + [InlineData("three", ComparisonType.LessThan, 3L, false)] + [InlineData("", ComparisonType.LessThan, 2L, true)] + [InlineData("", ComparisonType.LessThan, 0L, false)] + + [InlineData("five", ComparisonType.GreaterThan, 5L, false)] + [InlineData("four", ComparisonType.GreaterThan, 4L, false)] + [InlineData("three", ComparisonType.GreaterThan, 3L, true)] + [InlineData("", ComparisonType.GreaterThan, 2L, false)] + [InlineData("", ComparisonType.GreaterThan, 0L, false)] + public async Task BasicTranWithSortedSetCardinalityCondition(string value, ComparisonType type, long length, bool expectTranResult) + { + await using var conn = Create(); + + RedisKey key = Me(), key2 = Me() + "2"; + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + db.KeyDelete(key2, CommandFlags.FireAndForget); + + bool expectSuccess; + Condition? condition; + var valueLength = value?.Length ?? 0; + switch (type) + { + case ComparisonType.Equal: + expectSuccess = valueLength == length; + condition = Condition.SortedSetLengthEqual(key2, length); + break; + case ComparisonType.GreaterThan: + expectSuccess = valueLength > length; + condition = Condition.SortedSetLengthGreaterThan(key2, length); + break; + case ComparisonType.LessThan: + expectSuccess = valueLength < length; + condition = Condition.SortedSetLengthLessThan(key2, length); + break; + default: + throw new ArgumentOutOfRangeException(nameof(type)); + } + + for (var i = 0; i < valueLength; i++) + { + db.SortedSetAdd(key2, i, i, flags: CommandFlags.FireAndForget); + } + Assert.False(db.KeyExists(key)); + Assert.Equal(valueLength, db.SortedSetLength(key2)); + + var tran = db.CreateTransaction(); + var cond = tran.AddCondition(condition); + var push = tran.StringSetAsync(key, "any value"); + var exec = tran.ExecuteAsync(); + var get = db.StringLength(key); + + Assert.Equal(expectTranResult, await exec); + + if (expectSuccess) + { + Assert.True(await exec, "eq: exec"); + Assert.True(cond.WasSatisfied, "eq: 
was satisfied"); + Assert.True(await push); // eq: push + Assert.Equal("any value".Length, get); // eq: get + } + else + { + Assert.False(await exec, "neq: exec"); + Assert.False(cond.WasSatisfied, "neq: was satisfied"); + Assert.Equal(TaskStatus.Canceled, SafeStatus(push)); // neq: push + Assert.Equal(0, get); // neq: get + } + } + + [Theory] + [InlineData(1, 4, ComparisonType.Equal, 5L, false)] + [InlineData(1, 4, ComparisonType.Equal, 4L, true)] + [InlineData(1, 2, ComparisonType.Equal, 3L, false)] + [InlineData(1, 1, ComparisonType.Equal, 2L, false)] + [InlineData(0, 0, ComparisonType.Equal, 0L, false)] + + [InlineData(1, 4, ComparisonType.LessThan, 5L, true)] + [InlineData(1, 4, ComparisonType.LessThan, 4L, false)] + [InlineData(1, 3, ComparisonType.LessThan, 3L, false)] + [InlineData(1, 1, ComparisonType.LessThan, 2L, true)] + [InlineData(0, 0, ComparisonType.LessThan, 0L, false)] + + [InlineData(1, 5, ComparisonType.GreaterThan, 5L, false)] + [InlineData(1, 4, ComparisonType.GreaterThan, 4L, false)] + [InlineData(1, 4, ComparisonType.GreaterThan, 3L, true)] + [InlineData(1, 2, ComparisonType.GreaterThan, 2L, false)] + [InlineData(0, 0, ComparisonType.GreaterThan, 0L, true)] + public async Task BasicTranWithSortedSetRangeCountCondition(double min, double max, ComparisonType type, long length, bool expectTranResult) + { + await using var conn = Create(); + + RedisKey key = Me(), key2 = Me() + "2"; + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + db.KeyDelete(key2, CommandFlags.FireAndForget); + + bool expectSuccess; + Condition? 
condition; + var valueLength = (int)(max - min) + 1; + switch (type) + { + case ComparisonType.Equal: + expectSuccess = valueLength == length; + condition = Condition.SortedSetLengthEqual(key2, length, min, max); + break; + case ComparisonType.GreaterThan: + expectSuccess = valueLength > length; + condition = Condition.SortedSetLengthGreaterThan(key2, length, min, max); + break; + case ComparisonType.LessThan: + expectSuccess = valueLength < length; + condition = Condition.SortedSetLengthLessThan(key2, length, min, max); + break; + default: + throw new ArgumentOutOfRangeException(nameof(type)); + } + + for (var i = 0; i < 5; i++) + { + db.SortedSetAdd(key2, i, i, flags: CommandFlags.FireAndForget); + } + Assert.False(db.KeyExists(key)); + Assert.Equal(5, db.SortedSetLength(key2)); + + var tran = db.CreateTransaction(); + var cond = tran.AddCondition(condition); + var push = tran.StringSetAsync(key, "any value"); + var exec = tran.ExecuteAsync(); + var get = db.StringLength(key); + + Assert.Equal(expectTranResult, await exec); + + if (expectSuccess) + { + Assert.True(await exec, "eq: exec"); + Assert.True(cond.WasSatisfied, "eq: was satisfied"); + Assert.True(await push); // eq: push + Assert.Equal("any value".Length, get); // eq: get + } + else + { + Assert.False(await exec, "neq: exec"); + Assert.False(cond.WasSatisfied, "neq: was satisfied"); + Assert.Equal(TaskStatus.Canceled, SafeStatus(push)); // neq: push + Assert.Equal(0, get); // neq: get + } + } + + [Theory] + [InlineData(false, false, true)] + [InlineData(false, true, false)] + [InlineData(true, false, false)] + [InlineData(true, true, true)] + public async Task BasicTranWithSortedSetContainsCondition(bool demandKeyExists, bool keyExists, bool expectTranResult) + { + await using var conn = Create(disabledCommands: ["info", "config"]); + + RedisKey key = Me(), key2 = Me() + "2"; + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + db.KeyDelete(key2, CommandFlags.FireAndForget); 
+ RedisValue member = "value"; + if (keyExists) db.SortedSetAdd(key2, member, 0.0, flags: CommandFlags.FireAndForget); + Assert.False(db.KeyExists(key)); + Assert.Equal(keyExists, db.SortedSetScore(key2, member).HasValue); + + var tran = db.CreateTransaction(); + var cond = tran.AddCondition(demandKeyExists ? Condition.SortedSetContains(key2, member) : Condition.SortedSetNotContains(key2, member)); + var incr = tran.StringIncrementAsync(key); + var exec = tran.ExecuteAsync(); + var get = db.StringGet(key); + + Assert.Equal(expectTranResult, await exec); + if (demandKeyExists == keyExists) + { + Assert.True(await exec, "eq: exec"); + Assert.True(cond.WasSatisfied, "eq: was satisfied"); + Assert.Equal(1, await incr); // eq: incr + Assert.Equal(1, (long)get); // eq: get + } + else + { + Assert.False(await exec, "neq: exec"); + Assert.False(cond.WasSatisfied, "neq: was satisfied"); + Assert.Equal(TaskStatus.Canceled, SafeStatus(incr)); // neq: incr + Assert.Equal(0, (long)get); // neq: get + } + } + + public enum SortedSetValue + { + None, + Exact, + Shorter, + Longer, + } + + [Theory] + [InlineData(false, SortedSetValue.None, true)] + [InlineData(false, SortedSetValue.Shorter, true)] + [InlineData(false, SortedSetValue.Exact, false)] + [InlineData(false, SortedSetValue.Longer, false)] + [InlineData(true, SortedSetValue.None, false)] + [InlineData(true, SortedSetValue.Shorter, false)] + [InlineData(true, SortedSetValue.Exact, true)] + [InlineData(true, SortedSetValue.Longer, true)] + public async Task BasicTranWithSortedSetStartsWithCondition_String(bool requestExists, SortedSetValue existingValue, bool expectTranResult) + { + await using var conn = Create(); + + RedisKey key1 = Me() + "_1", key2 = Me() + "_2"; + var db = conn.GetDatabase(); + db.KeyDelete(key1, CommandFlags.FireAndForget); + db.KeyDelete(key2, CommandFlags.FireAndForget); + + db.SortedSetAdd(key2, "unrelated", 0.0, flags: CommandFlags.FireAndForget); + switch (existingValue) + { + case 
SortedSetValue.Shorter: + db.SortedSetAdd(key2, "see", 0.0, flags: CommandFlags.FireAndForget); + break; + case SortedSetValue.Exact: + db.SortedSetAdd(key2, "seek", 0.0, flags: CommandFlags.FireAndForget); + break; + case SortedSetValue.Longer: + db.SortedSetAdd(key2, "seeks", 0.0, flags: CommandFlags.FireAndForget); + break; + } + + var tran = db.CreateTransaction(); + var cond = tran.AddCondition(requestExists ? Condition.SortedSetContainsStarting(key2, "seek") : Condition.SortedSetNotContainsStarting(key2, "seek")); + var incr = tran.StringIncrementAsync(key1); + var exec = await tran.ExecuteAsync(); + var get = await db.StringGetAsync(key1); + + Assert.Equal(expectTranResult, exec); + Assert.Equal(expectTranResult, cond.WasSatisfied); + + if (expectTranResult) + { + Assert.Equal(1, await incr); // eq: incr + Assert.Equal(1, (long)get); // eq: get + } + else + { + Assert.Equal(TaskStatus.Canceled, SafeStatus(incr)); // neq: incr + Assert.Equal(0, (long)get); // neq: get + } + } + + [Theory] + [InlineData(false, SortedSetValue.None, true)] + [InlineData(false, SortedSetValue.Shorter, true)] + [InlineData(false, SortedSetValue.Exact, false)] + [InlineData(false, SortedSetValue.Longer, false)] + [InlineData(true, SortedSetValue.None, false)] + [InlineData(true, SortedSetValue.Shorter, false)] + [InlineData(true, SortedSetValue.Exact, true)] + [InlineData(true, SortedSetValue.Longer, true)] + public async Task BasicTranWithSortedSetStartsWithCondition_Integer(bool requestExists, SortedSetValue existingValue, bool expectTranResult) + { + await using var conn = Create(); + + RedisKey key1 = Me() + "_1", key2 = Me() + "_2"; + var db = conn.GetDatabase(); + db.KeyDelete(key1, CommandFlags.FireAndForget); + db.KeyDelete(key2, CommandFlags.FireAndForget); + + db.SortedSetAdd(key2, 789, 0.0, flags: CommandFlags.FireAndForget); + switch (existingValue) + { + case SortedSetValue.Shorter: + db.SortedSetAdd(key2, 123, 0.0, flags: CommandFlags.FireAndForget); + break; + case 
SortedSetValue.Exact: + db.SortedSetAdd(key2, 1234, 0.0, flags: CommandFlags.FireAndForget); + break; + case SortedSetValue.Longer: + db.SortedSetAdd(key2, 12345, 0.0, flags: CommandFlags.FireAndForget); + break; + } + + var tran = db.CreateTransaction(); + var cond = tran.AddCondition(requestExists ? Condition.SortedSetContainsStarting(key2, 1234) : Condition.SortedSetNotContainsStarting(key2, 1234)); + var incr = tran.StringIncrementAsync(key1); + var exec = await tran.ExecuteAsync(); + var get = await db.StringGetAsync(key1); + + Assert.Equal(expectTranResult, exec); + Assert.Equal(expectTranResult, cond.WasSatisfied); + + if (expectTranResult) + { + Assert.Equal(1, await incr); // eq: incr + Assert.Equal(1, (long)get); // eq: get + } + else + { + Assert.Equal(TaskStatus.Canceled, SafeStatus(incr)); // neq: incr + Assert.Equal(0, (long)get); // neq: get + } + } + + [Theory] + [InlineData(4D, 4D, true, true)] + [InlineData(4D, 5D, true, false)] + [InlineData(4D, null, true, false)] + [InlineData(null, 5D, true, false)] + [InlineData(null, null, true, true)] + + [InlineData(4D, 4D, false, false)] + [InlineData(4D, 5D, false, true)] + [InlineData(4D, null, false, true)] + [InlineData(null, 5D, false, true)] + [InlineData(null, null, false, false)] + public async Task BasicTranWithSortedSetEqualCondition(double? expected, double? value, bool expectEqual, bool expectedTranResult) + { + await using var conn = Create(); + + RedisKey key = Me(), key2 = Me() + "2"; + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + db.KeyDelete(key2, CommandFlags.FireAndForget); + + RedisValue member = "member"; + if (value != null) db.SortedSetAdd(key2, member, value.Value, flags: CommandFlags.FireAndForget); + Assert.False(db.KeyExists(key)); + Assert.Equal(value, db.SortedSetScore(key2, member)); + + var tran = db.CreateTransaction(); + var cond = tran.AddCondition(expectEqual ? 
Condition.SortedSetEqual(key2, member, expected) : Condition.SortedSetNotEqual(key2, member, expected)); + var incr = tran.StringIncrementAsync(key); + var exec = tran.ExecuteAsync(); + var get = db.StringGet(key); + + Assert.Equal(expectedTranResult, await exec); + if (expectEqual == (value == expected)) + { + Assert.True(await exec, "eq: exec"); + Assert.True(cond.WasSatisfied, "eq: was satisfied"); + Assert.Equal(1, await incr); // eq: incr + Assert.Equal(1, (long)get); // eq: get + } + else + { + Assert.False(await exec, "neq: exec"); + Assert.False(cond.WasSatisfied, "neq: was satisfied"); + Assert.Equal(TaskStatus.Canceled, SafeStatus(incr)); // neq: incr + Assert.Equal(0, (long)get); // neq: get + } + } + + [Theory] + [InlineData(true, true, true, true)] + [InlineData(true, false, true, true)] + [InlineData(false, true, true, true)] + [InlineData(true, true, false, false)] + [InlineData(true, false, false, false)] + [InlineData(false, true, false, false)] + [InlineData(false, false, true, false)] + [InlineData(false, false, false, true)] + public async Task BasicTranWithSortedSetScoreExistsCondition(bool member1HasScore, bool member2HasScore, bool demandScoreExists, bool expectedTranResult) + { + await using var conn = Create(); + + RedisKey key = Me(), key2 = Me() + "2"; + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + db.KeyDelete(key2, CommandFlags.FireAndForget); + + const double Score = 4D; + RedisValue member1 = "member1"; + RedisValue member2 = "member2"; + if (member1HasScore) + { + db.SortedSetAdd(key2, member1, Score, flags: CommandFlags.FireAndForget); + } + + if (member2HasScore) + { + db.SortedSetAdd(key2, member2, Score, flags: CommandFlags.FireAndForget); + } + + Assert.False(db.KeyExists(key)); + Assert.Equal(member1HasScore ? Score : null, db.SortedSetScore(key2, member1)); + Assert.Equal(member2HasScore ? 
Score : null, db.SortedSetScore(key2, member2)); + + var tran = db.CreateTransaction(); + var cond = tran.AddCondition(demandScoreExists ? Condition.SortedSetScoreExists(key2, Score) : Condition.SortedSetScoreNotExists(key2, Score)); + var incr = tran.StringIncrementAsync(key); + var exec = tran.ExecuteAsync(); + var get = db.StringGet(key); + + Assert.Equal(expectedTranResult, await exec); + if ((member1HasScore || member2HasScore) == demandScoreExists) + { + Assert.True(await exec, "eq: exec"); + Assert.True(cond.WasSatisfied, "eq: was satisfied"); + Assert.Equal(1, await incr); // eq: incr + Assert.Equal(1, (long)get); // eq: get + } + else + { + Assert.False(await exec, "neq: exec"); + Assert.False(cond.WasSatisfied, "neq: was satisfied"); + Assert.Equal(TaskStatus.Canceled, SafeStatus(incr)); // neq: incr + Assert.Equal(0, (long)get); // neq: get + } + } + + [Theory] + [InlineData(true, true, 2L, true, true)] + [InlineData(true, true, 2L, false, false)] + [InlineData(true, true, 1L, true, false)] + [InlineData(true, true, 1L, false, true)] + [InlineData(true, false, 2L, true, false)] + [InlineData(true, false, 2L, false, true)] + [InlineData(true, false, 1L, true, true)] + [InlineData(true, false, 1L, false, false)] + [InlineData(false, true, 2L, true, false)] + [InlineData(false, true, 2L, false, true)] + [InlineData(false, true, 1L, true, true)] + [InlineData(false, true, 1L, false, false)] + [InlineData(false, false, 2L, true, false)] + [InlineData(false, false, 2L, false, true)] + [InlineData(false, false, 1L, true, false)] + [InlineData(false, false, 1L, false, true)] + public async Task BasicTranWithSortedSetScoreCountExistsCondition(bool member1HasScore, bool member2HasScore, long expectedLength, bool expectEqual, bool expectedTranResult) + { + await using var conn = Create(); + + RedisKey key = Me(), key2 = Me() + "2"; + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + db.KeyDelete(key2, CommandFlags.FireAndForget); + + 
const double Score = 4D; + var length = 0L; + RedisValue member1 = "member1"; + RedisValue member2 = "member2"; + if (member1HasScore) + { + db.SortedSetAdd(key2, member1, Score, flags: CommandFlags.FireAndForget); + length++; + } + + if (member2HasScore) + { + db.SortedSetAdd(key2, member2, Score, flags: CommandFlags.FireAndForget); + length++; + } + + Assert.False(db.KeyExists(key)); + Assert.Equal(length, db.SortedSetLength(key2, Score, Score)); + + var tran = db.CreateTransaction(); + var cond = tran.AddCondition(expectEqual ? Condition.SortedSetScoreExists(key2, Score, expectedLength) : Condition.SortedSetScoreNotExists(key2, Score, expectedLength)); + var incr = tran.StringIncrementAsync(key); + var exec = tran.ExecuteAsync(); + var get = db.StringGet(key); + + Assert.Equal(expectedTranResult, await exec); + if (expectEqual == (length == expectedLength)) + { + Assert.True(await exec, "eq: exec"); + Assert.True(cond.WasSatisfied, "eq: was satisfied"); + Assert.Equal(1, await incr); // eq: incr + Assert.Equal(1, (long)get); // eq: get + } + else + { + Assert.False(await exec, "neq: exec"); + Assert.False(cond.WasSatisfied, "neq: was satisfied"); + Assert.Equal(TaskStatus.Canceled, SafeStatus(incr)); // neq: incr + Assert.Equal(0, (long)get); // neq: get + } + } + + [Theory] + [InlineData("five", ComparisonType.Equal, 5L, false)] + [InlineData("four", ComparisonType.Equal, 4L, true)] + [InlineData("three", ComparisonType.Equal, 3L, false)] + [InlineData("", ComparisonType.Equal, 2L, false)] + [InlineData("", ComparisonType.Equal, 0L, true)] + + [InlineData("five", ComparisonType.LessThan, 5L, true)] + [InlineData("four", ComparisonType.LessThan, 4L, false)] + [InlineData("three", ComparisonType.LessThan, 3L, false)] + [InlineData("", ComparisonType.LessThan, 2L, true)] + [InlineData("", ComparisonType.LessThan, 0L, false)] + + [InlineData("five", ComparisonType.GreaterThan, 5L, false)] + [InlineData("four", ComparisonType.GreaterThan, 4L, false)] + 
[InlineData("three", ComparisonType.GreaterThan, 3L, true)] + [InlineData("", ComparisonType.GreaterThan, 2L, false)] + [InlineData("", ComparisonType.GreaterThan, 0L, false)] + public async Task BasicTranWithListLengthCondition(string value, ComparisonType type, long length, bool expectTranResult) + { + await using var conn = Create(); + + RedisKey key = Me(), key2 = Me() + "2"; + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + db.KeyDelete(key2, CommandFlags.FireAndForget); + + bool expectSuccess; + Condition? condition; + var valueLength = value?.Length ?? 0; + switch (type) + { + case ComparisonType.Equal: + expectSuccess = valueLength == length; + condition = Condition.ListLengthEqual(key2, length); + break; + case ComparisonType.GreaterThan: + expectSuccess = valueLength > length; + condition = Condition.ListLengthGreaterThan(key2, length); + break; + case ComparisonType.LessThan: + expectSuccess = valueLength < length; + condition = Condition.ListLengthLessThan(key2, length); + break; + default: + throw new ArgumentOutOfRangeException(nameof(type)); + } + + for (var i = 0; i < valueLength; i++) + { + db.ListRightPush(key2, i, flags: CommandFlags.FireAndForget); + } + Assert.False(db.KeyExists(key)); + Assert.Equal(valueLength, db.ListLength(key2)); + + var tran = db.CreateTransaction(); + var cond = tran.AddCondition(condition); + var push = tran.StringSetAsync(key, "any value"); + var exec = tran.ExecuteAsync(); + var get = db.StringLength(key); + + Assert.Equal(expectTranResult, await exec); + + if (expectSuccess) + { + Assert.True(await exec, "eq: exec"); + Assert.True(cond.WasSatisfied, "eq: was satisfied"); + Assert.True(await push); // eq: push + Assert.Equal("any value".Length, get); // eq: get + } + else + { + Assert.False(await exec, "neq: exec"); + Assert.False(cond.WasSatisfied, "neq: was satisfied"); + Assert.Equal(TaskStatus.Canceled, SafeStatus(push)); // neq: push + Assert.Equal(0, get); // neq: get + } + } + + 
[Theory] + [InlineData("five", ComparisonType.Equal, 5L, false)] + [InlineData("four", ComparisonType.Equal, 4L, true)] + [InlineData("three", ComparisonType.Equal, 3L, false)] + [InlineData("", ComparisonType.Equal, 2L, false)] + [InlineData("", ComparisonType.Equal, 0L, true)] + + [InlineData("five", ComparisonType.LessThan, 5L, true)] + [InlineData("four", ComparisonType.LessThan, 4L, false)] + [InlineData("three", ComparisonType.LessThan, 3L, false)] + [InlineData("", ComparisonType.LessThan, 2L, true)] + [InlineData("", ComparisonType.LessThan, 0L, false)] + + [InlineData("five", ComparisonType.GreaterThan, 5L, false)] + [InlineData("four", ComparisonType.GreaterThan, 4L, false)] + [InlineData("three", ComparisonType.GreaterThan, 3L, true)] + [InlineData("", ComparisonType.GreaterThan, 2L, false)] + [InlineData("", ComparisonType.GreaterThan, 0L, false)] + public async Task BasicTranWithStreamLengthCondition(string value, ComparisonType type, long length, bool expectTranResult) + { + await using var conn = Create(require: RedisFeatures.v5_0_0); + + RedisKey key = Me(), key2 = Me() + "2"; + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + db.KeyDelete(key2, CommandFlags.FireAndForget); + + bool expectSuccess; + Condition? condition; + var valueLength = value?.Length ?? 
0; + switch (type) + { + case ComparisonType.Equal: + expectSuccess = valueLength == length; + condition = Condition.StreamLengthEqual(key2, length); + break; + case ComparisonType.GreaterThan: + expectSuccess = valueLength > length; + condition = Condition.StreamLengthGreaterThan(key2, length); + break; + case ComparisonType.LessThan: + expectSuccess = valueLength < length; + condition = Condition.StreamLengthLessThan(key2, length); + break; + default: + throw new ArgumentOutOfRangeException(nameof(type)); + } + RedisValue fieldName = "Test"; + for (var i = 0; i < valueLength; i++) + { + db.StreamAdd(key2, fieldName, i, flags: CommandFlags.FireAndForget); + } + Assert.False(db.KeyExists(key)); + Assert.Equal(valueLength, db.StreamLength(key2)); + + var tran = db.CreateTransaction(); + var cond = tran.AddCondition(condition); + var push = tran.StringSetAsync(key, "any value"); + var exec = tran.ExecuteAsync(); + var get = db.StringLength(key); + + Assert.Equal(expectTranResult, await exec); + + if (expectSuccess) + { + Assert.True(await exec, "eq: exec"); + Assert.True(cond.WasSatisfied, "eq: was satisfied"); + Assert.True(await push); // eq: push + Assert.Equal("any value".Length, get); // eq: get + } + else + { + Assert.False(await exec, "neq: exec"); + Assert.False(cond.WasSatisfied, "neq: was satisfied"); + Assert.Equal(TaskStatus.Canceled, SafeStatus(push)); // neq: push + Assert.Equal(0, get); // neq: get + } + } + + [Fact] + public async Task BasicTran() + { + await using var conn = Create(); + + RedisKey key = Me(); + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + Assert.False(db.KeyExists(key)); + + var tran = db.CreateTransaction(); + var a = tran.StringIncrementAsync(key, 10); + var b = tran.StringIncrementAsync(key, 5); + var c = tran.StringGetAsync(key); + var d = tran.KeyExistsAsync(key); + var e = tran.KeyDeleteAsync(key); + var f = tran.KeyExistsAsync(key); + Assert.False(a.IsCompleted); + 
Assert.False(b.IsCompleted); + Assert.False(c.IsCompleted); + Assert.False(d.IsCompleted); + Assert.False(e.IsCompleted); + Assert.False(f.IsCompleted); + var result = await tran.ExecuteAsync().ForAwait(); + Assert.True(result, "result"); + await Task.WhenAll(a, b, c, d, e, f).ForAwait(); + Assert.True(a.IsCompleted, "a"); + Assert.True(b.IsCompleted, "b"); + Assert.True(c.IsCompleted, "c"); + Assert.True(d.IsCompleted, "d"); + Assert.True(e.IsCompleted, "e"); + Assert.True(f.IsCompleted, "f"); + + var g = db.KeyExists(key); + + Assert.Equal(10, await a.ForAwait()); + Assert.Equal(15, await b.ForAwait()); + Assert.Equal(15, (long)await c.ForAwait()); + Assert.True(await d.ForAwait()); + Assert.True(await e.ForAwait()); + Assert.False(await f.ForAwait()); + Assert.False(g); + } + + [Fact] + public async Task CombineFireAndForgetAndRegularAsyncInTransaction() + { + await using var conn = Create(); + + RedisKey key = Me(); + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + Assert.False(db.KeyExists(key)); + + var tran = db.CreateTransaction("state"); + var a = tran.StringIncrementAsync(key, 5); + var b = tran.StringIncrementAsync(key, 10, CommandFlags.FireAndForget); + var c = tran.StringIncrementAsync(key, 15); + Assert.True(tran.Execute()); + var count = (long)db.StringGet(key); + + Assert.Equal(5, await a); + Assert.Equal("state", a.AsyncState); + Assert.Equal(0, await b); + Assert.Null(b.AsyncState); + Assert.Equal(30, await c); + Assert.Equal("state", a.AsyncState); + Assert.Equal(30, count); + } + + [Fact] + public async Task TransactionWithAdHocCommandsAndSelectDisabled() + { + await using var conn = Create(disabledCommands: ["SELECT"]); + RedisKey key = Me(); + var db = conn.GetDatabase(); + db.KeyDelete(key, CommandFlags.FireAndForget); + Assert.False(db.KeyExists(key)); + + var tran = db.CreateTransaction("state"); + var a = tran.ExecuteAsync("SET", "foo", "bar"); + Assert.True(await tran.ExecuteAsync()); + await a; + var 
setting = db.StringGet("foo"); + Assert.Equal("bar", setting); + } + +#if VERBOSE + [Fact] + public async Task WatchAbort_StringEqual() + { + await using var vicConn = Create(); + await using var perpConn = Create(); + + var key = Me(); + var db = vicConn.GetDatabase(); + + // expect foo, change to bar at the last minute + vicConn.PreTransactionExec += cmd => + { + Writer.WriteLine($"'{cmd}' detected; changing it..."); + perpConn.GetDatabase().StringSet(key, "bar"); + }; + db.KeyDelete(key); + db.StringSet(key, "foo"); + var tran = db.CreateTransaction(); + tran.AddCondition(Condition.StringEqual(key, "foo")); + var pong = tran.PingAsync(); + Assert.False(await tran.ExecuteAsync(), "expected abort"); + await Assert.ThrowsAsync(() => pong); + } + + [Fact] + public async Task WatchAbort_HashLengthEqual() + { + await using var vicConn = Create(); + await using var perpConn = Create(); + + var key = Me(); + var db = vicConn.GetDatabase(); + + // expect foo, change to bar at the last minute + vicConn.PreTransactionExec += cmd => + { + Writer.WriteLine($"'{cmd}' detected; changing it..."); + perpConn.GetDatabase().HashSet(key, "bar", "def"); + }; + db.KeyDelete(key); + db.HashSet(key, "foo", "abc"); + var tran = db.CreateTransaction(); + tran.AddCondition(Condition.HashLengthEqual(key, 1)); + var pong = tran.PingAsync(); + Assert.False(await tran.ExecuteAsync()); + await Assert.ThrowsAsync(() => pong); + } +#endif + + [Fact] + public async Task ExecCompletes_Issue943() + { + Skip.UnlessLongRunning(); + int hashHit = 0, hashMiss = 0, expireHit = 0, expireMiss = 0; + await using (var conn = Create()) + { + var db = conn.GetDatabase(); + for (int i = 0; i < 40000; i++) + { + RedisKey key = Me(); + await db.KeyDeleteAsync(key); + HashEntry[] hashEntries = + [ + new HashEntry("blah", DateTime.UtcNow.ToString("R")), + ]; + ITransaction transaction = db.CreateTransaction(); + transaction.AddCondition(Condition.KeyNotExists(key)); + Task hashSetTask = 
transaction.HashSetAsync(key, hashEntries); + Task expireTask = transaction.KeyExpireAsync(key, TimeSpan.FromSeconds(30)); + bool committed = await transaction.ExecuteAsync(); + if (committed) + { + if (hashSetTask.IsCompleted) hashHit++; else hashMiss++; + if (expireTask.IsCompleted) expireHit++; else expireMiss++; + await hashSetTask; + await expireTask; + } + } + } + + Log($"hash hit: {hashHit}, miss: {hashMiss}; expire hit: {expireHit}, miss: {expireMiss}"); + Assert.Equal(0, hashMiss); + Assert.Equal(0, expireMiss); + } +} diff --git a/tests/StackExchange.Redis.Tests/TransactionWrapperTests.cs b/tests/StackExchange.Redis.Tests/TransactionWrapperTests.cs deleted file mode 100644 index c776e2f47..000000000 --- a/tests/StackExchange.Redis.Tests/TransactionWrapperTests.cs +++ /dev/null @@ -1,134 +0,0 @@ -using System.Text; -using Moq; -using StackExchange.Redis.KeyspaceIsolation; -using Xunit; - -namespace StackExchange.Redis.Tests -{ -#pragma warning disable RCS1047 // Non-asynchronous method name should not end with 'Async'. 
- [Collection(nameof(MoqDependentCollection))] - public sealed class TransactionWrapperTests - { - private readonly Mock mock; - private readonly TransactionWrapper wrapper; - - public TransactionWrapperTests() - { - mock = new Mock(); - wrapper = new TransactionWrapper(mock.Object, Encoding.UTF8.GetBytes("prefix:")); - } - - [Fact] - public void AddCondition_HashEqual() - { - wrapper.AddCondition(Condition.HashEqual("key", "field", "value")); - mock.Verify(_ => _.AddCondition(It.Is(value => "prefix:key Hash > field == value" == value.ToString()))); - } - - [Fact] - public void AddCondition_HashNotEqual() - { - wrapper.AddCondition(Condition.HashNotEqual("key", "field", "value")); - mock.Verify(_ => _.AddCondition(It.Is(value => "prefix:key Hash > field != value" == value.ToString()))); - } - - [Fact] - public void AddCondition_HashExists() - { - wrapper.AddCondition(Condition.HashExists("key", "field")); - mock.Verify(_ => _.AddCondition(It.Is(value => "prefix:key Hash > field exists" == value.ToString()))); - } - - [Fact] - public void AddCondition_HashNotExists() - { - wrapper.AddCondition(Condition.HashNotExists("key", "field")); - mock.Verify(_ => _.AddCondition(It.Is(value => "prefix:key Hash > field does not exists" == value.ToString()))); - } - - [Fact] - public void AddCondition_KeyExists() - { - wrapper.AddCondition(Condition.KeyExists("key")); - mock.Verify(_ => _.AddCondition(It.Is(value => "prefix:key exists" == value.ToString()))); - } - - [Fact] - public void AddCondition_KeyNotExists() - { - wrapper.AddCondition(Condition.KeyNotExists("key")); - mock.Verify(_ => _.AddCondition(It.Is(value => "prefix:key does not exists" == value.ToString()))); - } - - [Fact] - public void AddCondition_StringEqual() - { - wrapper.AddCondition(Condition.StringEqual("key", "value")); - mock.Verify(_ => _.AddCondition(It.Is(value => "prefix:key == value" == value.ToString()))); - } - - [Fact] - public void AddCondition_StringNotEqual() - { - 
wrapper.AddCondition(Condition.StringNotEqual("key", "value")); - mock.Verify(_ => _.AddCondition(It.Is(value => "prefix:key != value" == value.ToString()))); - } - - [Fact] - public void AddCondition_SortedSetEqual() - { - wrapper.AddCondition(Condition.SortedSetEqual("key", "member", "score")); - mock.Verify(_ => _.AddCondition(It.Is(value => "prefix:key SortedSet > member == score" == value.ToString()))); - } - - [Fact] - public void AddCondition_SortedSetNotEqual() - { - wrapper.AddCondition(Condition.SortedSetNotEqual("key", "member", "score")); - mock.Verify(_ => _.AddCondition(It.Is(value => "prefix:key SortedSet > member != score" == value.ToString()))); - } - - [Fact] - public void AddCondition_SortedSetScoreExists() - { - wrapper.AddCondition(Condition.SortedSetScoreExists("key", "score")); - mock.Verify(_ => _.AddCondition(It.Is(value => "prefix:key not contains 0 members with score: score" == value.ToString()))); - } - - [Fact] - public void AddCondition_SortedSetScoreNotExists() - { - wrapper.AddCondition(Condition.SortedSetScoreNotExists("key", "score")); - mock.Verify(_ => _.AddCondition(It.Is(value => "prefix:key contains 0 members with score: score" == value.ToString()))); - } - - [Fact] - public void AddCondition_SortedSetScoreCountExists() - { - wrapper.AddCondition(Condition.SortedSetScoreExists("key", "score", "count")); - mock.Verify(_ => _.AddCondition(It.Is(value => "prefix:key contains count members with score: score" == value.ToString()))); - } - - [Fact] - public void AddCondition_SortedSetScoreCountNotExists() - { - wrapper.AddCondition(Condition.SortedSetScoreNotExists("key", "score", "count")); - mock.Verify(_ => _.AddCondition(It.Is(value => "prefix:key not contains count members with score: score" == value.ToString()))); - } - - [Fact] - public void ExecuteAsync() - { - wrapper.ExecuteAsync(CommandFlags.None); - mock.Verify(_ => _.ExecuteAsync(CommandFlags.None), Times.Once()); - } - - [Fact] - public void Execute() - { - 
wrapper.Execute(CommandFlags.None); - mock.Verify(_ => _.Execute(CommandFlags.None), Times.Once()); - } - } -#pragma warning restore RCS1047 // Non-asynchronous method name should not end with 'Async'. -} diff --git a/tests/StackExchange.Redis.Tests/Transactions.cs b/tests/StackExchange.Redis.Tests/Transactions.cs deleted file mode 100644 index 018b384c0..000000000 --- a/tests/StackExchange.Redis.Tests/Transactions.cs +++ /dev/null @@ -1,1240 +0,0 @@ -#pragma warning disable RCS1090 // Call 'ConfigureAwait(false)'. - -using System; -using System.Threading.Tasks; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - [Collection(SharedConnectionFixture.Key)] - public class Transactions : TestBase - { - public Transactions(ITestOutputHelper output, SharedConnectionFixture fixture) : base(output, fixture) { } - - [Fact] - public void BasicEmptyTran() - { - using (var muxer = Create()) - { - RedisKey key = Me(); - var db = muxer.GetDatabase(); - db.KeyDelete(key, CommandFlags.FireAndForget); - Assert.False(db.KeyExists(key)); - - var tran = db.CreateTransaction(); - - var result = tran.Execute(); - Assert.True(result); - } - } - - [Fact] - public void NestedTransactionThrows() - { - using (var muxer = Create()) - { - var db = muxer.GetDatabase(); - var tran = db.CreateTransaction(); - var redisTransaction = Assert.IsType(tran); - Assert.Throws(() => redisTransaction.CreateTransaction(null)); - } - } - - [Theory] - [InlineData(false, false, true)] - [InlineData(false, true, false)] - [InlineData(true, false, false)] - [InlineData(true, true, true)] - public async Task BasicTranWithExistsCondition(bool demandKeyExists, bool keyExists, bool expectTranResult) - { - using (var muxer = Create(disabledCommands: new[] { "info", "config" })) - { - RedisKey key = Me(), key2 = Me() + "2"; - var db = muxer.GetDatabase(); - db.KeyDelete(key, CommandFlags.FireAndForget); - db.KeyDelete(key2, CommandFlags.FireAndForget); - if (keyExists) 
db.StringSet(key2, "any value", flags: CommandFlags.FireAndForget); - Assert.False(db.KeyExists(key)); - Assert.Equal(keyExists, db.KeyExists(key2)); - - var tran = db.CreateTransaction(); - var cond = tran.AddCondition(demandKeyExists ? Condition.KeyExists(key2) : Condition.KeyNotExists(key2)); - var incr = tran.StringIncrementAsync(key); - var exec = tran.ExecuteAsync(); - var get = db.StringGet(key); - - Assert.Equal(expectTranResult, await exec); - if (demandKeyExists == keyExists) - { - Assert.True(await exec, "eq: exec"); - Assert.True(cond.WasSatisfied, "eq: was satisfied"); - Assert.Equal(1, await incr); // eq: incr - Assert.Equal(1, (long)get); // eq: get - } - else - { - Assert.False(await exec, "neq: exec"); - Assert.False(cond.WasSatisfied, "neq: was satisfied"); - Assert.Equal(TaskStatus.Canceled, SafeStatus(incr)); // neq: incr - Assert.Equal(0, (long)get); // neq: get - } - } - } - - [Theory] - [InlineData("same", "same", true, true)] - [InlineData("x", "y", true, false)] - [InlineData("x", null, true, false)] - [InlineData(null, "y", true, false)] - [InlineData(null, null, true, true)] - - [InlineData("same", "same", false, false)] - [InlineData("x", "y", false, true)] - [InlineData("x", null, false, true)] - [InlineData(null, "y", false, true)] - [InlineData(null, null, false, false)] - public async Task BasicTranWithEqualsCondition(string expected, string value, bool expectEqual, bool expectTranResult) - { - using (var muxer = Create()) - { - RedisKey key = Me(), key2 = Me() + "2"; - var db = muxer.GetDatabase(); - db.KeyDelete(key, CommandFlags.FireAndForget); - db.KeyDelete(key2, CommandFlags.FireAndForget); - - if (value != null) db.StringSet(key2, value, flags: CommandFlags.FireAndForget); - Assert.False(db.KeyExists(key)); - Assert.Equal(value, db.StringGet(key2)); - - var tran = db.CreateTransaction(); - var cond = tran.AddCondition(expectEqual ? 
Condition.StringEqual(key2, expected) : Condition.StringNotEqual(key2, expected)); - var incr = tran.StringIncrementAsync(key); - var exec = tran.ExecuteAsync(); - var get = db.StringGet(key); - - Assert.Equal(expectTranResult, await exec); - if (expectEqual == (value == expected)) - { - Assert.True(await exec, "eq: exec"); - Assert.True(cond.WasSatisfied, "eq: was satisfied"); - Assert.Equal(1, await incr); // eq: incr - Assert.Equal(1, (long)get); // eq: get - } - else - { - Assert.False(await exec, "neq: exec"); - Assert.False(cond.WasSatisfied, "neq: was satisfied"); - Assert.Equal(TaskStatus.Canceled, SafeStatus(incr)); // neq: incr - Assert.Equal(0, (long)get); // neq: get - } - } - } - - [Theory] - [InlineData(false, false, true)] - [InlineData(false, true, false)] - [InlineData(true, false, false)] - [InlineData(true, true, true)] - public async Task BasicTranWithHashExistsCondition(bool demandKeyExists, bool keyExists, bool expectTranResult) - { - using (var muxer = Create(disabledCommands: new[] { "info", "config" })) - { - RedisKey key = Me(), key2 = Me() + "2"; - var db = muxer.GetDatabase(); - db.KeyDelete(key, CommandFlags.FireAndForget); - db.KeyDelete(key2, CommandFlags.FireAndForget); - RedisValue hashField = "field"; - if (keyExists) db.HashSet(key2, hashField, "any value", flags: CommandFlags.FireAndForget); - Assert.False(db.KeyExists(key)); - Assert.Equal(keyExists, db.HashExists(key2, hashField)); - - var tran = db.CreateTransaction(); - var cond = tran.AddCondition(demandKeyExists ? 
Condition.HashExists(key2, hashField) : Condition.HashNotExists(key2, hashField)); - var incr = tran.StringIncrementAsync(key); - var exec = tran.ExecuteAsync(); - var get = db.StringGet(key); - - Assert.Equal(expectTranResult, await exec); - if (demandKeyExists == keyExists) - { - Assert.True(await exec, "eq: exec"); - Assert.True(cond.WasSatisfied, "eq: was satisfied"); - Assert.Equal(1, await incr); // eq: incr - Assert.Equal(1, (long)get); // eq: get - } - else - { - Assert.False(await exec, "neq: exec"); - Assert.False(cond.WasSatisfied, "neq: was satisfied"); - Assert.Equal(TaskStatus.Canceled, SafeStatus(incr)); // neq: incr - Assert.Equal(0, (long)get); // neq: get - } - } - } - - [Theory] - [InlineData("same", "same", true, true)] - [InlineData("x", "y", true, false)] - [InlineData("x", null, true, false)] - [InlineData(null, "y", true, false)] - [InlineData(null, null, true, true)] - - [InlineData("same", "same", false, false)] - [InlineData("x", "y", false, true)] - [InlineData("x", null, false, true)] - [InlineData(null, "y", false, true)] - [InlineData(null, null, false, false)] - public async Task BasicTranWithHashEqualsCondition(string expected, string value, bool expectEqual, bool expectedTranResult) - { - using (var muxer = Create()) - { - RedisKey key = Me(), key2 = Me() + "2"; - var db = muxer.GetDatabase(); - db.KeyDelete(key, CommandFlags.FireAndForget); - db.KeyDelete(key2, CommandFlags.FireAndForget); - - RedisValue hashField = "field"; - if (value != null) db.HashSet(key2, hashField, value, flags: CommandFlags.FireAndForget); - Assert.False(db.KeyExists(key)); - Assert.Equal(value, db.HashGet(key2, hashField)); - - var tran = db.CreateTransaction(); - var cond = tran.AddCondition(expectEqual ? 
Condition.HashEqual(key2, hashField, expected) : Condition.HashNotEqual(key2, hashField, expected)); - var incr = tran.StringIncrementAsync(key); - var exec = tran.ExecuteAsync(); - var get = db.StringGet(key); - - Assert.Equal(expectedTranResult, await exec); - if (expectEqual == (value == expected)) - { - Assert.True(await exec, "eq: exec"); - Assert.True(cond.WasSatisfied, "eq: was satisfied"); - Assert.Equal(1, await incr); // eq: incr - Assert.Equal(1, (long)get); // eq: get - } - else - { - Assert.False(await exec, "neq: exec"); - Assert.False(cond.WasSatisfied, "neq: was satisfied"); - Assert.Equal(TaskStatus.Canceled, SafeStatus(incr)); // neq: incr - Assert.Equal(0, (long)get); // neq: get - } - } - } - - private static TaskStatus SafeStatus(Task task) - { - if (task.Status == TaskStatus.WaitingForActivation) - { - try - { - if (!task.Wait(1000)) throw new TimeoutException("timeout waiting for task to complete"); - } - catch (AggregateException ex) - when (ex.InnerException is TaskCanceledException - || (ex.InnerExceptions.Count == 1 && ex.InnerException is TaskCanceledException)) - { - return TaskStatus.Canceled; - } - catch (TaskCanceledException) - { - return TaskStatus.Canceled; - } - } - return task.Status; - } - - [Theory] - [InlineData(false, false, true)] - [InlineData(false, true, false)] - [InlineData(true, false, false)] - [InlineData(true, true, true)] - public async Task BasicTranWithListExistsCondition(bool demandKeyExists, bool keyExists, bool expectTranResult) - { - using (var muxer = Create(disabledCommands: new[] { "info", "config" })) - { - RedisKey key = Me(), key2 = Me() + "2"; - var db = muxer.GetDatabase(); - db.KeyDelete(key, CommandFlags.FireAndForget); - db.KeyDelete(key2, CommandFlags.FireAndForget); - if (keyExists) db.ListRightPush(key2, "any value", flags: CommandFlags.FireAndForget); - Assert.False(db.KeyExists(key)); - Assert.Equal(keyExists, db.KeyExists(key2)); - - var tran = db.CreateTransaction(); - var cond = 
tran.AddCondition(demandKeyExists ? Condition.ListIndexExists(key2, 0) : Condition.ListIndexNotExists(key2, 0)); - var push = tran.ListRightPushAsync(key, "any value"); - var exec = tran.ExecuteAsync(); - var get = db.ListGetByIndex(key, 0); - - Assert.Equal(expectTranResult, await exec); - if (demandKeyExists == keyExists) - { - Assert.True(await exec, "eq: exec"); - Assert.True(cond.WasSatisfied, "eq: was satisfied"); - Assert.Equal(1, await push); // eq: push - Assert.Equal("any value", get); // eq: get - } - else - { - Assert.False(await exec, "neq: exec"); - Assert.False(cond.WasSatisfied, "neq: was satisfied"); - Assert.Equal(TaskStatus.Canceled, SafeStatus(push)); // neq: push - Assert.Null((string)get); // neq: get - } - } - } - - [Theory] - [InlineData("same", "same", true, true)] - [InlineData("x", "y", true, false)] - [InlineData("x", null, true, false)] - [InlineData(null, "y", true, false)] - [InlineData(null, null, true, true)] - - [InlineData("same", "same", false, false)] - [InlineData("x", "y", false, true)] - [InlineData("x", null, false, true)] - [InlineData(null, "y", false, true)] - [InlineData(null, null, false, false)] - public async Task BasicTranWithListEqualsCondition(string expected, string value, bool expectEqual, bool expectTranResult) - { - using (var muxer = Create()) - { - RedisKey key = Me(), key2 = Me() + "2"; - var db = muxer.GetDatabase(); - db.KeyDelete(key, CommandFlags.FireAndForget); - db.KeyDelete(key2, CommandFlags.FireAndForget); - - if (value != null) db.ListRightPush(key2, value, flags: CommandFlags.FireAndForget); - Assert.False(db.KeyExists(key)); - Assert.Equal(value, db.ListGetByIndex(key2, 0)); - - var tran = db.CreateTransaction(); - var cond = tran.AddCondition(expectEqual ? 
Condition.ListIndexEqual(key2, 0, expected) : Condition.ListIndexNotEqual(key2, 0, expected)); - var push = tran.ListRightPushAsync(key, "any value"); - var exec = tran.ExecuteAsync(); - var get = db.ListGetByIndex(key, 0); - - Assert.Equal(expectTranResult, await exec); - if (expectEqual == (value == expected)) - { - Assert.True(await exec, "eq: exec"); - Assert.True(cond.WasSatisfied, "eq: was satisfied"); - Assert.Equal(1, await push); // eq: push - Assert.Equal("any value", get); // eq: get - } - else - { - Assert.False(await exec, "neq: exec"); - Assert.False(cond.WasSatisfied, "neq: was satisfied"); - Assert.Equal(TaskStatus.Canceled, SafeStatus(push)); // neq: push - Assert.Null((string)get); // neq: get - } - } - } - - public enum ComparisonType - { - Equal, - LessThan, - GreaterThan - } - - [Theory] - [InlineData("five", ComparisonType.Equal, 5L, false)] - [InlineData("four", ComparisonType.Equal, 4L, true)] - [InlineData("three", ComparisonType.Equal, 3L, false)] - [InlineData("", ComparisonType.Equal, 2L, false)] - [InlineData("", ComparisonType.Equal, 0L, true)] - [InlineData(null, ComparisonType.Equal, 1L, false)] - [InlineData(null, ComparisonType.Equal, 0L, true)] - - [InlineData("five", ComparisonType.LessThan, 5L, true)] - [InlineData("four", ComparisonType.LessThan, 4L, false)] - [InlineData("three", ComparisonType.LessThan, 3L, false)] - [InlineData("", ComparisonType.LessThan, 2L, true)] - [InlineData("", ComparisonType.LessThan, 0L, false)] - [InlineData(null, ComparisonType.LessThan, 1L, true)] - [InlineData(null, ComparisonType.LessThan, 0L, false)] - - [InlineData("five", ComparisonType.GreaterThan, 5L, false)] - [InlineData("four", ComparisonType.GreaterThan, 4L, false)] - [InlineData("three", ComparisonType.GreaterThan, 3L, true)] - [InlineData("", ComparisonType.GreaterThan, 2L, false)] - [InlineData("", ComparisonType.GreaterThan, 0L, false)] - [InlineData(null, ComparisonType.GreaterThan, 1L, false)] - [InlineData(null, 
ComparisonType.GreaterThan, 0L, false)] - public async Task BasicTranWithStringLengthCondition(string value, ComparisonType type, long length, bool expectTranResult) - { - using (var muxer = Create()) - { - RedisKey key = Me(), key2 = Me() + "2"; - var db = muxer.GetDatabase(); - db.KeyDelete(key, CommandFlags.FireAndForget); - db.KeyDelete(key2, CommandFlags.FireAndForget); - - var expectSuccess = false; - Condition condition = null; - var valueLength = value?.Length ?? 0; - switch (type) - { - case ComparisonType.Equal: - expectSuccess = valueLength == length; - condition = Condition.StringLengthEqual(key2, length); - Assert.Contains("String length == " + length, condition.ToString()); - break; - case ComparisonType.GreaterThan: - expectSuccess = valueLength > length; - condition = Condition.StringLengthGreaterThan(key2, length); - Assert.Contains("String length > " + length, condition.ToString()); - break; - case ComparisonType.LessThan: - expectSuccess = valueLength < length; - condition = Condition.StringLengthLessThan(key2, length); - Assert.Contains("String length < " + length, condition.ToString()); - break; - } - - if (value != null) db.StringSet(key2, value, flags: CommandFlags.FireAndForget); - Assert.False(db.KeyExists(key)); - Assert.Equal(value, db.StringGet(key2)); - - var tran = db.CreateTransaction(); - var cond = tran.AddCondition(condition); - var push = tran.StringSetAsync(key, "any value"); - var exec = tran.ExecuteAsync(); - var get = db.StringLength(key); - - Assert.Equal(expectTranResult, await exec); - - if (expectSuccess) - { - Assert.True(await exec, "eq: exec"); - Assert.True(cond.WasSatisfied, "eq: was satisfied"); - Assert.True(await push); // eq: push - Assert.Equal("any value".Length, get); // eq: get - } - else - { - Assert.False(await exec, "neq: exec"); - Assert.False(cond.WasSatisfied, "neq: was satisfied"); - Assert.Equal(TaskStatus.Canceled, SafeStatus(push)); // neq: push - Assert.Equal(0, get); // neq: get - } - } - } - - 
[Theory] - [InlineData("five", ComparisonType.Equal, 5L, false)] - [InlineData("four", ComparisonType.Equal, 4L, true)] - [InlineData("three", ComparisonType.Equal, 3L, false)] - [InlineData("", ComparisonType.Equal, 2L, false)] - [InlineData("", ComparisonType.Equal, 0L, true)] - - [InlineData("five", ComparisonType.LessThan, 5L, true)] - [InlineData("four", ComparisonType.LessThan, 4L, false)] - [InlineData("three", ComparisonType.LessThan, 3L, false)] - [InlineData("", ComparisonType.LessThan, 2L, true)] - [InlineData("", ComparisonType.LessThan, 0L, false)] - - [InlineData("five", ComparisonType.GreaterThan, 5L, false)] - [InlineData("four", ComparisonType.GreaterThan, 4L, false)] - [InlineData("three", ComparisonType.GreaterThan, 3L, true)] - [InlineData("", ComparisonType.GreaterThan, 2L, false)] - [InlineData("", ComparisonType.GreaterThan, 0L, false)] - public async Task BasicTranWithHashLengthCondition(string value, ComparisonType type, long length, bool expectTranResult) - { - using (var muxer = Create()) - { - RedisKey key = Me(), key2 = Me() + "2"; - var db = muxer.GetDatabase(); - db.KeyDelete(key, CommandFlags.FireAndForget); - db.KeyDelete(key2, CommandFlags.FireAndForget); - - var expectSuccess = false; - Condition condition = null; - var valueLength = value?.Length ?? 
0; - switch (type) - { - case ComparisonType.Equal: - expectSuccess = valueLength == length; - condition = Condition.HashLengthEqual(key2, length); - break; - case ComparisonType.GreaterThan: - expectSuccess = valueLength > length; - condition = Condition.HashLengthGreaterThan(key2, length); - break; - case ComparisonType.LessThan: - expectSuccess = valueLength < length; - condition = Condition.HashLengthLessThan(key2, length); - break; - } - - for (var i = 0; i < valueLength; i++) - { - db.HashSet(key2, i, value[i].ToString(), flags: CommandFlags.FireAndForget); - } - Assert.False(db.KeyExists(key)); - Assert.Equal(valueLength, db.HashLength(key2)); - - var tran = db.CreateTransaction(); - var cond = tran.AddCondition(condition); - var push = tran.StringSetAsync(key, "any value"); - var exec = tran.ExecuteAsync(); - var get = db.StringLength(key); - - Assert.Equal(expectTranResult, await exec); - - if (expectSuccess) - { - Assert.True(await exec, "eq: exec"); - Assert.True(cond.WasSatisfied, "eq: was satisfied"); - Assert.True(await push); // eq: push - Assert.Equal("any value".Length, get); // eq: get - } - else - { - Assert.False(await exec, "neq: exec"); - Assert.False(cond.WasSatisfied, "neq: was satisfied"); - Assert.Equal(TaskStatus.Canceled, SafeStatus(push)); // neq: push - Assert.Equal(0, get); // neq: get - } - } - } - - [Theory] - [InlineData("five", ComparisonType.Equal, 5L, false)] - [InlineData("four", ComparisonType.Equal, 4L, true)] - [InlineData("three", ComparisonType.Equal, 3L, false)] - [InlineData("", ComparisonType.Equal, 2L, false)] - [InlineData("", ComparisonType.Equal, 0L, true)] - - [InlineData("five", ComparisonType.LessThan, 5L, true)] - [InlineData("four", ComparisonType.LessThan, 4L, false)] - [InlineData("three", ComparisonType.LessThan, 3L, false)] - [InlineData("", ComparisonType.LessThan, 2L, true)] - [InlineData("", ComparisonType.LessThan, 0L, false)] - - [InlineData("five", ComparisonType.GreaterThan, 5L, false)] - 
[InlineData("four", ComparisonType.GreaterThan, 4L, false)] - [InlineData("three", ComparisonType.GreaterThan, 3L, true)] - [InlineData("", ComparisonType.GreaterThan, 2L, false)] - [InlineData("", ComparisonType.GreaterThan, 0L, false)] - public async Task BasicTranWithSetCardinalityCondition(string value, ComparisonType type, long length, bool expectTranResult) - { - using (var muxer = Create()) - { - RedisKey key = Me(), key2 = Me() + "2"; - var db = muxer.GetDatabase(); - db.KeyDelete(key, CommandFlags.FireAndForget); - db.KeyDelete(key2, CommandFlags.FireAndForget); - - var expectSuccess = false; - Condition condition = null; - var valueLength = value?.Length ?? 0; - switch (type) - { - case ComparisonType.Equal: - expectSuccess = valueLength == length; - condition = Condition.SetLengthEqual(key2, length); - break; - case ComparisonType.GreaterThan: - expectSuccess = valueLength > length; - condition = Condition.SetLengthGreaterThan(key2, length); - break; - case ComparisonType.LessThan: - expectSuccess = valueLength < length; - condition = Condition.SetLengthLessThan(key2, length); - break; - } - - for (var i = 0; i < valueLength; i++) - { - db.SetAdd(key2, i, flags: CommandFlags.FireAndForget); - } - Assert.False(db.KeyExists(key)); - Assert.Equal(valueLength, db.SetLength(key2)); - - var tran = db.CreateTransaction(); - var cond = tran.AddCondition(condition); - var push = tran.StringSetAsync(key, "any value"); - var exec = tran.ExecuteAsync(); - var get = db.StringLength(key); - - Assert.Equal(expectTranResult, await exec); - - if (expectSuccess) - { - Assert.True(await exec, "eq: exec"); - Assert.True(cond.WasSatisfied, "eq: was satisfied"); - Assert.True(await push); // eq: push - Assert.Equal("any value".Length, get); // eq: get - } - else - { - Assert.False(await exec, "neq: exec"); - Assert.False(cond.WasSatisfied, "neq: was satisfied"); - Assert.Equal(TaskStatus.Canceled, SafeStatus(push)); // neq: push - Assert.Equal(0, get); // neq: get - } - } - } 
- - [Theory] - [InlineData(false, false, true)] - [InlineData(false, true, false)] - [InlineData(true, false, false)] - [InlineData(true, true, true)] - public async Task BasicTranWithSetContainsCondition(bool demandKeyExists, bool keyExists, bool expectTranResult) - { - using (var muxer = Create(disabledCommands: new[] { "info", "config" })) - { - RedisKey key = Me(), key2 = Me() + "2"; - var db = muxer.GetDatabase(); - db.KeyDelete(key, CommandFlags.FireAndForget); - db.KeyDelete(key2, CommandFlags.FireAndForget); - RedisValue member = "value"; - if (keyExists) db.SetAdd(key2, member, flags: CommandFlags.FireAndForget); - Assert.False(db.KeyExists(key)); - Assert.Equal(keyExists, db.SetContains(key2, member)); - - var tran = db.CreateTransaction(); - var cond = tran.AddCondition(demandKeyExists ? Condition.SetContains(key2, member) : Condition.SetNotContains(key2, member)); - var incr = tran.StringIncrementAsync(key); - var exec = tran.ExecuteAsync(); - var get = db.StringGet(key); - - Assert.Equal(expectTranResult, await exec); - if (demandKeyExists == keyExists) - { - Assert.True(await exec, "eq: exec"); - Assert.True(cond.WasSatisfied, "eq: was satisfied"); - Assert.Equal(1, await incr); // eq: incr - Assert.Equal(1, (long)get); // eq: get - } - else - { - Assert.False(await exec, "neq: exec"); - Assert.False(cond.WasSatisfied, "neq: was satisfied"); - Assert.Equal(TaskStatus.Canceled, SafeStatus(incr)); // neq: incr - Assert.Equal(0, (long)get); // neq: get - } - } - } - - [Theory] - [InlineData("five", ComparisonType.Equal, 5L, false)] - [InlineData("four", ComparisonType.Equal, 4L, true)] - [InlineData("three", ComparisonType.Equal, 3L, false)] - [InlineData("", ComparisonType.Equal, 2L, false)] - [InlineData("", ComparisonType.Equal, 0L, true)] - - [InlineData("five", ComparisonType.LessThan, 5L, true)] - [InlineData("four", ComparisonType.LessThan, 4L, false)] - [InlineData("three", ComparisonType.LessThan, 3L, false)] - [InlineData("", 
ComparisonType.LessThan, 2L, true)] - [InlineData("", ComparisonType.LessThan, 0L, false)] - - [InlineData("five", ComparisonType.GreaterThan, 5L, false)] - [InlineData("four", ComparisonType.GreaterThan, 4L, false)] - [InlineData("three", ComparisonType.GreaterThan, 3L, true)] - [InlineData("", ComparisonType.GreaterThan, 2L, false)] - [InlineData("", ComparisonType.GreaterThan, 0L, false)] - public async Task BasicTranWithSortedSetCardinalityCondition(string value, ComparisonType type, long length, bool expectTranResult) - { - using (var muxer = Create()) - { - RedisKey key = Me(), key2 = Me() + "2"; - var db = muxer.GetDatabase(); - db.KeyDelete(key, CommandFlags.FireAndForget); - db.KeyDelete(key2, CommandFlags.FireAndForget); - - var expectSuccess = false; - Condition condition = null; - var valueLength = value?.Length ?? 0; - switch (type) - { - case ComparisonType.Equal: - expectSuccess = valueLength == length; - condition = Condition.SortedSetLengthEqual(key2, length); - break; - case ComparisonType.GreaterThan: - expectSuccess = valueLength > length; - condition = Condition.SortedSetLengthGreaterThan(key2, length); - break; - case ComparisonType.LessThan: - expectSuccess = valueLength < length; - condition = Condition.SortedSetLengthLessThan(key2, length); - break; - } - - for (var i = 0; i < valueLength; i++) - { - db.SortedSetAdd(key2, i, i, flags: CommandFlags.FireAndForget); - } - Assert.False(db.KeyExists(key)); - Assert.Equal(valueLength, db.SortedSetLength(key2)); - - var tran = db.CreateTransaction(); - var cond = tran.AddCondition(condition); - var push = tran.StringSetAsync(key, "any value"); - var exec = tran.ExecuteAsync(); - var get = db.StringLength(key); - - Assert.Equal(expectTranResult, await exec); - - if (expectSuccess) - { - Assert.True(await exec, "eq: exec"); - Assert.True(cond.WasSatisfied, "eq: was satisfied"); - Assert.True(await push); // eq: push - Assert.Equal("any value".Length, get); // eq: get - } - else - { - 
Assert.False(await exec, "neq: exec"); - Assert.False(cond.WasSatisfied, "neq: was satisfied"); - Assert.Equal(TaskStatus.Canceled, SafeStatus(push)); // neq: push - Assert.Equal(0, get); // neq: get - } - } - } - - [Theory] - [InlineData(1, 4, ComparisonType.Equal, 5L, false)] - [InlineData(1, 4, ComparisonType.Equal, 4L, true)] - [InlineData(1, 2, ComparisonType.Equal, 3L, false)] - [InlineData(1, 1, ComparisonType.Equal, 2L, false)] - [InlineData(0, 0, ComparisonType.Equal, 0L, false)] - - [InlineData(1, 4, ComparisonType.LessThan, 5L, true)] - [InlineData(1, 4, ComparisonType.LessThan, 4L, false)] - [InlineData(1, 3, ComparisonType.LessThan, 3L, false)] - [InlineData(1, 1, ComparisonType.LessThan, 2L, true)] - [InlineData(0, 0, ComparisonType.LessThan, 0L, false)] - - [InlineData(1, 5, ComparisonType.GreaterThan, 5L, false)] - [InlineData(1, 4, ComparisonType.GreaterThan, 4L, false)] - [InlineData(1, 4, ComparisonType.GreaterThan, 3L, true)] - [InlineData(1, 2, ComparisonType.GreaterThan, 2L, false)] - [InlineData(0, 0, ComparisonType.GreaterThan, 0L, true)] - public async Task BasicTranWithSortedSetRangeCountCondition(double min, double max, ComparisonType type, long length, bool expectTranResult) - { - using (var muxer = Create()) - { - RedisKey key = Me(), key2 = Me() + "2"; - var db = muxer.GetDatabase(); - db.KeyDelete(key, CommandFlags.FireAndForget); - db.KeyDelete(key2, CommandFlags.FireAndForget); - - var expectSuccess = false; - Condition condition = null; - var valueLength = (int)(max - min) + 1; - switch (type) - { - case ComparisonType.Equal: - expectSuccess = valueLength == length; - condition = Condition.SortedSetLengthEqual(key2, length, min, max); - break; - case ComparisonType.GreaterThan: - expectSuccess = valueLength > length; - condition = Condition.SortedSetLengthGreaterThan(key2, length, min, max); - break; - case ComparisonType.LessThan: - expectSuccess = valueLength < length; - condition = Condition.SortedSetLengthLessThan(key2, length, 
min, max); - break; - } - - for (var i = 0; i < 5; i++) - { - db.SortedSetAdd(key2, i, i, flags: CommandFlags.FireAndForget); - } - Assert.False(db.KeyExists(key)); - Assert.Equal(5, db.SortedSetLength(key2)); - - var tran = db.CreateTransaction(); - var cond = tran.AddCondition(condition); - var push = tran.StringSetAsync(key, "any value"); - var exec = tran.ExecuteAsync(); - var get = db.StringLength(key); - - Assert.Equal(expectTranResult, await exec); - - if (expectSuccess) - { - Assert.True(await exec, "eq: exec"); - Assert.True(cond.WasSatisfied, "eq: was satisfied"); - Assert.True(await push); // eq: push - Assert.Equal("any value".Length, get); // eq: get - } - else - { - Assert.False(await exec, "neq: exec"); - Assert.False(cond.WasSatisfied, "neq: was satisfied"); - Assert.Equal(TaskStatus.Canceled, SafeStatus(push)); // neq: push - Assert.Equal(0, get); // neq: get - } - } - } - - [Theory] - [InlineData(false, false, true)] - [InlineData(false, true, false)] - [InlineData(true, false, false)] - [InlineData(true, true, true)] - public async Task BasicTranWithSortedSetContainsCondition(bool demandKeyExists, bool keyExists, bool expectTranResult) - { - using (var muxer = Create(disabledCommands: new[] { "info", "config" })) - { - RedisKey key = Me(), key2 = Me() + "2"; - var db = muxer.GetDatabase(); - db.KeyDelete(key, CommandFlags.FireAndForget); - db.KeyDelete(key2, CommandFlags.FireAndForget); - RedisValue member = "value"; - if (keyExists) db.SortedSetAdd(key2, member, 0.0, flags: CommandFlags.FireAndForget); - Assert.False(db.KeyExists(key)); - Assert.Equal(keyExists, db.SortedSetScore(key2, member).HasValue); - - var tran = db.CreateTransaction(); - var cond = tran.AddCondition(demandKeyExists ? 
Condition.SortedSetContains(key2, member) : Condition.SortedSetNotContains(key2, member)); - var incr = tran.StringIncrementAsync(key); - var exec = tran.ExecuteAsync(); - var get = db.StringGet(key); - - Assert.Equal(expectTranResult, await exec); - if (demandKeyExists == keyExists) - { - Assert.True(await exec, "eq: exec"); - Assert.True(cond.WasSatisfied, "eq: was satisfied"); - Assert.Equal(1, await incr); // eq: incr - Assert.Equal(1, (long)get); // eq: get - } - else - { - Assert.False(await exec, "neq: exec"); - Assert.False(cond.WasSatisfied, "neq: was satisfied"); - Assert.Equal(TaskStatus.Canceled, SafeStatus(incr)); // neq: incr - Assert.Equal(0, (long)get); // neq: get - } - } - } - - [Theory] - [InlineData(4D, 4D, true, true)] - [InlineData(4D, 5D, true, false)] - [InlineData(4D, null, true, false)] - [InlineData(null, 5D, true, false)] - [InlineData(null, null, true, true)] - - [InlineData(4D, 4D, false, false)] - [InlineData(4D, 5D, false, true)] - [InlineData(4D, null, false, true)] - [InlineData(null, 5D, false, true)] - [InlineData(null, null, false, false)] - public async Task BasicTranWithSortedSetEqualCondition(double? expected, double? value, bool expectEqual, bool expectedTranResult) - { - using (var muxer = Create()) - { - RedisKey key = Me(), key2 = Me() + "2"; - var db = muxer.GetDatabase(); - db.KeyDelete(key, CommandFlags.FireAndForget); - db.KeyDelete(key2, CommandFlags.FireAndForget); - - RedisValue member = "member"; - if (value != null) db.SortedSetAdd(key2, member, value.Value, flags: CommandFlags.FireAndForget); - Assert.False(db.KeyExists(key)); - Assert.Equal(value, db.SortedSetScore(key2, member)); - - var tran = db.CreateTransaction(); - var cond = tran.AddCondition(expectEqual ? 
Condition.SortedSetEqual(key2, member, expected) : Condition.SortedSetNotEqual(key2, member, expected)); - var incr = tran.StringIncrementAsync(key); - var exec = tran.ExecuteAsync(); - var get = db.StringGet(key); - - Assert.Equal(expectedTranResult, await exec); - if (expectEqual == (value == expected)) - { - Assert.True(await exec, "eq: exec"); - Assert.True(cond.WasSatisfied, "eq: was satisfied"); - Assert.Equal(1, await incr); // eq: incr - Assert.Equal(1, (long)get); // eq: get - } - else - { - Assert.False(await exec, "neq: exec"); - Assert.False(cond.WasSatisfied, "neq: was satisfied"); - Assert.Equal(TaskStatus.Canceled, SafeStatus(incr)); // neq: incr - Assert.Equal(0, (long)get); // neq: get - } - } - } - - [Theory] - [InlineData(true, true, true, true)] - [InlineData(true, false, true, true)] - [InlineData(false, true, true, true)] - [InlineData(true, true, false, false)] - [InlineData(true, false, false, false)] - [InlineData(false, true, false, false)] - [InlineData(false, false, true, false)] - [InlineData(false, false, false, true)] - public async Task BasicTranWithSortedSetScoreExistsCondition(bool member1HasScore, bool member2HasScore, bool demandScoreExists, bool expectedTranResult) - { - using (var muxer = Create()) - { - RedisKey key = Me(), key2 = Me() + "2"; - var db = muxer.GetDatabase(); - db.KeyDelete(key, CommandFlags.FireAndForget); - db.KeyDelete(key2, CommandFlags.FireAndForget); - - const double Score = 4D; - RedisValue member1 = "member1"; - RedisValue member2 = "member2"; - if (member1HasScore) - { - db.SortedSetAdd(key2, member1, Score, flags: CommandFlags.FireAndForget); - } - - if (member2HasScore) - { - db.SortedSetAdd(key2, member2, Score, flags: CommandFlags.FireAndForget); - } - - Assert.False(db.KeyExists(key)); - Assert.Equal(member1HasScore ? (double?)Score : null, db.SortedSetScore(key2, member1)); - Assert.Equal(member2HasScore ? 
(double?)Score : null, db.SortedSetScore(key2, member2)); - - var tran = db.CreateTransaction(); - var cond = tran.AddCondition(demandScoreExists ? Condition.SortedSetScoreExists(key2, Score) : Condition.SortedSetScoreNotExists(key2, Score)); - var incr = tran.StringIncrementAsync(key); - var exec = tran.ExecuteAsync(); - var get = db.StringGet(key); - - Assert.Equal(expectedTranResult, await exec); - if ((member1HasScore || member2HasScore) == demandScoreExists) - { - Assert.True(await exec, "eq: exec"); - Assert.True(cond.WasSatisfied, "eq: was satisfied"); - Assert.Equal(1, await incr); // eq: incr - Assert.Equal(1, (long)get); // eq: get - } - else - { - Assert.False(await exec, "neq: exec"); - Assert.False(cond.WasSatisfied, "neq: was satisfied"); - Assert.Equal(TaskStatus.Canceled, SafeStatus(incr)); // neq: incr - Assert.Equal(0, (long)get); // neq: get - } - } - } - - [Theory] - [InlineData(true, true, 2L, true, true)] - [InlineData(true, true, 2L, false, false)] - [InlineData(true, true, 1L, true, false)] - [InlineData(true, true, 1L, false, true)] - [InlineData(true, false, 2L, true, false)] - [InlineData(true, false, 2L, false, true)] - [InlineData(true, false, 1L, true, true)] - [InlineData(true, false, 1L, false, false)] - [InlineData(false, true, 2L, true, false)] - [InlineData(false, true, 2L, false, true)] - [InlineData(false, true, 1L, true, true)] - [InlineData(false, true, 1L, false, false)] - [InlineData(false, false, 2L, true, false)] - [InlineData(false, false, 2L, false, true)] - [InlineData(false, false, 1L, true, false)] - [InlineData(false, false, 1L, false, true)] - public async Task BasicTranWithSortedSetScoreCountExistsCondition(bool member1HasScore, bool member2HasScore, long expectedLength, bool expectEqual, bool expectedTranResult) - { - using (var muxer = Create()) - { - RedisKey key = Me(), key2 = Me() + "2"; - var db = muxer.GetDatabase(); - db.KeyDelete(key, CommandFlags.FireAndForget); - db.KeyDelete(key2, 
CommandFlags.FireAndForget); - - const double Score = 4D; - var length = 0L; - RedisValue member1 = "member1"; - RedisValue member2 = "member2"; - if (member1HasScore) - { - db.SortedSetAdd(key2, member1, Score, flags: CommandFlags.FireAndForget); - length++; - } - - if (member2HasScore) - { - db.SortedSetAdd(key2, member2, Score, flags: CommandFlags.FireAndForget); - length++; - } - - Assert.False(db.KeyExists(key)); - Assert.Equal(length, db.SortedSetLength(key2, Score, Score)); - - var tran = db.CreateTransaction(); - var cond = tran.AddCondition(expectEqual ? Condition.SortedSetScoreExists(key2, Score, expectedLength) : Condition.SortedSetScoreNotExists(key2, Score, expectedLength)); - var incr = tran.StringIncrementAsync(key); - var exec = tran.ExecuteAsync(); - var get = db.StringGet(key); - - Assert.Equal(expectedTranResult, await exec); - if (expectEqual == (length == expectedLength)) - { - Assert.True(await exec, "eq: exec"); - Assert.True(cond.WasSatisfied, "eq: was satisfied"); - Assert.Equal(1, await incr); // eq: incr - Assert.Equal(1, (long)get); // eq: get - } - else - { - Assert.False(await exec, "neq: exec"); - Assert.False(cond.WasSatisfied, "neq: was satisfied"); - Assert.Equal(TaskStatus.Canceled, SafeStatus(incr)); // neq: incr - Assert.Equal(0, (long)get); // neq: get - } - } - } - - [Theory] - [InlineData("five", ComparisonType.Equal, 5L, false)] - [InlineData("four", ComparisonType.Equal, 4L, true)] - [InlineData("three", ComparisonType.Equal, 3L, false)] - [InlineData("", ComparisonType.Equal, 2L, false)] - [InlineData("", ComparisonType.Equal, 0L, true)] - - [InlineData("five", ComparisonType.LessThan, 5L, true)] - [InlineData("four", ComparisonType.LessThan, 4L, false)] - [InlineData("three", ComparisonType.LessThan, 3L, false)] - [InlineData("", ComparisonType.LessThan, 2L, true)] - [InlineData("", ComparisonType.LessThan, 0L, false)] - - [InlineData("five", ComparisonType.GreaterThan, 5L, false)] - [InlineData("four", 
ComparisonType.GreaterThan, 4L, false)] - [InlineData("three", ComparisonType.GreaterThan, 3L, true)] - [InlineData("", ComparisonType.GreaterThan, 2L, false)] - [InlineData("", ComparisonType.GreaterThan, 0L, false)] - public async Task BasicTranWithListLengthCondition(string value, ComparisonType type, long length, bool expectTranResult) - { - using (var muxer = Create()) - { - RedisKey key = Me(), key2 = Me() + "2"; - var db = muxer.GetDatabase(); - db.KeyDelete(key, CommandFlags.FireAndForget); - db.KeyDelete(key2, CommandFlags.FireAndForget); - - var expectSuccess = false; - Condition condition = null; - var valueLength = value?.Length ?? 0; - switch (type) - { - case ComparisonType.Equal: - expectSuccess = valueLength == length; - condition = Condition.ListLengthEqual(key2, length); - break; - case ComparisonType.GreaterThan: - expectSuccess = valueLength > length; - condition = Condition.ListLengthGreaterThan(key2, length); - break; - case ComparisonType.LessThan: - expectSuccess = valueLength < length; - condition = Condition.ListLengthLessThan(key2, length); - break; - } - - for (var i = 0; i < valueLength; i++) - { - db.ListRightPush(key2, i, flags: CommandFlags.FireAndForget); - } - Assert.False(db.KeyExists(key)); - Assert.Equal(valueLength, db.ListLength(key2)); - - var tran = db.CreateTransaction(); - var cond = tran.AddCondition(condition); - var push = tran.StringSetAsync(key, "any value"); - var exec = tran.ExecuteAsync(); - var get = db.StringLength(key); - - Assert.Equal(expectTranResult, await exec); - - if (expectSuccess) - { - Assert.True(await exec, "eq: exec"); - Assert.True(cond.WasSatisfied, "eq: was satisfied"); - Assert.True(await push); // eq: push - Assert.Equal("any value".Length, get); // eq: get - } - else - { - Assert.False(await exec, "neq: exec"); - Assert.False(cond.WasSatisfied, "neq: was satisfied"); - Assert.Equal(TaskStatus.Canceled, SafeStatus(push)); // neq: push - Assert.Equal(0, get); // neq: get - } - } - } - - [Fact] - 
public async Task BasicTran() - { - using (var muxer = Create()) - { - RedisKey key = Me(); - var db = muxer.GetDatabase(); - db.KeyDelete(key, CommandFlags.FireAndForget); - Assert.False(db.KeyExists(key)); - - var tran = db.CreateTransaction(); - var a = tran.StringIncrementAsync(key, 10); - var b = tran.StringIncrementAsync(key, 5); - var c = tran.StringGetAsync(key); - var d = tran.KeyExistsAsync(key); - var e = tran.KeyDeleteAsync(key); - var f = tran.KeyExistsAsync(key); - Assert.False(a.IsCompleted); - Assert.False(b.IsCompleted); - Assert.False(c.IsCompleted); - Assert.False(d.IsCompleted); - Assert.False(e.IsCompleted); - Assert.False(f.IsCompleted); - var result = await tran.ExecuteAsync().ForAwait(); - Assert.True(result, "result"); - await Task.WhenAll(a, b, c, d, e, f).ForAwait(); - Assert.True(a.IsCompleted, "a"); - Assert.True(b.IsCompleted, "b"); - Assert.True(c.IsCompleted, "c"); - Assert.True(d.IsCompleted, "d"); - Assert.True(e.IsCompleted, "e"); - Assert.True(f.IsCompleted, "f"); - - var g = db.KeyExists(key); - - Assert.Equal(10, await a.ForAwait()); - Assert.Equal(15, await b.ForAwait()); - Assert.Equal(15, (long)await c.ForAwait()); - Assert.True(await d.ForAwait()); - Assert.True(await e.ForAwait()); - Assert.False(await f.ForAwait()); - Assert.False(g); - } - } - - [Fact] - public async Task CombineFireAndForgetAndRegularAsyncInTransaction() - { - using (var muxer = Create()) - { - RedisKey key = Me(); - var db = muxer.GetDatabase(); - db.KeyDelete(key, CommandFlags.FireAndForget); - Assert.False(db.KeyExists(key)); - - var tran = db.CreateTransaction("state"); - var a = tran.StringIncrementAsync(key, 5); - var b = tran.StringIncrementAsync(key, 10, CommandFlags.FireAndForget); - var c = tran.StringIncrementAsync(key, 15); - Assert.True(tran.Execute()); - var count = (long)db.StringGet(key); - - Assert.Equal(5, await a); - Assert.Equal("state", a.AsyncState); - Assert.Equal(0, await b); - Assert.Null(b.AsyncState); - Assert.Equal(30, await 
c); - Assert.Equal("state", a.AsyncState); - Assert.Equal(30, count); - } - } - -#if VERBOSE - [Fact] - public async Task WatchAbort_StringEqual() - { - using (var vic = Create()) - using (var perp = Create()) - { - var key = Me(); - var db = vic.GetDatabase(); - - // expect foo, change to bar at the last minute - vic.PreTransactionExec += cmd => - { - Writer.WriteLine($"'{cmd}' detected; changing it..."); - perp.GetDatabase().StringSet(key, "bar"); - }; - db.KeyDelete(key); - db.StringSet(key, "foo"); - var tran = db.CreateTransaction(); - tran.AddCondition(Condition.StringEqual(key, "foo")); - var pong = tran.PingAsync(); - Assert.False(await tran.ExecuteAsync(), "expected abort"); - await Assert.ThrowsAsync(() => pong); - } - } - - [Fact] - public async Task WatchAbort_HashLengthEqual() - { - using (var vic = Create()) - using (var perp = Create()) - { - var key = Me(); - var db = vic.GetDatabase(); - - // expect foo, change to bar at the last minute - vic.PreTransactionExec += cmd => - { - Writer.WriteLine($"'{cmd}' detected; changing it..."); - perp.GetDatabase().HashSet(key, "bar", "def"); - }; - db.KeyDelete(key); - db.HashSet(key, "foo", "abc"); - var tran = db.CreateTransaction(); - tran.AddCondition(Condition.HashLengthEqual(key, 1)); - var pong = tran.PingAsync(); - Assert.False(await tran.ExecuteAsync()); - await Assert.ThrowsAsync(() => pong); - } - } -#endif - - [FactLongRunning] - public async Task ExecCompletes_Issue943() - { - int hashHit = 0, hashMiss = 0, expireHit = 0, expireMiss = 0; - using (var conn = Create()) - { - var db = conn.GetDatabase(); - for (int i = 0; i < 40000; i++) - { - RedisKey key = Me(); - await db.KeyDeleteAsync(key); - HashEntry[] hashEntries = new [] - { - new HashEntry("blah", DateTime.UtcNow.ToString("R")) - }; - ITransaction transaction = db.CreateTransaction(); - transaction.AddCondition(Condition.KeyNotExists(key)); - Task hashSetTask = transaction.HashSetAsync(key, hashEntries); - Task expireTask = 
transaction.KeyExpireAsync(key, TimeSpan.FromSeconds(30)); - bool committed = await transaction.ExecuteAsync(); - if (committed) - { - if (hashSetTask.IsCompleted) hashHit++; else hashMiss++; - if (expireTask.IsCompleted) expireHit++; else expireMiss++; - await hashSetTask; - await expireTask; - } - } - } - - Writer.WriteLine($"hash hit: {hashHit}, miss: {hashMiss}; expire hit: {expireHit}, miss: {expireMiss}"); - Assert.Equal(0, hashMiss); - Assert.Equal(0, expireMiss); - } - } -} diff --git a/tests/StackExchange.Redis.Tests/ValueTests.cs b/tests/StackExchange.Redis.Tests/ValueTests.cs new file mode 100644 index 000000000..69a8a2cbc --- /dev/null +++ b/tests/StackExchange.Redis.Tests/ValueTests.cs @@ -0,0 +1,48 @@ +using System; +using System.IO; +using System.Text; +using Xunit; + +namespace StackExchange.Redis.Tests; + +public class ValueTests(ITestOutputHelper output) : TestBase(output) +{ + [Fact] + public void NullValueChecks() + { + RedisValue four = 4; + Assert.False(four.IsNull); + Assert.True(four.IsInteger); + Assert.True(four.HasValue); + Assert.False(four.IsNullOrEmpty); + + RedisValue n = default; + Assert.True(n.IsNull); + Assert.False(n.IsInteger); + Assert.False(n.HasValue); + Assert.True(n.IsNullOrEmpty); + + RedisValue emptyArr = Array.Empty(); + Assert.False(emptyArr.IsNull); + Assert.False(emptyArr.IsInteger); + Assert.False(emptyArr.HasValue); + Assert.True(emptyArr.IsNullOrEmpty); + } + + [Fact] + public void FromStream() + { + var arr = Encoding.UTF8.GetBytes("hello world"); + var ms = new MemoryStream(arr); + var val = RedisValue.CreateFrom(ms); + Assert.Equal("hello world", val); + + ms = new MemoryStream(arr, 1, 6, false, false); + val = RedisValue.CreateFrom(ms); + Assert.Equal("ello w", val); + + ms = new MemoryStream(arr, 2, 6, false, true); + val = RedisValue.CreateFrom(ms); + Assert.Equal("llo wo", val); + } +} diff --git a/tests/StackExchange.Redis.Tests/Values.cs b/tests/StackExchange.Redis.Tests/Values.cs deleted file mode 100644 
index 0b3b1fad2..000000000 --- a/tests/StackExchange.Redis.Tests/Values.cs +++ /dev/null @@ -1,51 +0,0 @@ -using System.IO; -using System.Text; -using Xunit; -using Xunit.Abstractions; - -namespace StackExchange.Redis.Tests -{ - public class Values : TestBase - { - public Values(ITestOutputHelper output) : base (output) { } - - [Fact] - public void NullValueChecks() - { - RedisValue four = 4; - Assert.False(four.IsNull); - Assert.True(four.IsInteger); - Assert.True(four.HasValue); - Assert.False(four.IsNullOrEmpty); - - RedisValue n = default(RedisValue); - Assert.True(n.IsNull); - Assert.False(n.IsInteger); - Assert.False(n.HasValue); - Assert.True(n.IsNullOrEmpty); - - RedisValue emptyArr = new byte[0]; - Assert.False(emptyArr.IsNull); - Assert.False(emptyArr.IsInteger); - Assert.False(emptyArr.HasValue); - Assert.True(emptyArr.IsNullOrEmpty); - } - - [Fact] - public void FromStream() - { - var arr = Encoding.UTF8.GetBytes("hello world"); - var ms = new MemoryStream(arr); - var val = RedisValue.CreateFrom(ms); - Assert.Equal("hello world", val); - - ms = new MemoryStream(arr, 1, 6, false, false); - val = RedisValue.CreateFrom(ms); - Assert.Equal("ello w", val); - - ms = new MemoryStream(arr, 2, 6, false, true); - val = RedisValue.CreateFrom(ms); - Assert.Equal("llo wo", val); - } - } -} diff --git a/tests/StackExchange.Redis.Tests/VectorSetIntegrationTests.cs b/tests/StackExchange.Redis.Tests/VectorSetIntegrationTests.cs new file mode 100644 index 000000000..fb8e5d52a --- /dev/null +++ b/tests/StackExchange.Redis.Tests/VectorSetIntegrationTests.cs @@ -0,0 +1,1160 @@ +using System; +using System.Globalization; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; +using Newtonsoft.Json; +using Xunit; + +namespace StackExchange.Redis.Tests; + +[RunPerProtocol] +public sealed class VectorSetIntegrationTests(ITestOutputHelper output) : TestBase(output) +{ + [Theory] + [InlineData(true)] + [InlineData(false)] + public async Task 
VectorSetAdd_BasicOperation(bool suppressFp32) + { + await using var conn = Create(require: RedisFeatures.v8_0_0_M04); + var db = conn.GetDatabase(); + var key = Me(); + + // Clean up any existing data + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + + var vector = new[] { 1.0f, 2.0f, 3.0f, 4.0f }; + + if (suppressFp32) VectorSetAddMessage.SuppressFp32(); + try + { + var request = VectorSetAddRequest.Member("element1", vector.AsMemory(), null); + var result = await db.VectorSetAddAsync(key, request); + + Assert.True(result); + } + finally + { + if (suppressFp32) VectorSetAddMessage.RestoreFp32(); + } + } + + [Fact] + public async Task VectorSetAdd_WithAttributes() + { + await using var conn = Create(require: RedisFeatures.v8_0_0_M04); + var db = conn.GetDatabase(); + var key = Me(); + + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + + var vector = new[] { 1.0f, 2.0f, 3.0f, 4.0f }; + var attributes = """{"category":"test","id":123}"""; + + var request = VectorSetAddRequest.Member("element1", vector.AsMemory(), attributes); + var result = await db.VectorSetAddAsync(key, request); + + Assert.True(result); + + // Verify attributes were stored + var retrievedAttributes = await db.VectorSetGetAttributesJsonAsync(key, "element1"); + Assert.Equal(attributes, retrievedAttributes); + } + + [Theory] + [InlineData(VectorSetQuantization.Int8)] + [InlineData(VectorSetQuantization.None)] + [InlineData(VectorSetQuantization.Binary)] + public async Task VectorSetAdd_WithEverything(VectorSetQuantization quantization) + { + await using var conn = Create(require: RedisFeatures.v8_0_0_M04); + var db = conn.GetDatabase(); + var key = Me(); + + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + + var vector = new[] { 1.0f, 2.0f, 3.0f, 4.0f }; + var attributes = """{"category":"test","id":123}"""; + + var request = VectorSetAddRequest.Member( + "element1", + vector.AsMemory(), + attributes); + request.Quantization = quantization; + 
request.ReducedDimensions = 64; + request.BuildExplorationFactor = 300; + request.MaxConnections = 32; + request.UseCheckAndSet = true; + var result = await db.VectorSetAddAsync( + key, + request); + + Assert.True(result); + + // Verify attributes were stored + var retrievedAttributes = await db.VectorSetGetAttributesJsonAsync(key, "element1"); + Assert.Equal(attributes, retrievedAttributes); + } + + [Fact] + public async Task VectorSetLength_EmptySet() + { + await using var conn = Create(require: RedisFeatures.v8_0_0_M04); + var db = conn.GetDatabase(); + var key = Me(); + + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + + var length = await db.VectorSetLengthAsync(key); + Assert.Equal(0, length); + } + + [Fact] + public async Task VectorSetLength_WithElements() + { + await using var conn = Create(require: RedisFeatures.v8_0_0_M04); + var db = conn.GetDatabase(); + var key = Me(); + + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + + var vector1 = new[] { 1.0f, 2.0f, 3.0f }; + var vector2 = new[] { 4.0f, 5.0f, 6.0f }; + + var request = VectorSetAddRequest.Member("element1", vector1.AsMemory()); + await db.VectorSetAddAsync(key, request); + request = VectorSetAddRequest.Member("element2", vector2.AsMemory()); + await db.VectorSetAddAsync(key, request); + + var length = await db.VectorSetLengthAsync(key); + Assert.Equal(2, length); + } + + [Fact] + public async Task VectorSetDimension() + { + await using var conn = Create(require: RedisFeatures.v8_0_0_M04); + var db = conn.GetDatabase(); + var key = Me(); + + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + + var vector = new[] { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f }; + var request = VectorSetAddRequest.Member("element1", vector.AsMemory()); + await db.VectorSetAddAsync(key, request); + + var dimension = await db.VectorSetDimensionAsync(key); + Assert.Equal(5, dimension); + } + + [Theory] + [InlineData(true)] + [InlineData(false)] + public async Task VectorSetContains(bool suppressFp32) 
+ { + await using var conn = Create(require: RedisFeatures.v8_0_0_M04); + var db = conn.GetDatabase(); + var key = Me(); + + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + + var vector = new[] { 1.0f, 2.0f, 3.0f }; + if (suppressFp32) VectorSetAddMessage.SuppressFp32(); + try + { + var request = VectorSetAddRequest.Member("element1", vector.AsMemory()); + await db.VectorSetAddAsync(key, request); + + var exists = await db.VectorSetContainsAsync(key, "element1"); + var notExists = await db.VectorSetContainsAsync(key, "element2"); + + Assert.True(exists); + Assert.False(notExists); + } + finally + { + if (suppressFp32) VectorSetAddMessage.RestoreFp32(); + } + } + + [Theory] + [InlineData(true)] + [InlineData(false)] + public async Task VectorSetGetApproximateVector(bool suppressFp32) + { + await using var conn = Create(require: RedisFeatures.v8_0_0_M04); + var db = conn.GetDatabase(); + var key = Me(); + + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + + var originalVector = new[] { 1.0f, 2.0f, 3.0f, 4.0f }; + if (suppressFp32) VectorSetAddMessage.SuppressFp32(); + try + { + var request = VectorSetAddRequest.Member("element1", originalVector.AsMemory()); + await db.VectorSetAddAsync(key, request); + + using var retrievedLease = await db.VectorSetGetApproximateVectorAsync(key, "element1"); + + Assert.NotNull(retrievedLease); + var retrievedVector = retrievedLease.Span; + + Assert.Equal(originalVector.Length, retrievedVector.Length); + // Note: Due to quantization, values might not be exactly equal + for (int i = 0; i < originalVector.Length; i++) + { + Assert.True( + Math.Abs(originalVector[i] - retrievedVector[i]) < 0.1f, + $"Vector component {i} differs too much: expected {originalVector[i]}, got {retrievedVector[i]}"); + } + } + finally + { + if (suppressFp32) VectorSetAddMessage.RestoreFp32(); + } + } + + [Fact] + public async Task VectorSetRemove() + { + await using var conn = Create(require: RedisFeatures.v8_0_0_M04); + var db = 
conn.GetDatabase(); + var key = Me(); + + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + + var vector = new[] { 1.0f, 2.0f, 3.0f }; + var request = VectorSetAddRequest.Member("element1", vector.AsMemory()); + await db.VectorSetAddAsync(key, request); + + var removed = await db.VectorSetRemoveAsync(key, "element1"); + Assert.True(removed); + + removed = await db.VectorSetRemoveAsync(key, "element1"); + Assert.False(removed); + + var exists = await db.VectorSetContainsAsync(key, "element1"); + Assert.False(exists); + } + + [Theory] + [InlineData(VectorSetQuantization.Int8)] + [InlineData(VectorSetQuantization.Binary)] + [InlineData(VectorSetQuantization.None)] + public async Task VectorSetInfo(VectorSetQuantization quantization) + { + await using var conn = Create(require: RedisFeatures.v8_0_0_M04); + var db = conn.GetDatabase(); + var key = Me(); + + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + + var vector = new[] { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f }; + var request = VectorSetAddRequest.Member("element1", vector.AsMemory()); + request.Quantization = quantization; + await db.VectorSetAddAsync(key, request); + + var info = await db.VectorSetInfoAsync(key); + + Assert.NotNull(info); + var v = info.GetValueOrDefault(); + Assert.Equal(5, v.Dimension); + Assert.Equal(1, v.Length); + Assert.Equal(quantization, v.Quantization); + Assert.Null(v.QuantizationRaw); // Should be null for known quant types + + Assert.NotEqual(0, v.VectorSetUid); + Assert.NotEqual(0, v.HnswMaxNodeUid); + } + + [Fact] + public async Task VectorSetRandomMember() + { + await using var conn = Create(require: RedisFeatures.v8_0_0_M04); + var db = conn.GetDatabase(); + var key = Me(); + + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + + var vector1 = new[] { 1.0f, 2.0f, 3.0f }; + var vector2 = new[] { 4.0f, 5.0f, 6.0f }; + + var request = VectorSetAddRequest.Member("element1", vector1.AsMemory()); + await db.VectorSetAddAsync(key, request); + request = 
VectorSetAddRequest.Member("element2", vector2.AsMemory()); + await db.VectorSetAddAsync(key, request); + + var randomMember = await db.VectorSetRandomMemberAsync(key); + Assert.True(randomMember == "element1" || randomMember == "element2"); + } + + [Fact] + public async Task VectorSetRandomMembers() + { + await using var conn = Create(require: RedisFeatures.v8_0_0_M04); + var db = conn.GetDatabase(); + var key = Me(); + + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + + var vector1 = new[] { 1.0f, 2.0f, 3.0f }; + var vector2 = new[] { 4.0f, 5.0f, 6.0f }; + var vector3 = new[] { 7.0f, 8.0f, 9.0f }; + + var request = VectorSetAddRequest.Member("element1", vector1.AsMemory()); + await db.VectorSetAddAsync(key, request); + request = VectorSetAddRequest.Member("element2", vector2.AsMemory()); + await db.VectorSetAddAsync(key, request); + request = VectorSetAddRequest.Member("element3", vector3.AsMemory()); + await db.VectorSetAddAsync(key, request); + + var randomMembers = await db.VectorSetRandomMembersAsync(key, 2); + + Assert.Equal(2, randomMembers.Length); + Assert.All(randomMembers, member => + Assert.True(member == "element1" || member == "element2" || member == "element3")); + } + + [Theory] + [InlineData(false, false)] + [InlineData(false, true)] + [InlineData(true, false)] + [InlineData(true, true)] + public async Task VectorSetSimilaritySearch_ByVector(bool withScores, bool withAttributes) + { + await using var conn = Create(require: RedisFeatures.v8_0_0_M04); + var db = conn.GetDatabase(); + var disambiguator = (withScores ? 1 : 0) + (withAttributes ? 
2 : 0); + var key = Me() + disambiguator; + + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + + // Add some test vectors + var vector1 = new[] { 1.0f, 0.0f, 0.0f }; + var vector2 = new[] { 0.0f, 1.0f, 0.0f }; + var vector3 = new[] { 0.9f, 0.1f, 0.0f }; // Similar to vector1 + + var request = + VectorSetAddRequest.Member("element1", vector1.AsMemory(), attributesJson: """{"category":"x"}"""); + await db.VectorSetAddAsync(key, request); + request = VectorSetAddRequest.Member("element2", vector2.AsMemory(), attributesJson: """{"category":"y"}"""); + await db.VectorSetAddAsync(key, request); + request = VectorSetAddRequest.Member("element3", vector3.AsMemory(), attributesJson: """{"category":"z"}"""); + await db.VectorSetAddAsync(key, request); + + // Search for vectors similar to vector1 + var query = VectorSetSimilaritySearchRequest.ByVector(vector1.AsMemory()); + query.Count = 2; + query.WithScores = withScores; + query.WithAttributes = withAttributes; + using var results = await db.VectorSetSimilaritySearchAsync(key, query); + + Assert.NotNull(results); + foreach (var result in results.Span) + { + Log(result.ToString()); + } + + var resultsArray = results.Span.ToArray(); + + Assert.True(resultsArray.Length <= 2); + Assert.Contains(resultsArray, r => r.Member == "element1"); + var found = resultsArray.First(r => r.Member == "element1"); + + if (withAttributes) + { + Assert.Equal("""{"category":"x"}""", found.AttributesJson); + } + else + { + Assert.Null(found.AttributesJson); + } + + Assert.NotEqual(withScores, double.IsNaN(found.Score)); + } + + [Theory] + [InlineData(false, false)] + [InlineData(false, true)] + [InlineData(true, false)] + [InlineData(true, true)] + public async Task VectorSetSimilaritySearch_ByMember(bool withScores, bool withAttributes) + { + await using var conn = Create(require: RedisFeatures.v8_0_0_M04); + var db = conn.GetDatabase(); + var disambiguator = (withScores ? 1 : 0) + (withAttributes ? 
2 : 0); + var key = Me() + disambiguator; + + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + + var vector1 = new[] { 1.0f, 0.0f, 0.0f }; + var vector2 = new[] { 0.0f, 1.0f, 0.0f }; + + var request = + VectorSetAddRequest.Member("element1", vector1.AsMemory(), attributesJson: """{"category":"x"}"""); + await db.VectorSetAddAsync(key, request); + request = VectorSetAddRequest.Member("element2", vector2.AsMemory(), attributesJson: """{"category":"y"}"""); + await db.VectorSetAddAsync(key, request); + + var query = VectorSetSimilaritySearchRequest.ByMember("element1"); + query.Count = 1; + query.WithScores = withScores; + query.WithAttributes = withAttributes; + using var results = await db.VectorSetSimilaritySearchAsync(key, query); + + Assert.NotNull(results); + foreach (var result in results.Span) + { + Log(result.ToString()); + } + + var resultsArray = results.Span.ToArray(); + + Assert.Single(resultsArray); + Assert.Equal("element1", resultsArray[0].Member); + if (withAttributes) + { + Assert.Equal("""{"category":"x"}""", resultsArray[0].AttributesJson); + } + else + { + Assert.Null(resultsArray[0].AttributesJson); + } + + Assert.NotEqual(withScores, double.IsNaN(resultsArray[0].Score)); + } + + [Theory] + [InlineData(false, false)] + [InlineData(true, false)] + [InlineData(false, true)] + [InlineData(true, true)] + public async Task VectorSetSimilaritySearch_WithFilter(bool corruptPrefix, bool corruptSuffix) + { + await using var conn = Create(require: RedisFeatures.v8_0_0_M04); + var db = conn.GetDatabase(); + var key = Me(); + + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + + Random rand = new Random(); + + float[] vector = new float[50]; + + void ScrambleVector() + { + var arr = vector; + for (int i = 0; i < arr.Length; i++) + { + arr[i] = (float)rand.NextDouble(); + } + } + + string[] regions = new[] { "us-west", "us-east", "eu-west", "eu-east", "ap-south", "ap-north" }; + for (int i = 0; i < 100; i++) + { + var region = 
regions[rand.Next(regions.Length)]; + var json = (corruptPrefix ? "oops" : "") + + JsonConvert.SerializeObject(new { id = i, region }) + + (corruptSuffix ? "oops" : ""); + ScrambleVector(); + var request = VectorSetAddRequest.Member($"element{i}", vector.AsMemory(), json); + await db.VectorSetAddAsync(key, request); + } + + ScrambleVector(); + var query = VectorSetSimilaritySearchRequest.ByVector(vector); + query.Count = 100; + query.WithScores = true; + query.WithAttributes = true; + query.FilterExpression = ".id >= 30"; + using var results = await db.VectorSetSimilaritySearchAsync(key, query); + + Assert.NotNull(results); + foreach (var result in results.Span) + { + Log(result.ToString()); + } + + Log($"Total matches: {results.Span.Length}"); + + var resultsArray = results.Span.ToArray(); + if (corruptPrefix) + { + // server short-circuits failure to be no match; we just want to assert + // what the observed behavior *is* + Assert.Empty(resultsArray); + } + else + { + Assert.Equal(70, resultsArray.Length); + Assert.All(resultsArray, r => Assert.True( + r.Score is > 0.0 and < 1.0 && GetId(r.Member!) >= 30)); + } + + static int GetId(string member) + { + if (member.StartsWith("element")) + { + return int.Parse(member.Substring(7), NumberStyles.Integer, CultureInfo.InvariantCulture); + } + + return -1; + } + } + + [Theory] + [InlineData(null)] + [InlineData("")] + [InlineData(" ")] + [InlineData(".id >= 30")] + public async Task VectorSetSimilaritySearch_TestFilterValues(string? 
filterExpression) + { + await using var conn = Create(require: RedisFeatures.v8_0_0_M04); + var db = conn.GetDatabase(); + var key = Me(); + + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + + Random rand = new Random(); + + float[] vector = new float[50]; + + void ScrambleVector() + { + var arr = vector; + for (int i = 0; i < arr.Length; i++) + { + arr[i] = (float)rand.NextDouble(); + } + } + + string[] regions = new[] { "us-west", "us-east", "eu-west", "eu-east", "ap-south", "ap-north" }; + for (int i = 0; i < 100; i++) + { + var region = regions[rand.Next(regions.Length)]; + var json = JsonConvert.SerializeObject(new { id = i, region }); + ScrambleVector(); + var request = VectorSetAddRequest.Member($"element{i}", vector.AsMemory(), json); + await db.VectorSetAddAsync(key, request); + } + + ScrambleVector(); + var query = VectorSetSimilaritySearchRequest.ByVector(vector); + query.Count = 100; + query.WithScores = true; + query.WithAttributes = true; + query.FilterExpression = filterExpression; + + using var results = await db.VectorSetSimilaritySearchAsync(key, query); + + Assert.NotNull(results); + foreach (var result in results.Span) + { + Log(result.ToString()); + } + + Log($"Total matches: {results.Span.Length}"); + // we're not interested in the specific results; we're just checking that the + // filter expression was added and parsed without exploding about arg mismatch + } + + [Fact] + public async Task VectorSetSetAttributesJson() + { + await using var conn = Create(require: RedisFeatures.v8_0_0_M04); + var db = conn.GetDatabase(); + var key = Me(); + + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + + var vector = new[] { 1.0f, 2.0f, 3.0f }; + var request = VectorSetAddRequest.Member("element1", vector.AsMemory()); + await db.VectorSetAddAsync(key, request); + + // Set attributes for existing element + var attributes = """{"category":"updated","priority":"high","timestamp":"2024-01-01"}"""; + var result = await 
db.VectorSetSetAttributesJsonAsync(key, "element1", attributes); + + Assert.True(result); + + // Verify attributes were set + var retrievedAttributes = await db.VectorSetGetAttributesJsonAsync(key, "element1"); + Assert.Equal(attributes, retrievedAttributes); + + // Try setting attributes for non-existent element + var failResult = await db.VectorSetSetAttributesJsonAsync(key, "nonexistent", attributes); + Assert.False(failResult); + } + + [Fact] + public async Task VectorSetGetLinks() + { + await using var conn = Create(require: RedisFeatures.v8_0_0_M04); + var db = conn.GetDatabase(); + var key = Me(); + + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + + // Add some vectors that should be linked + var vector1 = new[] { 1.0f, 0.0f, 0.0f }; + var vector2 = new[] { 0.9f, 0.1f, 0.0f }; // Similar to vector1 + var vector3 = new[] { 0.0f, 1.0f, 0.0f }; // Different from vector1 + + var request = VectorSetAddRequest.Member("element1", vector1.AsMemory()); + await db.VectorSetAddAsync(key, request); + request = VectorSetAddRequest.Member("element2", vector2.AsMemory()); + await db.VectorSetAddAsync(key, request); + request = VectorSetAddRequest.Member("element3", vector3.AsMemory()); + await db.VectorSetAddAsync(key, request); + + // Get links for element1 (should include similar vectors) + using var links = await db.VectorSetGetLinksAsync(key, "element1"); + + Assert.NotNull(links); + foreach (var link in links.Span) + { + Log(link.ToString()); + } + + var linksArray = links.Span.ToArray(); + + // Should contain the other elements (note there can be transient duplicates, so: contains, not exact) + Assert.Contains("element2", linksArray); + Assert.Contains("element3", linksArray); + } + + [Fact] + public async Task VectorSetGetLinksWithScores() + { + await using var conn = Create(require: RedisFeatures.v8_0_0_M04); + var db = conn.GetDatabase(); + var key = Me(); + + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + + // Add some vectors with known 
relationships + var vector1 = new[] { 1.0f, 0.0f, 0.0f }; + var vector2 = new[] { 0.9f, 0.1f, 0.0f }; // Similar to vector1 + var vector3 = new[] { 0.0f, 1.0f, 0.0f }; // Different from vector1 + + var request = VectorSetAddRequest.Member("element1", vector1.AsMemory()); + await db.VectorSetAddAsync(key, request); + request = VectorSetAddRequest.Member("element2", vector2.AsMemory()); + await db.VectorSetAddAsync(key, request); + request = VectorSetAddRequest.Member("element3", vector3.AsMemory()); + await db.VectorSetAddAsync(key, request); + + // Get links with scores for element1 + using var linksWithScores = await db.VectorSetGetLinksWithScoresAsync(key, "element1"); + Assert.NotNull(linksWithScores); + foreach (var link in linksWithScores.Span) + { + Log(link.ToString()); + } + + var linksArray = linksWithScores.Span.ToArray(); + Assert.NotEmpty(linksArray); + + // Verify each link has a valid score + // ReSharper disable once ParameterOnlyUsedForPreconditionCheck.Local + Assert.All(linksArray, static link => + { + Assert.False(link.Member.IsNull); + Assert.False(double.IsNaN(link.Score)); + Assert.True(link.Score >= 0.0); // Similarity scores should be non-negative + }); + + // Should contain the other elements (note there can be transient duplicates, so: contains, not exact) + Assert.Contains(linksArray, l => l.Member == "element2"); + Assert.Contains(linksArray, l => l.Member == "element3"); + + Assert.True(linksArray.First(l => l.Member == "element2").Score > 0.9); // similar + Assert.True(linksArray.First(l => l.Member == "element3").Score < 0.8); // less-so + } + + [Fact] + public async Task VectorSetRange_BasicOperation() + { + await using var conn = Create(require: RedisFeatures.v8_4_0_rc1); + var db = conn.GetDatabase(); + var key = Me(); + + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + + // Add members with lexicographically ordered names + var vector = new[] { 1.0f, 2.0f, 3.0f }; + var members = new[] { "alpha", "beta", "delta", 
"gamma" }; // note: delta before gamma because lexicographical + + foreach (var member in members) + { + var request = VectorSetAddRequest.Member(member, vector.AsMemory()); + await db.VectorSetAddAsync(key, request); + } + + // Get all members - should be in lexicographical order + using var result = await db.VectorSetRangeAsync(key); + + Assert.NotNull(result); + Assert.Equal(4, result.Length); + // Lexicographical order: alpha, beta, delta, gamma + Assert.Equal(new[] { "alpha", "beta", "delta", "gamma" }, result.Span.ToArray().Select(r => (string?)r).ToArray()); + } + + [Fact] + public async Task VectorSetRange_WithStartAndEnd() + { + await using var conn = Create(require: RedisFeatures.v8_4_0_rc1); + var db = conn.GetDatabase(); + var key = Me(); + + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + + var vector = new[] { 1.0f, 2.0f, 3.0f }; + var members = new[] { "apple", "banana", "cherry", "date", "elderberry" }; + + foreach (var member in members) + { + var request = VectorSetAddRequest.Member(member, vector.AsMemory()); + await db.VectorSetAddAsync(key, request); + } + + // Get range from "banana" to "date" (inclusive) + using var result = await db.VectorSetRangeAsync(key, start: "banana", end: "date"); + + Assert.NotNull(result); + Assert.Equal(3, result.Length); + Assert.Equal(new[] { "banana", "cherry", "date" }, result.Span.ToArray().Select(r => (string?)r).ToArray()); + } + + [Fact] + public async Task VectorSetRange_WithCount() + { + await using var conn = Create(require: RedisFeatures.v8_4_0_rc1); + var db = conn.GetDatabase(); + var key = Me(); + + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + + var vector = new[] { 1.0f, 2.0f, 3.0f }; + + // Add 10 members + for (int i = 0; i < 10; i++) + { + var request = VectorSetAddRequest.Member($"member{i}", vector.AsMemory()); + await db.VectorSetAddAsync(key, request); + } + + // Get only 5 members + using var result = await db.VectorSetRangeAsync(key, count: 5); + + 
Assert.NotNull(result); + Assert.Equal(5, result.Length); + } + + [Fact] + public async Task VectorSetRange_WithExcludeStart() + { + await using var conn = Create(require: RedisFeatures.v8_4_0_rc1); + var db = conn.GetDatabase(); + var key = Me(); + + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + + var vector = new[] { 1.0f, 2.0f, 3.0f }; + var members = new[] { "a", "b", "c", "d" }; + + foreach (var member in members) + { + var request = VectorSetAddRequest.Member(member, vector.AsMemory()); + await db.VectorSetAddAsync(key, request); + } + + // Get range excluding start + using var result = await db.VectorSetRangeAsync(key, start: "a", end: "d", exclude: Exclude.Start); + + Assert.NotNull(result); + Assert.Equal(3, result.Length); + Assert.Equal(new[] { "b", "c", "d" }, result.Span.ToArray().Select(r => (string?)r).ToArray()); + } + + [Fact] + public async Task VectorSetRange_WithExcludeEnd() + { + await using var conn = Create(require: RedisFeatures.v8_4_0_rc1); + var db = conn.GetDatabase(); + var key = Me(); + + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + + var vector = new[] { 1.0f, 2.0f, 3.0f }; + var members = new[] { "a", "b", "c", "d" }; + + foreach (var member in members) + { + var request = VectorSetAddRequest.Member(member, vector.AsMemory()); + await db.VectorSetAddAsync(key, request); + } + + // Get range excluding end + using var result = await db.VectorSetRangeAsync(key, start: "a", end: "d", exclude: Exclude.Stop); + + Assert.NotNull(result); + Assert.Equal(3, result.Length); + Assert.Equal(new[] { "a", "b", "c" }, result.Span.ToArray().Select(r => (string?)r).ToArray()); + } + + [Fact] + public async Task VectorSetRange_WithExcludeBoth() + { + await using var conn = Create(require: RedisFeatures.v8_4_0_rc1); + var db = conn.GetDatabase(); + var key = Me(); + + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + + var vector = new[] { 1.0f, 2.0f, 3.0f }; + var members = new[] { "a", "b", "c", "d", "e" }; + + 
foreach (var member in members) + { + var request = VectorSetAddRequest.Member(member, vector.AsMemory()); + await db.VectorSetAddAsync(key, request); + } + + // Get range excluding both boundaries + using var result = await db.VectorSetRangeAsync(key, start: "a", end: "e", exclude: Exclude.Both); + + Assert.NotNull(result); + Assert.Equal(3, result.Length); + Assert.Equal(new[] { "b", "c", "d" }, result.Span.ToArray().Select(r => (string?)r).ToArray()); + } + + [Fact] + public async Task VectorSetRange_EmptySet() + { + await using var conn = Create(require: RedisFeatures.v8_4_0_rc1); + var db = conn.GetDatabase(); + var key = Me(); + + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + + // Don't add any members + using var result = await db.VectorSetRangeAsync(key); + + Assert.NotNull(result); + Assert.Empty(result.Span.ToArray()); + } + + [Fact] + public async Task VectorSetRange_NoMatches() + { + await using var conn = Create(require: RedisFeatures.v8_4_0_rc1); + var db = conn.GetDatabase(); + var key = Me(); + + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + + var vector = new[] { 1.0f, 2.0f, 3.0f }; + var members = new[] { "a", "b", "c" }; + + foreach (var member in members) + { + var request = VectorSetAddRequest.Member(member, vector.AsMemory()); + await db.VectorSetAddAsync(key, request); + } + + // Query range with no matching members + using var result = await db.VectorSetRangeAsync(key, start: "x", end: "z"); + + Assert.NotNull(result); + Assert.Empty(result.Span.ToArray()); + } + + [Fact] + public async Task VectorSetRange_OpenStart() + { + await using var conn = Create(require: RedisFeatures.v8_4_0_rc1); + var db = conn.GetDatabase(); + var key = Me(); + + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + + var vector = new[] { 1.0f, 2.0f, 3.0f }; + var members = new[] { "alpha", "beta", "gamma" }; + + foreach (var member in members) + { + var request = VectorSetAddRequest.Member(member, vector.AsMemory()); + await 
db.VectorSetAddAsync(key, request); + } + + // Get from beginning to "beta" + using var result = await db.VectorSetRangeAsync(key, end: "beta"); + + Assert.NotNull(result); + Assert.Equal(2, result.Length); + Assert.Equal(new[] { "alpha", "beta" }, result.Span.ToArray().Select(r => (string?)r).ToArray()); + } + + [Fact] + public async Task VectorSetRange_OpenEnd() + { + await using var conn = Create(require: RedisFeatures.v8_4_0_rc1); + var db = conn.GetDatabase(); + var key = Me(); + + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + + var vector = new[] { 1.0f, 2.0f, 3.0f }; + var members = new[] { "alpha", "beta", "gamma" }; + + foreach (var member in members) + { + var request = VectorSetAddRequest.Member(member, vector.AsMemory()); + await db.VectorSetAddAsync(key, request); + } + + // Get from "beta" to end + using var result = await db.VectorSetRangeAsync(key, start: "beta"); + + Assert.NotNull(result); + Assert.Equal(2, result.Length); + Assert.Equal(new[] { "beta", "gamma" }, result.Span.ToArray().Select(r => (string?)r).ToArray()); + } + + [Fact] + public async Task VectorSetRange_SyncVsAsync() + { + await using var conn = Create(require: RedisFeatures.v8_4_0_rc1); + var db = conn.GetDatabase(); + var key = Me(); + + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + + var vector = new[] { 1.0f, 2.0f, 3.0f }; + + // Add 20 members + for (int i = 0; i < 20; i++) + { + var request = VectorSetAddRequest.Member($"m{i:D2}", vector.AsMemory()); + await db.VectorSetAddAsync(key, request); + } + + // Call both sync and async + using var syncResult = db.VectorSetRange(key, start: "m05", end: "m15"); + using var asyncResult = await db.VectorSetRangeAsync(key, start: "m05", end: "m15"); + + Assert.NotNull(syncResult); + Assert.NotNull(asyncResult); + Assert.Equal(syncResult.Length, asyncResult.Length); + Assert.Equal(syncResult.Span.ToArray().Select(r => (string?)r), asyncResult.Span.ToArray().Select(r => (string?)r)); + } + + [Fact] + public 
async Task VectorSetRange_WithNumericLexOrder() + { + await using var conn = Create(require: RedisFeatures.v8_4_0_rc1); + var db = conn.GetDatabase(); + var key = Me(); + + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + + var vector = new[] { 1.0f, 2.0f, 3.0f }; + var members = new[] { "1", "10", "2", "20", "3" }; + + foreach (var member in members) + { + var request = VectorSetAddRequest.Member(member, vector.AsMemory()); + await db.VectorSetAddAsync(key, request); + } + + // Get all - should be in lexicographical order, not numeric + using var result = await db.VectorSetRangeAsync(key); + + Assert.NotNull(result); + Assert.Equal(5, result.Length); + // Lexicographical order: "1", "10", "2", "20", "3" + Assert.Equal(new[] { "1", "10", "2", "20", "3" }, result.Span.ToArray().Select(r => (string?)r).ToArray()); + } + + [Fact] + public async Task VectorSetRangeEnumerate_BasicIteration() + { + await using var conn = Create(require: RedisFeatures.v8_4_0_rc1); + var db = conn.GetDatabase(); + var key = Me(); + + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + + var vector = new[] { 1.0f, 2.0f, 3.0f }; + + // Add 50 members + for (int i = 0; i < 50; i++) + { + var request = VectorSetAddRequest.Member($"member{i:D3}", vector.AsMemory()); + await db.VectorSetAddAsync(key, request); + } + + // Enumerate with batch size of 10 + var allMembers = new System.Collections.Generic.List(); + foreach (var member in db.VectorSetRangeEnumerate(key, count: 10)) + { + allMembers.Add(member); + } + + Assert.Equal(50, allMembers.Count); + + // Verify lexicographical order + var sorted = allMembers.OrderBy(m => (string?)m, StringComparer.Ordinal).ToList(); + Assert.Equal(sorted, allMembers); + } + + [Fact] + public async Task VectorSetRangeEnumerate_WithRange() + { + await using var conn = Create(require: RedisFeatures.v8_4_0_rc1); + var db = conn.GetDatabase(); + var key = Me(); + + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + + var vector = new[] { 
1.0f, 2.0f, 3.0f }; + + // Add members "a" through "z" + for (char c = 'a'; c <= 'z'; c++) + { + var request = VectorSetAddRequest.Member(c.ToString(), vector.AsMemory()); + await db.VectorSetAddAsync(key, request); + } + + // Enumerate from "f" to "p" with batch size 5 + var allMembers = new System.Collections.Generic.List(); + foreach (var member in db.VectorSetRangeEnumerate(key, start: "f", end: "p", count: 5)) + { + allMembers.Add(member); + } + + // Should get "f" through "p" inclusive (11 members) + Assert.Equal(11, allMembers.Count); + Assert.Equal("f", (string?)allMembers.First()); + Assert.Equal("p", (string?)allMembers.Last()); + } + + [Fact] + public async Task VectorSetRangeEnumerate_EarlyBreak() + { + await using var conn = Create(require: RedisFeatures.v8_4_0_rc1); + var db = conn.GetDatabase(); + var key = Me(); + + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + + var vector = new[] { 1.0f, 2.0f, 3.0f }; + + // Add 100 members + for (int i = 0; i < 100; i++) + { + var request = VectorSetAddRequest.Member($"member{i:D3}", vector.AsMemory()); + await db.VectorSetAddAsync(key, request); + } + + // Take only first 25 members + var limitedMembers = db.VectorSetRangeEnumerate(key, count: 10).Take(25).ToList(); + + Assert.Equal(25, limitedMembers.Count); + } + + [Fact] + public async Task VectorSetRangeEnumerate_EmptyBatches() + { + await using var conn = Create(require: RedisFeatures.v8_4_0_rc1); + var db = conn.GetDatabase(); + var key = Me(); + + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + + // Don't add any members + var allMembers = new System.Collections.Generic.List(); + foreach (var member in db.VectorSetRangeEnumerate(key)) + { + allMembers.Add(member); + } + + Assert.Empty(allMembers); + } + + [Fact] + public async Task VectorSetRangeEnumerateAsync_BasicIteration() + { + await using var conn = Create(require: RedisFeatures.v8_4_0_rc1); + var db = conn.GetDatabase(); + var key = Me(); + + await db.KeyDeleteAsync(key, 
CommandFlags.FireAndForget); + + var vector = new[] { 1.0f, 2.0f, 3.0f }; + + // Add 50 members + for (int i = 0; i < 50; i++) + { + var request = VectorSetAddRequest.Member($"member{i:D3}", vector.AsMemory()); + await db.VectorSetAddAsync(key, request); + } + + // Enumerate with batch size of 10 + var allMembers = new System.Collections.Generic.List(); + await foreach (var member in db.VectorSetRangeEnumerateAsync(key, count: 10)) + { + allMembers.Add(member); + } + + Assert.Equal(50, allMembers.Count); + + // Verify lexicographical order + var sorted = allMembers.OrderBy(m => (string?)m, StringComparer.Ordinal).ToList(); + Assert.Equal(sorted, allMembers); + } + + [Fact] + public async Task VectorSetRangeEnumerateAsync_WithCancellation() + { + await using var conn = Create(require: RedisFeatures.v8_4_0_rc1); + var db = conn.GetDatabase(); + var key = Me(); + + await db.KeyDeleteAsync(key, CommandFlags.FireAndForget); + + var vector = new[] { 1.0f, 2.0f, 3.0f }; + + // Add 100 members + for (int i = 0; i < 100; i++) + { + var request = VectorSetAddRequest.Member($"member{i:D3}", vector.AsMemory()); + await db.VectorSetAddAsync(key, request); + } + + using var cts = new CancellationTokenSource(); + var allMembers = new System.Collections.Generic.List(); + + // Start enumeration and cancel after collecting some members + await Assert.ThrowsAnyAsync(async () => + { + await foreach (var member in db.VectorSetRangeEnumerateAsync(key, count: 10).WithCancellation(cts.Token)) + { + allMembers.Add(member); + + // Cancel after we've collected 25 members + if (allMembers.Count == 25) + { + cts.Cancel(); + } + } + }); + + // Should have stopped at or shortly after 25 members + Log($"Expected ~25 members, got {allMembers.Count}"); + Assert.True(allMembers.Count >= 25 && allMembers.Count <= 35, $"Expected ~25 members, got {allMembers.Count}"); + } +} diff --git a/tests/StackExchange.Redis.Tests/WithKeyPrefixTests.cs b/tests/StackExchange.Redis.Tests/WithKeyPrefixTests.cs index 
8ff0cf0ac..acbef74cf 100644 --- a/tests/StackExchange.Redis.Tests/WithKeyPrefixTests.cs +++ b/tests/StackExchange.Redis.Tests/WithKeyPrefixTests.cs @@ -1,139 +1,133 @@ using System; +using System.Threading.Tasks; using StackExchange.Redis.KeyspaceIsolation; using Xunit; -using Xunit.Abstractions; -namespace StackExchange.Redis.Tests +namespace StackExchange.Redis.Tests; + +public class WithKeyPrefixTests(ITestOutputHelper output, SharedConnectionFixture fixture) : TestBase(output, fixture) { - [Collection(SharedConnectionFixture.Key)] - public class WithKeyPrefixTests : TestBase + [Fact] + public async Task BlankPrefixYieldsSame_Bytes() { - public WithKeyPrefixTests(ITestOutputHelper output, SharedConnectionFixture fixture) : base(output, fixture) { } + await using var conn = Create(); - [Fact] - public void BlankPrefixYieldsSame_Bytes() - { - using (var conn = Create()) - { - var raw = conn.GetDatabase(); - var prefixed = raw.WithKeyPrefix(new byte[0]); - Assert.Same(raw, prefixed); - } - } - - [Fact] - public void BlankPrefixYieldsSame_String() - { - using (var conn = Create()) - { - var raw = conn.GetDatabase(); - var prefixed = raw.WithKeyPrefix(""); - Assert.Same(raw, prefixed); - } - } - - [Fact] - public void NullPrefixIsError_Bytes() - { - Assert.Throws(() => - { - using var conn = Create(); - var raw = conn.GetDatabase(); - raw.WithKeyPrefix((byte[])null); - }); - } - - [Fact] - public void NullPrefixIsError_String() + var raw = conn.GetDatabase(); + var prefixed = raw.WithKeyPrefix(Array.Empty()); + Assert.Same(raw, prefixed); + } + + [Fact] + public async Task BlankPrefixYieldsSame_String() + { + await using var conn = Create(); + + var raw = conn.GetDatabase(); + var prefixed = raw.WithKeyPrefix(""); + Assert.Same(raw, prefixed); + } + + [Fact] + public async Task NullPrefixIsError_Bytes() + { + await Assert.ThrowsAsync(async () => { - Assert.Throws(() => - { - using var conn = Create(); - var raw = conn.GetDatabase(); - raw.WithKeyPrefix((string)null); 
- }); - } - - [Theory] - [InlineData("abc")] - [InlineData("")] - [InlineData(null)] - public void NullDatabaseIsError(string prefix) + await using var conn = Create(); + + var raw = conn.GetDatabase(); + raw.WithKeyPrefix((byte[]?)null); + }); + } + + [Fact] + public async Task NullPrefixIsError_String() + { + await Assert.ThrowsAsync(async () => { - Assert.Throws(() => - { - IDatabase raw = null; - raw.WithKeyPrefix(prefix); - }); - } - - [Fact] - public void BasicSmokeTest() + await using var conn = Create(); + + var raw = conn.GetDatabase(); + raw.WithKeyPrefix((string?)null); + }); + } + + [Theory] + [InlineData("abc")] + [InlineData("")] + [InlineData(null)] + public void NullDatabaseIsError(string? prefix) + { + Assert.Throws(() => { - using (var conn = Create()) - { - var raw = conn.GetDatabase(); + IDatabase? raw = null; + raw!.WithKeyPrefix(prefix); + }); + } + + [Fact] + public async Task BasicSmokeTest() + { + await using var conn = Create(); - var prefix = Me(); - var foo = raw.WithKeyPrefix(prefix); - var foobar = foo.WithKeyPrefix("bar"); + var raw = conn.GetDatabase(); - string key = Me(); + var prefix = Me(); + var foo = raw.WithKeyPrefix(prefix); + var foobar = foo.WithKeyPrefix("bar"); - string s = Guid.NewGuid().ToString(), t = Guid.NewGuid().ToString(); + string key = Me(); - foo.StringSet(key, s, flags: CommandFlags.FireAndForget); - var val = (string)foo.StringGet(key); - Assert.Equal(s, val); // fooBasicSmokeTest + string s = Guid.NewGuid().ToString(), t = Guid.NewGuid().ToString(); - foobar.StringSet(key, t, flags: CommandFlags.FireAndForget); - val = foobar.StringGet(key); - Assert.Equal(t, val); // foobarBasicSmokeTest + foo.StringSet(key, s, flags: CommandFlags.FireAndForget); + var val = (string?)foo.StringGet(key); + Assert.Equal(s, val); // fooBasicSmokeTest - val = foo.StringGet("bar" + key); - Assert.Equal(t, val); // foobarBasicSmokeTest + foobar.StringSet(key, t, flags: CommandFlags.FireAndForget); + val = foobar.StringGet(key); + 
Assert.Equal(t, val); // foobarBasicSmokeTest - val = raw.StringGet(prefix + key); - Assert.Equal(s, val); // fooBasicSmokeTest + val = foo.StringGet("bar" + key); + Assert.Equal(t, val); // foobarBasicSmokeTest - val = raw.StringGet(prefix + "bar" + key); - Assert.Equal(t, val); // foobarBasicSmokeTest - } - } + val = raw.StringGet(prefix + key); + Assert.Equal(s, val); // fooBasicSmokeTest - [Fact] - public void ConditionTest() - { - using (var conn = Create()) - { - var raw = conn.GetDatabase(); - - var prefix = Me() + ":"; - var foo = raw.WithKeyPrefix(prefix); - - raw.KeyDelete(prefix + "abc", CommandFlags.FireAndForget); - raw.KeyDelete(prefix + "i", CommandFlags.FireAndForget); - - // execute while key exists - raw.StringSet(prefix + "abc", "def", flags: CommandFlags.FireAndForget); - var tran = foo.CreateTransaction(); - tran.AddCondition(Condition.KeyExists("abc")); - tran.StringIncrementAsync("i"); - tran.Execute(); - - int i = (int)raw.StringGet(prefix + "i"); - Assert.Equal(1, i); - - // repeat without key - raw.KeyDelete(prefix + "abc", CommandFlags.FireAndForget); - tran = foo.CreateTransaction(); - tran.AddCondition(Condition.KeyExists("abc")); - tran.StringIncrementAsync("i"); - tran.Execute(); - - i = (int)raw.StringGet(prefix + "i"); - Assert.Equal(1, i); - } - } + val = raw.StringGet(prefix + "bar" + key); + Assert.Equal(t, val); // foobarBasicSmokeTest + } + + [Fact] + public async Task ConditionTest() + { + await using var conn = Create(); + + var raw = conn.GetDatabase(); + + var prefix = Me() + ":"; + var foo = raw.WithKeyPrefix(prefix); + + raw.KeyDelete(prefix + "abc", CommandFlags.FireAndForget); + raw.KeyDelete(prefix + "i", CommandFlags.FireAndForget); + + // execute while key exists + raw.StringSet(prefix + "abc", "def", flags: CommandFlags.FireAndForget); + var tran = foo.CreateTransaction(); + tran.AddCondition(Condition.KeyExists("abc")); + _ = tran.StringIncrementAsync("i"); + tran.Execute(); + + int i = (int)raw.StringGet(prefix + 
"i"); + Assert.Equal(1, i); + + // repeat without key + raw.KeyDelete(prefix + "abc", CommandFlags.FireAndForget); + tran = foo.CreateTransaction(); + tran.AddCondition(Condition.KeyExists("abc")); + _ = tran.StringIncrementAsync("i"); + tran.Execute(); + + i = (int)raw.StringGet(prefix + "i"); + Assert.Equal(1, i); } } diff --git a/tests/StackExchange.Redis.Tests/WrapperBaseTests.cs b/tests/StackExchange.Redis.Tests/WrapperBaseTests.cs deleted file mode 100644 index 3f1ee9f28..000000000 --- a/tests/StackExchange.Redis.Tests/WrapperBaseTests.cs +++ /dev/null @@ -1,1116 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Linq.Expressions; -using System.Net; -using System.Text; -using Moq; -using StackExchange.Redis.KeyspaceIsolation; -using Xunit; -using System.Threading.Tasks; - -namespace StackExchange.Redis.Tests -{ - [Collection(nameof(MoqDependentCollection))] - public sealed class WrapperBaseTests - { - private readonly Mock mock; - private readonly WrapperBase wrapper; - - public WrapperBaseTests() - { - mock = new Mock(); - wrapper = new WrapperBase(mock.Object, Encoding.UTF8.GetBytes("prefix:")); - } - -#pragma warning disable RCS1047 // Non-asynchronous method name should not end with 'Async'. 
- - [Fact] - public void DebugObjectAsync() - { - wrapper.DebugObjectAsync("key", CommandFlags.None); - mock.Verify(_ => _.DebugObjectAsync("prefix:key", CommandFlags.None)); - } - - [Fact] - public void HashDecrementAsync_1() - { - wrapper.HashDecrementAsync("key", "hashField", 123, CommandFlags.None); - mock.Verify(_ => _.HashDecrementAsync("prefix:key", "hashField", 123, CommandFlags.None)); - } - - [Fact] - public void HashDecrementAsync_2() - { - wrapper.HashDecrementAsync("key", "hashField", 1.23, CommandFlags.None); - mock.Verify(_ => _.HashDecrementAsync("prefix:key", "hashField", 1.23, CommandFlags.None)); - } - - [Fact] - public void HashDeleteAsync_1() - { - wrapper.HashDeleteAsync("key", "hashField", CommandFlags.None); - mock.Verify(_ => _.HashDeleteAsync("prefix:key", "hashField", CommandFlags.None)); - } - - [Fact] - public void HashDeleteAsync_2() - { - RedisValue[] hashFields = new RedisValue[0]; - wrapper.HashDeleteAsync("key", hashFields, CommandFlags.None); - mock.Verify(_ => _.HashDeleteAsync("prefix:key", hashFields, CommandFlags.None)); - } - - [Fact] - public void HashExistsAsync() - { - wrapper.HashExistsAsync("key", "hashField", CommandFlags.None); - mock.Verify(_ => _.HashExistsAsync("prefix:key", "hashField", CommandFlags.None)); - } - - [Fact] - public void HashGetAllAsync() - { - wrapper.HashGetAllAsync("key", CommandFlags.None); - mock.Verify(_ => _.HashGetAllAsync("prefix:key", CommandFlags.None)); - } - - [Fact] - public void HashGetAsync_1() - { - wrapper.HashGetAsync("key", "hashField", CommandFlags.None); - mock.Verify(_ => _.HashGetAsync("prefix:key", "hashField", CommandFlags.None)); - } - - [Fact] - public void HashGetAsync_2() - { - RedisValue[] hashFields = new RedisValue[0]; - wrapper.HashGetAsync("key", hashFields, CommandFlags.None); - mock.Verify(_ => _.HashGetAsync("prefix:key", hashFields, CommandFlags.None)); - } - - [Fact] - public void HashIncrementAsync_1() - { - wrapper.HashIncrementAsync("key", "hashField", 123, 
CommandFlags.None); - mock.Verify(_ => _.HashIncrementAsync("prefix:key", "hashField", 123, CommandFlags.None)); - } - - [Fact] - public void HashIncrementAsync_2() - { - wrapper.HashIncrementAsync("key", "hashField", 1.23, CommandFlags.None); - mock.Verify(_ => _.HashIncrementAsync("prefix:key", "hashField", 1.23, CommandFlags.None)); - } - - [Fact] - public void HashKeysAsync() - { - wrapper.HashKeysAsync("key", CommandFlags.None); - mock.Verify(_ => _.HashKeysAsync("prefix:key", CommandFlags.None)); - } - - [Fact] - public void HashLengthAsync() - { - wrapper.HashLengthAsync("key", CommandFlags.None); - mock.Verify(_ => _.HashLengthAsync("prefix:key", CommandFlags.None)); - } - - [Fact] - public void HashSetAsync_1() - { - HashEntry[] hashFields = new HashEntry[0]; - wrapper.HashSetAsync("key", hashFields, CommandFlags.None); - mock.Verify(_ => _.HashSetAsync("prefix:key", hashFields, CommandFlags.None)); - } - - [Fact] - public void HashSetAsync_2() - { - wrapper.HashSetAsync("key", "hashField", "value", When.Exists, CommandFlags.None); - mock.Verify(_ => _.HashSetAsync("prefix:key", "hashField", "value", When.Exists, CommandFlags.None)); - } - - [Fact] - public void HashStringLengthAsync() - { - wrapper.HashStringLengthAsync("key","field", CommandFlags.None); - mock.Verify(_ => _.HashStringLengthAsync("prefix:key", "field", CommandFlags.None)); - } - - [Fact] - public void HashValuesAsync() - { - wrapper.HashValuesAsync("key", CommandFlags.None); - mock.Verify(_ => _.HashValuesAsync("prefix:key", CommandFlags.None)); - } - - [Fact] - public void HyperLogLogAddAsync_1() - { - wrapper.HyperLogLogAddAsync("key", "value", CommandFlags.None); - mock.Verify(_ => _.HyperLogLogAddAsync("prefix:key", "value", CommandFlags.None)); - } - - [Fact] - public void HyperLogLogAddAsync_2() - { - var values = new RedisValue[0]; - wrapper.HyperLogLogAddAsync("key", values, CommandFlags.None); - mock.Verify(_ => _.HyperLogLogAddAsync("prefix:key", values, CommandFlags.None)); - } 
- - [Fact] - public void HyperLogLogLengthAsync() - { - wrapper.HyperLogLogLengthAsync("key", CommandFlags.None); - mock.Verify(_ => _.HyperLogLogLengthAsync("prefix:key", CommandFlags.None)); - } - - [Fact] - public void HyperLogLogMergeAsync_1() - { - wrapper.HyperLogLogMergeAsync("destination", "first", "second", CommandFlags.None); - mock.Verify(_ => _.HyperLogLogMergeAsync("prefix:destination", "prefix:first", "prefix:second", CommandFlags.None)); - } - - [Fact] - public void HyperLogLogMergeAsync_2() - { - RedisKey[] keys = new RedisKey[] { "a", "b" }; - Expression> valid = _ => _.Length == 2 && _[0] == "prefix:a" && _[1] == "prefix:b"; - wrapper.HyperLogLogMergeAsync("destination", keys, CommandFlags.None); - mock.Verify(_ => _.HyperLogLogMergeAsync("prefix:destination", It.Is(valid), CommandFlags.None)); - } - - [Fact] - public void IdentifyEndpointAsync() - { - wrapper.IdentifyEndpointAsync("key", CommandFlags.None); - mock.Verify(_ => _.IdentifyEndpointAsync("prefix:key", CommandFlags.None)); - } - - [Fact] - public void IsConnected() - { - wrapper.IsConnected("key", CommandFlags.None); - mock.Verify(_ => _.IsConnected("prefix:key", CommandFlags.None)); - } - - [Fact] - public void KeyDeleteAsync_1() - { - wrapper.KeyDeleteAsync("key", CommandFlags.None); - mock.Verify(_ => _.KeyDeleteAsync("prefix:key", CommandFlags.None)); - } - - [Fact] - public void KeyDeleteAsync_2() - { - RedisKey[] keys = new RedisKey[] { "a", "b" }; - Expression> valid = _ => _.Length == 2 && _[0] == "prefix:a" && _[1] == "prefix:b"; - wrapper.KeyDeleteAsync(keys, CommandFlags.None); - mock.Verify(_ => _.KeyDeleteAsync(It.Is(valid), CommandFlags.None)); - } - - [Fact] - public void KeyDumpAsync() - { - wrapper.KeyDumpAsync("key", CommandFlags.None); - mock.Verify(_ => _.KeyDumpAsync("prefix:key", CommandFlags.None)); - } - - [Fact] - public void KeyExistsAsync() - { - wrapper.KeyExistsAsync("key", CommandFlags.None); - mock.Verify(_ => _.KeyExistsAsync("prefix:key", 
CommandFlags.None)); - } - - [Fact] - public void KeyExpireAsync_1() - { - TimeSpan expiry = TimeSpan.FromSeconds(123); - wrapper.KeyExpireAsync("key", expiry, CommandFlags.None); - mock.Verify(_ => _.KeyExpireAsync("prefix:key", expiry, CommandFlags.None)); - } - - [Fact] - public void KeyExpireAsync_2() - { - DateTime expiry = DateTime.Now; - wrapper.KeyExpireAsync("key", expiry, CommandFlags.None); - mock.Verify(_ => _.KeyExpireAsync("prefix:key", expiry, CommandFlags.None)); - } - - [Fact] - public void KeyMigrateAsync() - { - EndPoint toServer = new IPEndPoint(IPAddress.Loopback, 123); - wrapper.KeyMigrateAsync("key", toServer, 123, 456, MigrateOptions.Copy, CommandFlags.None); - mock.Verify(_ => _.KeyMigrateAsync("prefix:key", toServer, 123, 456, MigrateOptions.Copy, CommandFlags.None)); - } - - [Fact] - public void KeyMoveAsync() - { - wrapper.KeyMoveAsync("key", 123, CommandFlags.None); - mock.Verify(_ => _.KeyMoveAsync("prefix:key", 123, CommandFlags.None)); - } - - [Fact] - public void KeyPersistAsync() - { - wrapper.KeyPersistAsync("key", CommandFlags.None); - mock.Verify(_ => _.KeyPersistAsync("prefix:key", CommandFlags.None)); - } - - [Fact] - public Task KeyRandomAsync() - { - return Assert.ThrowsAsync(() => wrapper.KeyRandomAsync()); - } - - [Fact] - public void KeyRenameAsync() - { - wrapper.KeyRenameAsync("key", "newKey", When.Exists, CommandFlags.None); - mock.Verify(_ => _.KeyRenameAsync("prefix:key", "prefix:newKey", When.Exists, CommandFlags.None)); - } - - [Fact] - public void KeyRestoreAsync() - { - byte[] value = new byte[0]; - TimeSpan expiry = TimeSpan.FromSeconds(123); - wrapper.KeyRestoreAsync("key", value, expiry, CommandFlags.None); - mock.Verify(_ => _.KeyRestoreAsync("prefix:key", value, expiry, CommandFlags.None)); - } - - [Fact] - public void KeyTimeToLiveAsync() - { - wrapper.KeyTimeToLiveAsync("key", CommandFlags.None); - mock.Verify(_ => _.KeyTimeToLiveAsync("prefix:key", CommandFlags.None)); - } - - [Fact] - public void 
KeyTypeAsync() - { - wrapper.KeyTypeAsync("key", CommandFlags.None); - mock.Verify(_ => _.KeyTypeAsync("prefix:key", CommandFlags.None)); - } - - [Fact] - public void ListGetByIndexAsync() - { - wrapper.ListGetByIndexAsync("key", 123, CommandFlags.None); - mock.Verify(_ => _.ListGetByIndexAsync("prefix:key", 123, CommandFlags.None)); - } - - [Fact] - public void ListInsertAfterAsync() - { - wrapper.ListInsertAfterAsync("key", "pivot", "value", CommandFlags.None); - mock.Verify(_ => _.ListInsertAfterAsync("prefix:key", "pivot", "value", CommandFlags.None)); - } - - [Fact] - public void ListInsertBeforeAsync() - { - wrapper.ListInsertBeforeAsync("key", "pivot", "value", CommandFlags.None); - mock.Verify(_ => _.ListInsertBeforeAsync("prefix:key", "pivot", "value", CommandFlags.None)); - } - - [Fact] - public void ListLeftPopAsync() - { - wrapper.ListLeftPopAsync("key", CommandFlags.None); - mock.Verify(_ => _.ListLeftPopAsync("prefix:key", CommandFlags.None)); - } - - [Fact] - public void ListLeftPushAsync_1() - { - wrapper.ListLeftPushAsync("key", "value", When.Exists, CommandFlags.None); - mock.Verify(_ => _.ListLeftPushAsync("prefix:key", "value", When.Exists, CommandFlags.None)); - } - - [Fact] - public void ListLeftPushAsync_2() - { - RedisValue[] values = new RedisValue[0]; - wrapper.ListLeftPushAsync("key", values, CommandFlags.None); - mock.Verify(_ => _.ListLeftPushAsync("prefix:key", values, CommandFlags.None)); - } - - [Fact] - public void ListLeftPushAsync_3() - { - RedisValue[] values = new RedisValue[] { "value1", "value2" }; - wrapper.ListLeftPushAsync("key", values, When.Exists, CommandFlags.None); - mock.Verify(_ => _.ListLeftPushAsync("prefix:key", values, When.Exists, CommandFlags.None)); - } - - [Fact] - public void ListLengthAsync() - { - wrapper.ListLengthAsync("key", CommandFlags.None); - mock.Verify(_ => _.ListLengthAsync("prefix:key", CommandFlags.None)); - } - - [Fact] - public void ListRangeAsync() - { - wrapper.ListRangeAsync("key", 123, 
456, CommandFlags.None); - mock.Verify(_ => _.ListRangeAsync("prefix:key", 123, 456, CommandFlags.None)); - } - - [Fact] - public void ListRemoveAsync() - { - wrapper.ListRemoveAsync("key", "value", 123, CommandFlags.None); - mock.Verify(_ => _.ListRemoveAsync("prefix:key", "value", 123, CommandFlags.None)); - } - - [Fact] - public void ListRightPopAsync() - { - wrapper.ListRightPopAsync("key", CommandFlags.None); - mock.Verify(_ => _.ListRightPopAsync("prefix:key", CommandFlags.None)); - } - - [Fact] - public void ListRightPopLeftPushAsync() - { - wrapper.ListRightPopLeftPushAsync("source", "destination", CommandFlags.None); - mock.Verify(_ => _.ListRightPopLeftPushAsync("prefix:source", "prefix:destination", CommandFlags.None)); - } - - [Fact] - public void ListRightPushAsync_1() - { - wrapper.ListRightPushAsync("key", "value", When.Exists, CommandFlags.None); - mock.Verify(_ => _.ListRightPushAsync("prefix:key", "value", When.Exists, CommandFlags.None)); - } - - [Fact] - public void ListRightPushAsync_2() - { - RedisValue[] values = new RedisValue[0]; - wrapper.ListRightPushAsync("key", values, CommandFlags.None); - mock.Verify(_ => _.ListRightPushAsync("prefix:key", values, CommandFlags.None)); - } - - [Fact] - public void ListRightPushAsync_3() - { - RedisValue[] values = new RedisValue[] { "value1", "value2" }; - wrapper.ListRightPushAsync("key", values, When.Exists, CommandFlags.None); - mock.Verify(_ => _.ListRightPushAsync("prefix:key", values, When.Exists, CommandFlags.None)); - } - - [Fact] - public void ListSetByIndexAsync() - { - wrapper.ListSetByIndexAsync("key", 123, "value", CommandFlags.None); - mock.Verify(_ => _.ListSetByIndexAsync("prefix:key", 123, "value", CommandFlags.None)); - } - - [Fact] - public void ListTrimAsync() - { - wrapper.ListTrimAsync("key", 123, 456, CommandFlags.None); - mock.Verify(_ => _.ListTrimAsync("prefix:key", 123, 456, CommandFlags.None)); - } - - [Fact] - public void LockExtendAsync() - { - TimeSpan expiry = 
TimeSpan.FromSeconds(123); - wrapper.LockExtendAsync("key", "value", expiry, CommandFlags.None); - mock.Verify(_ => _.LockExtendAsync("prefix:key", "value", expiry, CommandFlags.None)); - } - - [Fact] - public void LockQueryAsync() - { - wrapper.LockQueryAsync("key", CommandFlags.None); - mock.Verify(_ => _.LockQueryAsync("prefix:key", CommandFlags.None)); - } - - [Fact] - public void LockReleaseAsync() - { - wrapper.LockReleaseAsync("key", "value", CommandFlags.None); - mock.Verify(_ => _.LockReleaseAsync("prefix:key", "value", CommandFlags.None)); - } - - [Fact] - public void LockTakeAsync() - { - TimeSpan expiry = TimeSpan.FromSeconds(123); - wrapper.LockTakeAsync("key", "value", expiry, CommandFlags.None); - mock.Verify(_ => _.LockTakeAsync("prefix:key", "value", expiry, CommandFlags.None)); - } - - [Fact] - public void PublishAsync() - { - wrapper.PublishAsync("channel", "message", CommandFlags.None); - mock.Verify(_ => _.PublishAsync("prefix:channel", "message", CommandFlags.None)); - } - - [Fact] - public void ScriptEvaluateAsync_1() - { - byte[] hash = new byte[0]; - RedisValue[] values = new RedisValue[0]; - RedisKey[] keys = new RedisKey[] { "a", "b" }; - Expression> valid = _ => _.Length == 2 && _[0] == "prefix:a" && _[1] == "prefix:b"; - wrapper.ScriptEvaluateAsync(hash, keys, values, CommandFlags.None); - mock.Verify(_ => _.ScriptEvaluateAsync(hash, It.Is(valid), values, CommandFlags.None)); - } - - [Fact] - public void ScriptEvaluateAsync_2() - { - RedisValue[] values = new RedisValue[0]; - RedisKey[] keys = new RedisKey[] { "a", "b" }; - Expression> valid = _ => _.Length == 2 && _[0] == "prefix:a" && _[1] == "prefix:b"; - wrapper.ScriptEvaluateAsync("script", keys, values, CommandFlags.None); - mock.Verify(_ => _.ScriptEvaluateAsync("script", It.Is(valid), values, CommandFlags.None)); - } - - [Fact] - public void SetAddAsync_1() - { - wrapper.SetAddAsync("key", "value", CommandFlags.None); - mock.Verify(_ => _.SetAddAsync("prefix:key", "value", 
CommandFlags.None)); - } - - [Fact] - public void SetAddAsync_2() - { - RedisValue[] values = new RedisValue[0]; - wrapper.SetAddAsync("key", values, CommandFlags.None); - mock.Verify(_ => _.SetAddAsync("prefix:key", values, CommandFlags.None)); - } - - [Fact] - public void SetCombineAndStoreAsync_1() - { - wrapper.SetCombineAndStoreAsync(SetOperation.Intersect, "destination", "first", "second", CommandFlags.None); - mock.Verify(_ => _.SetCombineAndStoreAsync(SetOperation.Intersect, "prefix:destination", "prefix:first", "prefix:second", CommandFlags.None)); - } - - [Fact] - public void SetCombineAndStoreAsync_2() - { - RedisKey[] keys = new RedisKey[] { "a", "b" }; - Expression> valid = _ => _.Length == 2 && _[0] == "prefix:a" && _[1] == "prefix:b"; - wrapper.SetCombineAndStoreAsync(SetOperation.Intersect, "destination", keys, CommandFlags.None); - mock.Verify(_ => _.SetCombineAndStoreAsync(SetOperation.Intersect, "prefix:destination", It.Is(valid), CommandFlags.None)); - } - - [Fact] - public void SetCombineAsync_1() - { - wrapper.SetCombineAsync(SetOperation.Intersect, "first", "second", CommandFlags.None); - mock.Verify(_ => _.SetCombineAsync(SetOperation.Intersect, "prefix:first", "prefix:second", CommandFlags.None)); - } - - [Fact] - public void SetCombineAsync_2() - { - RedisKey[] keys = new RedisKey[] { "a", "b" }; - Expression> valid = _ => _.Length == 2 && _[0] == "prefix:a" && _[1] == "prefix:b"; - wrapper.SetCombineAsync(SetOperation.Intersect, keys, CommandFlags.None); - mock.Verify(_ => _.SetCombineAsync(SetOperation.Intersect, It.Is(valid), CommandFlags.None)); - } - - [Fact] - public void SetContainsAsync() - { - wrapper.SetContainsAsync("key", "value", CommandFlags.None); - mock.Verify(_ => _.SetContainsAsync("prefix:key", "value", CommandFlags.None)); - } - - [Fact] - public void SetLengthAsync() - { - wrapper.SetLengthAsync("key", CommandFlags.None); - mock.Verify(_ => _.SetLengthAsync("prefix:key", CommandFlags.None)); - } - - [Fact] - public 
void SetMembersAsync() - { - wrapper.SetMembersAsync("key", CommandFlags.None); - mock.Verify(_ => _.SetMembersAsync("prefix:key", CommandFlags.None)); - } - - [Fact] - public void SetMoveAsync() - { - wrapper.SetMoveAsync("source", "destination", "value", CommandFlags.None); - mock.Verify(_ => _.SetMoveAsync("prefix:source", "prefix:destination", "value", CommandFlags.None)); - } - - [Fact] - public void SetPopAsync_1() - { - wrapper.SetPopAsync("key", CommandFlags.None); - mock.Verify(_ => _.SetPopAsync("prefix:key", CommandFlags.None)); - - wrapper.SetPopAsync("key", 5, CommandFlags.None); - mock.Verify(_ => _.SetPopAsync("prefix:key", 5, CommandFlags.None)); - } - - [Fact] - public void SetPopAsync_2() - { - wrapper.SetPopAsync("key", 5, CommandFlags.None); - mock.Verify(_ => _.SetPopAsync("prefix:key", 5, CommandFlags.None)); - } - - [Fact] - public void SetRandomMemberAsync() - { - wrapper.SetRandomMemberAsync("key", CommandFlags.None); - mock.Verify(_ => _.SetRandomMemberAsync("prefix:key", CommandFlags.None)); - } - - [Fact] - public void SetRandomMembersAsync() - { - wrapper.SetRandomMembersAsync("key", 123, CommandFlags.None); - mock.Verify(_ => _.SetRandomMembersAsync("prefix:key", 123, CommandFlags.None)); - } - - [Fact] - public void SetRemoveAsync_1() - { - wrapper.SetRemoveAsync("key", "value", CommandFlags.None); - mock.Verify(_ => _.SetRemoveAsync("prefix:key", "value", CommandFlags.None)); - } - - [Fact] - public void SetRemoveAsync_2() - { - RedisValue[] values = new RedisValue[0]; - wrapper.SetRemoveAsync("key", values, CommandFlags.None); - mock.Verify(_ => _.SetRemoveAsync("prefix:key", values, CommandFlags.None)); - } - - [Fact] - public void SortAndStoreAsync() - { - RedisValue[] get = new RedisValue[] { "a", "#" }; - Expression> valid = _ => _.Length == 2 && _[0] == "prefix:a" && _[1] == "#"; - - wrapper.SortAndStoreAsync("destination", "key", 123, 456, Order.Descending, SortType.Alphabetic, "nosort", get, CommandFlags.None); - 
wrapper.SortAndStoreAsync("destination", "key", 123, 456, Order.Descending, SortType.Alphabetic, "by", get, CommandFlags.None); - - mock.Verify(_ => _.SortAndStoreAsync("prefix:destination", "prefix:key", 123, 456, Order.Descending, SortType.Alphabetic, "nosort", It.Is(valid), CommandFlags.None)); - mock.Verify(_ => _.SortAndStoreAsync("prefix:destination", "prefix:key", 123, 456, Order.Descending, SortType.Alphabetic, "prefix:by", It.Is(valid), CommandFlags.None)); - } - - [Fact] - public void SortAsync() - { - RedisValue[] get = new RedisValue[] { "a", "#" }; - Expression> valid = _ => _.Length == 2 && _[0] == "prefix:a" && _[1] == "#"; - - wrapper.SortAsync("key", 123, 456, Order.Descending, SortType.Alphabetic, "nosort", get, CommandFlags.None); - wrapper.SortAsync("key", 123, 456, Order.Descending, SortType.Alphabetic, "by", get, CommandFlags.None); - - mock.Verify(_ => _.SortAsync("prefix:key", 123, 456, Order.Descending, SortType.Alphabetic, "nosort", It.Is(valid), CommandFlags.None)); - mock.Verify(_ => _.SortAsync("prefix:key", 123, 456, Order.Descending, SortType.Alphabetic, "prefix:by", It.Is(valid), CommandFlags.None)); - } - - [Fact] - public void SortedSetAddAsync_1() - { - wrapper.SortedSetAddAsync("key", "member", 1.23, When.Exists, CommandFlags.None); - mock.Verify(_ => _.SortedSetAddAsync("prefix:key", "member", 1.23, When.Exists, CommandFlags.None)); - } - - [Fact] - public void SortedSetAddAsync_2() - { - SortedSetEntry[] values = new SortedSetEntry[0]; - wrapper.SortedSetAddAsync("key", values, When.Exists, CommandFlags.None); - mock.Verify(_ => _.SortedSetAddAsync("prefix:key", values, When.Exists, CommandFlags.None)); - } - - [Fact] - public void SortedSetCombineAndStoreAsync_1() - { - wrapper.SortedSetCombineAndStoreAsync(SetOperation.Intersect, "destination", "first", "second", Aggregate.Max, CommandFlags.None); - mock.Verify(_ => _.SortedSetCombineAndStoreAsync(SetOperation.Intersect, "prefix:destination", "prefix:first", "prefix:second", 
Aggregate.Max, CommandFlags.None)); - } - - [Fact] - public void SortedSetCombineAndStoreAsync_2() - { - RedisKey[] keys = new RedisKey[] { "a", "b" }; - Expression> valid = _ => _.Length == 2 && _[0] == "prefix:a" && _[1] == "prefix:b"; - wrapper.SetCombineAndStoreAsync(SetOperation.Intersect, "destination", keys, CommandFlags.None); - mock.Verify(_ => _.SetCombineAndStoreAsync(SetOperation.Intersect, "prefix:destination", It.Is(valid), CommandFlags.None)); - } - - [Fact] - public void SortedSetDecrementAsync() - { - wrapper.SortedSetDecrementAsync("key", "member", 1.23, CommandFlags.None); - mock.Verify(_ => _.SortedSetDecrementAsync("prefix:key", "member", 1.23, CommandFlags.None)); - } - - [Fact] - public void SortedSetIncrementAsync() - { - wrapper.SortedSetIncrementAsync("key", "member", 1.23, CommandFlags.None); - mock.Verify(_ => _.SortedSetIncrementAsync("prefix:key", "member", 1.23, CommandFlags.None)); - } - - [Fact] - public void SortedSetLengthAsync() - { - wrapper.SortedSetLengthAsync("key", 1.23, 1.23, Exclude.Start, CommandFlags.None); - mock.Verify(_ => _.SortedSetLengthAsync("prefix:key", 1.23, 1.23, Exclude.Start, CommandFlags.None)); - } - - [Fact] - public void SortedSetLengthByValueAsync() - { - wrapper.SortedSetLengthByValueAsync("key", "min", "max", Exclude.Start, CommandFlags.None); - mock.Verify(_ => _.SortedSetLengthByValueAsync("prefix:key", "min", "max", Exclude.Start, CommandFlags.None)); - } - - [Fact] - public void SortedSetRangeByRankAsync() - { - wrapper.SortedSetRangeByRankAsync("key", 123, 456, Order.Descending, CommandFlags.None); - mock.Verify(_ => _.SortedSetRangeByRankAsync("prefix:key", 123, 456, Order.Descending, CommandFlags.None)); - } - - [Fact] - public void SortedSetRangeByRankWithScoresAsync() - { - wrapper.SortedSetRangeByRankWithScoresAsync("key", 123, 456, Order.Descending, CommandFlags.None); - mock.Verify(_ => _.SortedSetRangeByRankWithScoresAsync("prefix:key", 123, 456, Order.Descending, CommandFlags.None)); - } 
- - [Fact] - public void SortedSetRangeByScoreAsync() - { - wrapper.SortedSetRangeByScoreAsync("key", 1.23, 1.23, Exclude.Start, Order.Descending, 123, 456, CommandFlags.None); - mock.Verify(_ => _.SortedSetRangeByScoreAsync("prefix:key", 1.23, 1.23, Exclude.Start, Order.Descending, 123, 456, CommandFlags.None)); - } - - [Fact] - public void SortedSetRangeByScoreWithScoresAsync() - { - wrapper.SortedSetRangeByScoreWithScoresAsync("key", 1.23, 1.23, Exclude.Start, Order.Descending, 123, 456, CommandFlags.None); - mock.Verify(_ => _.SortedSetRangeByScoreWithScoresAsync("prefix:key", 1.23, 1.23, Exclude.Start, Order.Descending, 123, 456, CommandFlags.None)); - } - - [Fact] - public void SortedSetRangeByValueAsync() - { - wrapper.SortedSetRangeByValueAsync("key", "min", "max", Exclude.Start, 123, 456, CommandFlags.None); - mock.Verify(_ => _.SortedSetRangeByValueAsync("prefix:key", "min", "max", Exclude.Start, Order.Ascending, 123, 456, CommandFlags.None)); - } - - [Fact] - public void SortedSetRangeByValueDescAsync() - { - wrapper.SortedSetRangeByValueAsync("key", "min", "max", Exclude.Start, Order.Descending, 123, 456, CommandFlags.None); - mock.Verify(_ => _.SortedSetRangeByValueAsync("prefix:key", "min", "max", Exclude.Start, Order.Descending, 123, 456, CommandFlags.None)); - } - - [Fact] - public void SortedSetRankAsync() - { - wrapper.SortedSetRankAsync("key", "member", Order.Descending, CommandFlags.None); - mock.Verify(_ => _.SortedSetRankAsync("prefix:key", "member", Order.Descending, CommandFlags.None)); - } - - [Fact] - public void SortedSetRemoveAsync_1() - { - wrapper.SortedSetRemoveAsync("key", "member", CommandFlags.None); - mock.Verify(_ => _.SortedSetRemoveAsync("prefix:key", "member", CommandFlags.None)); - } - - [Fact] - public void SortedSetRemoveAsync_2() - { - RedisValue[] members = new RedisValue[0]; - wrapper.SortedSetRemoveAsync("key", members, CommandFlags.None); - mock.Verify(_ => _.SortedSetRemoveAsync("prefix:key", members, 
CommandFlags.None)); - } - - [Fact] - public void SortedSetRemoveRangeByRankAsync() - { - wrapper.SortedSetRemoveRangeByRankAsync("key", 123, 456, CommandFlags.None); - mock.Verify(_ => _.SortedSetRemoveRangeByRankAsync("prefix:key", 123, 456, CommandFlags.None)); - } - - [Fact] - public void SortedSetRemoveRangeByScoreAsync() - { - wrapper.SortedSetRemoveRangeByScoreAsync("key", 1.23, 1.23, Exclude.Start, CommandFlags.None); - mock.Verify(_ => _.SortedSetRemoveRangeByScoreAsync("prefix:key", 1.23, 1.23, Exclude.Start, CommandFlags.None)); - } - - [Fact] - public void SortedSetRemoveRangeByValueAsync() - { - wrapper.SortedSetRemoveRangeByValueAsync("key", "min", "max", Exclude.Start, CommandFlags.None); - mock.Verify(_ => _.SortedSetRemoveRangeByValueAsync("prefix:key", "min", "max", Exclude.Start, CommandFlags.None)); - } - - [Fact] - public void SortedSetScoreAsync() - { - wrapper.SortedSetScoreAsync("key", "member", CommandFlags.None); - mock.Verify(_ => _.SortedSetScoreAsync("prefix:key", "member", CommandFlags.None)); - } - - [Fact] - public void StreamAcknowledgeAsync_1() - { - wrapper.StreamAcknowledgeAsync("key", "group", "0-0", CommandFlags.None); - mock.Verify(_ => _.StreamAcknowledgeAsync("prefix:key", "group", "0-0", CommandFlags.None)); - } - - [Fact] - public void StreamAcknowledgeAsync_2() - { - var messageIds = new RedisValue[] { "0-0", "0-1", "0-2" }; - wrapper.StreamAcknowledgeAsync("key", "group", messageIds, CommandFlags.None); - mock.Verify(_ => _.StreamAcknowledgeAsync("prefix:key", "group", messageIds, CommandFlags.None)); - } - - [Fact] - public void StreamAddAsync_1() - { - wrapper.StreamAddAsync("key", "field1", "value1", "*", 1000, true, CommandFlags.None); - mock.Verify(_ => _.StreamAddAsync("prefix:key", "field1", "value1", "*", 1000, true, CommandFlags.None)); - } - - [Fact] - public void StreamAddAsync_2() - { - var fields = new NameValueEntry[0]; - wrapper.StreamAddAsync("key", fields, "*", 1000, true, CommandFlags.None); - 
mock.Verify(_ => _.StreamAddAsync("prefix:key", fields, "*", 1000, true, CommandFlags.None)); - } - - [Fact] - public void StreamClaimMessagesAsync() - { - var messageIds = new RedisValue[0]; - wrapper.StreamClaimAsync("key", "group", "consumer", 1000, messageIds, CommandFlags.None); - mock.Verify(_ => _.StreamClaimAsync("prefix:key", "group", "consumer", 1000, messageIds, CommandFlags.None)); - } - - [Fact] - public void StreamClaimMessagesReturningIdsAsync() - { - var messageIds = new RedisValue[0]; - wrapper.StreamClaimIdsOnlyAsync("key", "group", "consumer", 1000, messageIds, CommandFlags.None); - mock.Verify(_ => _.StreamClaimIdsOnlyAsync("prefix:key", "group", "consumer", 1000, messageIds, CommandFlags.None)); - } - - [Fact] - public void StreamConsumerInfoGetAsync() - { - wrapper.StreamConsumerInfoAsync("key", "group", CommandFlags.None); - mock.Verify(_ => _.StreamConsumerInfoAsync("prefix:key", "group", CommandFlags.None)); - } - - [Fact] - public void StreamConsumerGroupSetPositionAsync() - { - wrapper.StreamConsumerGroupSetPositionAsync("key", "group", StreamPosition.Beginning, CommandFlags.None); - mock.Verify(_ => _.StreamConsumerGroupSetPositionAsync("prefix:key", "group", StreamPosition.Beginning, CommandFlags.None)); - } - - [Fact] - public void StreamCreateConsumerGroupAsync() - { - wrapper.StreamCreateConsumerGroupAsync("key", "group", "0-0", false, CommandFlags.None); - mock.Verify(_ => _.StreamCreateConsumerGroupAsync("prefix:key", "group", "0-0", false, CommandFlags.None)); - } - - [Fact] - public void StreamGroupInfoGetAsync() - { - wrapper.StreamGroupInfoAsync("key", CommandFlags.None); - mock.Verify(_ => _.StreamGroupInfoAsync("prefix:key", CommandFlags.None)); - } - - [Fact] - public void StreamInfoGetAsync() - { - wrapper.StreamInfoAsync("key", CommandFlags.None); - mock.Verify(_ => _.StreamInfoAsync("prefix:key", CommandFlags.None)); - } - - [Fact] - public void StreamLengthAsync() - { - wrapper.StreamLengthAsync("key", 
CommandFlags.None); - mock.Verify(_ => _.StreamLengthAsync("prefix:key", CommandFlags.None)); - } - - [Fact] - public void StreamMessagesDeleteAsync() - { - var messageIds = new RedisValue[] { }; - wrapper.StreamDeleteAsync("key", messageIds, CommandFlags.None); - mock.Verify(_ => _.StreamDeleteAsync("prefix:key", messageIds, CommandFlags.None)); - } - - [Fact] - public void StreamDeleteConsumerAsync() - { - wrapper.StreamDeleteConsumerAsync("key", "group", "consumer", CommandFlags.None); - mock.Verify(_ => _.StreamDeleteConsumerAsync("prefix:key", "group", "consumer", CommandFlags.None)); - } - - [Fact] - public void StreamDeleteConsumerGroupAsync() - { - wrapper.StreamDeleteConsumerGroupAsync("key", "group", CommandFlags.None); - mock.Verify(_ => _.StreamDeleteConsumerGroupAsync("prefix:key", "group", CommandFlags.None)); - } - - [Fact] - public void StreamPendingInfoGetAsync() - { - wrapper.StreamPendingAsync("key", "group", CommandFlags.None); - mock.Verify(_ => _.StreamPendingAsync("prefix:key", "group", CommandFlags.None)); - } - - [Fact] - public void StreamPendingMessageInfoGetAsync() - { - wrapper.StreamPendingMessagesAsync("key", "group", 10, RedisValue.Null, "-", "+", CommandFlags.None); - mock.Verify(_ => _.StreamPendingMessagesAsync("prefix:key", "group", 10, RedisValue.Null, "-", "+", CommandFlags.None)); - } - - [Fact] - public void StreamRangeAsync() - { - wrapper.StreamRangeAsync("key", "-", "+", null, Order.Ascending, CommandFlags.None); - mock.Verify(_ => _.StreamRangeAsync("prefix:key", "-", "+", null, Order.Ascending, CommandFlags.None)); - } - - [Fact] - public void StreamReadAsync_1() - { - var streamPositions = new StreamPosition[] { }; - wrapper.StreamReadAsync(streamPositions, null, CommandFlags.None); - mock.Verify(_ => _.StreamReadAsync(streamPositions, null, CommandFlags.None)); - } - - [Fact] - public void StreamReadAsync_2() - { - wrapper.StreamReadAsync("key", "0-0", null, CommandFlags.None); - mock.Verify(_ => 
_.StreamReadAsync("prefix:key", "0-0", null, CommandFlags.None)); - } - - [Fact] - public void StreamReadGroupAsync_1() - { - wrapper.StreamReadGroupAsync("key", "group", "consumer", StreamPosition.Beginning, 10, false, CommandFlags.None); - mock.Verify(_ => _.StreamReadGroupAsync("prefix:key", "group", "consumer", StreamPosition.Beginning, 10, false, CommandFlags.None)); - } - - [Fact] - public void StreamStreamReadGroupAsync_2() - { - var streamPositions = new StreamPosition[] { }; - wrapper.StreamReadGroupAsync(streamPositions, "group", "consumer", 10, false, CommandFlags.None); - mock.Verify(_ => _.StreamReadGroupAsync(streamPositions, "group", "consumer", 10, false, CommandFlags.None)); - } - - [Fact] - public void StreamTrimAsync() - { - wrapper.StreamTrimAsync("key", 1000, true, CommandFlags.None); - mock.Verify(_ => _.StreamTrimAsync("prefix:key", 1000, true, CommandFlags.None)); - } - - [Fact] - public void StringAppendAsync() - { - wrapper.StringAppendAsync("key", "value", CommandFlags.None); - mock.Verify(_ => _.StringAppendAsync("prefix:key", "value", CommandFlags.None)); - } - - [Fact] - public void StringBitCountAsync() - { - wrapper.StringBitCountAsync("key", 123, 456, CommandFlags.None); - mock.Verify(_ => _.StringBitCountAsync("prefix:key", 123, 456, CommandFlags.None)); - } - - [Fact] - public void StringBitOperationAsync_1() - { - wrapper.StringBitOperationAsync(Bitwise.Xor, "destination", "first", "second", CommandFlags.None); - mock.Verify(_ => _.StringBitOperationAsync(Bitwise.Xor, "prefix:destination", "prefix:first", "prefix:second", CommandFlags.None)); - } - - [Fact] - public void StringBitOperationAsync_2() - { - RedisKey[] keys = new RedisKey[] { "a", "b" }; - Expression> valid = _ => _.Length == 2 && _[0] == "prefix:a" && _[1] == "prefix:b"; - wrapper.StringBitOperationAsync(Bitwise.Xor, "destination", keys, CommandFlags.None); - mock.Verify(_ => _.StringBitOperationAsync(Bitwise.Xor, "prefix:destination", It.Is(valid), 
CommandFlags.None)); - } - - [Fact] - public void StringBitPositionAsync() - { - wrapper.StringBitPositionAsync("key", true, 123, 456, CommandFlags.None); - mock.Verify(_ => _.StringBitPositionAsync("prefix:key", true, 123, 456, CommandFlags.None)); - } - - [Fact] - public void StringDecrementAsync_1() - { - wrapper.StringDecrementAsync("key", 123, CommandFlags.None); - mock.Verify(_ => _.StringDecrementAsync("prefix:key", 123, CommandFlags.None)); - } - - [Fact] - public void StringDecrementAsync_2() - { - wrapper.StringDecrementAsync("key", 1.23, CommandFlags.None); - mock.Verify(_ => _.StringDecrementAsync("prefix:key", 1.23, CommandFlags.None)); - } - - [Fact] - public void StringGetAsync_1() - { - wrapper.StringGetAsync("key", CommandFlags.None); - mock.Verify(_ => _.StringGetAsync("prefix:key", CommandFlags.None)); - } - - [Fact] - public void StringGetAsync_2() - { - RedisKey[] keys = new RedisKey[] { "a", "b" }; - Expression> valid = _ => _.Length == 2 && _[0] == "prefix:a" && _[1] == "prefix:b"; - wrapper.StringGetAsync(keys, CommandFlags.None); - mock.Verify(_ => _.StringGetAsync(It.Is(valid), CommandFlags.None)); - } - - [Fact] - public void StringGetBitAsync() - { - wrapper.StringGetBitAsync("key", 123, CommandFlags.None); - mock.Verify(_ => _.StringGetBitAsync("prefix:key", 123, CommandFlags.None)); - } - - [Fact] - public void StringGetRangeAsync() - { - wrapper.StringGetRangeAsync("key", 123, 456, CommandFlags.None); - mock.Verify(_ => _.StringGetRangeAsync("prefix:key", 123, 456, CommandFlags.None)); - } - - [Fact] - public void StringGetSetAsync() - { - wrapper.StringGetSetAsync("key", "value", CommandFlags.None); - mock.Verify(_ => _.StringGetSetAsync("prefix:key", "value", CommandFlags.None)); - } - - [Fact] - public void StringGetWithExpiryAsync() - { - wrapper.StringGetWithExpiryAsync("key", CommandFlags.None); - mock.Verify(_ => _.StringGetWithExpiryAsync("prefix:key", CommandFlags.None)); - } - - [Fact] - public void StringIncrementAsync_1() 
- { - wrapper.StringIncrementAsync("key", 123, CommandFlags.None); - mock.Verify(_ => _.StringIncrementAsync("prefix:key", 123, CommandFlags.None)); - } - - [Fact] - public void StringIncrementAsync_2() - { - wrapper.StringIncrementAsync("key", 1.23, CommandFlags.None); - mock.Verify(_ => _.StringIncrementAsync("prefix:key", 1.23, CommandFlags.None)); - } - - [Fact] - public void StringLengthAsync() - { - wrapper.StringLengthAsync("key", CommandFlags.None); - mock.Verify(_ => _.StringLengthAsync("prefix:key", CommandFlags.None)); - } - - [Fact] - public void StringSetAsync_1() - { - TimeSpan expiry = TimeSpan.FromSeconds(123); - wrapper.StringSetAsync("key", "value", expiry, When.Exists, CommandFlags.None); - mock.Verify(_ => _.StringSetAsync("prefix:key", "value", expiry, When.Exists, CommandFlags.None)); - } - - [Fact] - public void StringSetAsync_2() - { - KeyValuePair[] values = new KeyValuePair[] { new KeyValuePair("a", "x"), new KeyValuePair("b", "y") }; - Expression[], bool>> valid = _ => _.Length == 2 && _[0].Key == "prefix:a" && _[0].Value == "x" && _[1].Key == "prefix:b" && _[1].Value == "y"; - wrapper.StringSetAsync(values, When.Exists, CommandFlags.None); - mock.Verify(_ => _.StringSetAsync(It.Is(valid), When.Exists, CommandFlags.None)); - } - - [Fact] - public void StringSetBitAsync() - { - wrapper.StringSetBitAsync("key", 123, true, CommandFlags.None); - mock.Verify(_ => _.StringSetBitAsync("prefix:key", 123, true, CommandFlags.None)); - } - - [Fact] - public void StringSetRangeAsync() - { - wrapper.StringSetRangeAsync("key", 123, "value", CommandFlags.None); - mock.Verify(_ => _.StringSetRangeAsync("prefix:key", 123, "value", CommandFlags.None)); - } - - [Fact] - public void KeyTouchAsync_1() - { - wrapper.KeyTouchAsync("key", CommandFlags.None); - mock.Verify(_ => _.KeyTouchAsync("prefix:key", CommandFlags.None)); - } - - [Fact] - public void KeyTouchAsync_2() - { - RedisKey[] keys = new RedisKey[] { "a", "b" }; - Expression> valid = _ => _.Length 
== 2 && _[0] == "prefix:a" && _[1] == "prefix:b"; - wrapper.KeyTouchAsync(keys, CommandFlags.None); - mock.Verify(_ => _.KeyTouchAsync(It.Is(valid), CommandFlags.None)); - } -#pragma warning restore RCS1047 // Non-asynchronous method name should not end with 'Async'. - } -} diff --git a/tests/StackExchange.Redis.Tests/xunit.runner.json b/tests/StackExchange.Redis.Tests/xunit.runner.json index 65a35fb2f..dc36b1875 100644 --- a/tests/StackExchange.Redis.Tests/xunit.runner.json +++ b/tests/StackExchange.Redis.Tests/xunit.runner.json @@ -1,6 +1,8 @@ { "methodDisplay": "classAndMethod", - "maxParallelThreads": 8, + "parallelizeAssembly": true, + "maxParallelThreads": "2x", + "parallelizeTestCollections": true, "diagnosticMessages": false, "longRunningTestSeconds": 60 } \ No newline at end of file diff --git a/toys/KestrelRedisServer/KestrelRedisServer.csproj b/toys/KestrelRedisServer/KestrelRedisServer.csproj index 911116f68..f7955c42e 100644 --- a/toys/KestrelRedisServer/KestrelRedisServer.csproj +++ b/toys/KestrelRedisServer/KestrelRedisServer.csproj @@ -1,13 +1,13 @@  - netcoreapp2.1 + net10.0 $(NoWarn);CS1591 + enable + enable - - diff --git a/toys/KestrelRedisServer/Program.cs b/toys/KestrelRedisServer/Program.cs index c022f9480..fb77c2f14 100644 --- a/toys/KestrelRedisServer/Program.cs +++ b/toys/KestrelRedisServer/Program.cs @@ -1,27 +1,67 @@ -using Microsoft.AspNetCore; +using System.Net; +using KestrelRedisServer; using Microsoft.AspNetCore.Connections; -using Microsoft.AspNetCore.Hosting; -using Microsoft.AspNetCore.Server.Kestrel.Transport.Abstractions.Internal; +using StackExchange.Redis; +using StackExchange.Redis.Server; -namespace KestrelRedisServer +var server = new MemoryCacheRedisServer { - public static class Program + // note: we don't support many v6 features, but some clients + // want this before they'll try RESP3 + RedisVersion = new(6, 0), + // Password = "letmein", +}; + +/* +// demonstrate cluster spoofing +server.ServerType = 
ServerType.Cluster; +var ep = server.AddEmptyNode(); +server.Migrate("key", ep); +*/ + +var builder = WebApplication.CreateBuilder(args); +builder.Services.AddSingleton(server); +builder.WebHost.ConfigureKestrel(options => +{ + // HTTP 5000 (test/debug API only) + options.ListenLocalhost(5000); + + // this is the core of using Kestrel to create a TCP server + // TCP 6379 + Action builder = builder => builder.UseConnectionHandler(); + foreach (var ep in server.GetEndPoints()) { - public static void Main(string[] args) + if (ep is IPEndPoint ip && ip.Address.Equals(IPAddress.Loopback)) { - CreateWebHostBuilder(args).Build().Run(); + options.ListenLocalhost(ip.Port, builder); + } + else + { + options.Listen(ep, builder); } - - public static IWebHostBuilder CreateWebHostBuilder(string[] args) => - WebHost.CreateDefaultBuilder(args) - .UseKestrel(options => - { - options.ApplicationSchedulingMode = SchedulingMode.Inline; - // HTTP 5000 - options.ListenLocalhost(5000); - - // TCP 6379 - options.ListenLocalhost(6379, builder => builder.UseConnectionHandler()); - }).UseStartup(); } -} +}); + +var app = builder.Build(); + +// redis-specific hack - there is a redis command to shutdown the server +_ = server.Shutdown.ContinueWith( + static (t, s) => + { + try + { + // if the resp server is shutdown by a client: stop the kestrel server too + if (t.Result == RespServer.ShutdownReason.ClientInitiated) + { + ((IServiceProvider)s!).GetService()?.StopApplication(); + } + } + catch { /* Don't go boom on shutdown */ } + }, + app.Services); + +// add debug route +app.Run(context => context.Response.WriteAsync(server.GetStats())); + +// run the server +await app.RunAsync(); diff --git a/toys/KestrelRedisServer/RedisConnectionHandler.cs b/toys/KestrelRedisServer/RedisConnectionHandler.cs index a447440d9..415da54b9 100644 --- a/toys/KestrelRedisServer/RedisConnectionHandler.cs +++ b/toys/KestrelRedisServer/RedisConnectionHandler.cs @@ -1,15 +1,32 @@ -using System.IO; -using 
System.Threading.Tasks; +using System.Diagnostics; using Microsoft.AspNetCore.Connections; using StackExchange.Redis.Server; namespace KestrelRedisServer { - public class RedisConnectionHandler : ConnectionHandler + public class RedisConnectionHandler(RedisServer server) : ConnectionHandler { - private readonly RespServer _server; - public RedisConnectionHandler(RespServer server) => _server = server; public override Task OnConnectedAsync(ConnectionContext connection) - => _server.RunClientAsync(connection.Transport); + { + RedisServer.Node? node; + if (!(connection.LocalEndPoint is { } ep && server.TryGetNode(ep, out node))) + { + node = null; + } + + return server.RunClientAsync(connection.Transport, node: node) + .ContinueWith( + t => + { + // ensure any exceptions are observed + var ex = t.Exception; + if (ex != null) + { + Debug.WriteLine(ex.Message); + GC.KeepAlive(ex); + } + }, + TaskContinuationOptions.OnlyOnFaulted); + } } } diff --git a/toys/KestrelRedisServer/Startup.cs b/toys/KestrelRedisServer/Startup.cs deleted file mode 100644 index ab3a52382..000000000 --- a/toys/KestrelRedisServer/Startup.cs +++ /dev/null @@ -1,40 +0,0 @@ -using System; -using Microsoft.AspNetCore.Builder; -using Microsoft.AspNetCore.Hosting; -using Microsoft.AspNetCore.Http; -using Microsoft.Extensions.DependencyInjection; -using StackExchange.Redis.Server; - -namespace KestrelRedisServer -{ - public class Startup : IDisposable - { - private readonly RespServer _server = new MemoryCacheRedisServer(); - - // This method gets called by the runtime. Use this method to add services to the container. - // For more information on how to configure your application, visit https://go.microsoft.com/fwlink/?LinkID=398940 - public void ConfigureServices(IServiceCollection services) - => services.Add(new ServiceDescriptor(typeof(RespServer), _server)); - - public void Dispose() => _server.Dispose(); - - // This method gets called by the runtime. 
Use this method to configure the HTTP request pipeline. - public void Configure(IApplicationBuilder app, IHostingEnvironment env, IApplicationLifetime lifetime) - { - _server.Shutdown.ContinueWith((t, s) => - { - try - { // if the resp server is shutdown by a client: stop the kestrel server too - if (t.Result == RespServer.ShutdownReason.ClientInitiated) - { - ((IApplicationLifetime)s).StopApplication(); - } - } - catch { /* Don't go boom on shutdown */ } - }, lifetime); - - if (env.IsDevelopment()) app.UseDeveloperExceptionPage(); - app.Run(context => context.Response.WriteAsync(_server.GetStats())); - } - } -} diff --git a/toys/StackExchange.Redis.Server/GlobalSuppressions.cs b/toys/StackExchange.Redis.Server/GlobalSuppressions.cs new file mode 100644 index 000000000..8784fa37f --- /dev/null +++ b/toys/StackExchange.Redis.Server/GlobalSuppressions.cs @@ -0,0 +1,8 @@ +// This file is used by Code Analysis to maintain SuppressMessage +// attributes that are applied to this project. +// Project-level suppressions either have no target or are given +// a specific target and scoped to a namespace, type, member, etc. + +using System.Diagnostics.CodeAnalysis; + +[assembly: SuppressMessage("Style", "IDE0066:Convert switch statement to expression", Justification = "Pending", Scope = "member", Target = "~M:StackExchange.Redis.TypedRedisValue.ToString~System.String")] diff --git a/toys/StackExchange.Redis.Server/GlobalUsings.cs b/toys/StackExchange.Redis.Server/GlobalUsings.cs new file mode 100644 index 000000000..aa3ae0946 --- /dev/null +++ b/toys/StackExchange.Redis.Server/GlobalUsings.cs @@ -0,0 +1,22 @@ +extern alias seredis; +global using Format = seredis::StackExchange.Redis.Format; +global using PhysicalConnection = seredis::StackExchange.Redis.PhysicalConnection; +/* +During the v2/v3 transition, SE.Redis doesn't have RESPite, which +means it needs to merge in a few types like AsciiHash; this causes +conflicts; this file is a place to resolve them. 
Since the server +is now *mostly* RESPite, it turns out that the most efficient way +to do this is to shunt all of SE.Redis off into an alias, and bring +back just the types we need. +*/ +global using RedisChannel = seredis::StackExchange.Redis.RedisChannel; +global using RedisCommand = seredis::StackExchange.Redis.RedisCommand; +global using RedisCommandMetadata = seredis::StackExchange.Redis.RedisCommandMetadata; +global using RedisKey = seredis::StackExchange.Redis.RedisKey; +global using RedisProtocol = seredis::StackExchange.Redis.RedisProtocol; +global using RedisValue = seredis::StackExchange.Redis.RedisValue; +global using ResultType = seredis::StackExchange.Redis.ResultType; +global using ServerSelectionStrategy = seredis::StackExchange.Redis.ServerSelectionStrategy; +global using ServerType = seredis::StackExchange.Redis.ServerType; +global using SlotRange = seredis::StackExchange.Redis.SlotRange; +global using TaskSource = seredis::StackExchange.Redis.TaskSource; diff --git a/toys/StackExchange.Redis.Server/MemoryCacheRedisServer.cs b/toys/StackExchange.Redis.Server/MemoryCacheRedisServer.cs index ab56c3792..e9bcb5a5f 100644 --- a/toys/StackExchange.Redis.Server/MemoryCacheRedisServer.cs +++ b/toys/StackExchange.Redis.Server/MemoryCacheRedisServer.cs @@ -2,6 +2,7 @@ using System.Collections.Generic; using System.IO; using System.Linq; +using System.Net; using System.Runtime.Caching; using System.Runtime.CompilerServices; @@ -9,109 +10,206 @@ namespace StackExchange.Redis.Server { public class MemoryCacheRedisServer : RedisServer { - public MemoryCacheRedisServer(TextWriter output = null) : base(1, output) + public MemoryCacheRedisServer(EndPoint endpoint = null, TextWriter output = null) : base(endpoint, 1, output) => CreateNewCache(); - private MemoryCache _cache; + private MemoryCache _cache2; private void CreateNewCache() { - var old = _cache; - _cache = new MemoryCache(GetType().Name); - if (old != null) old.Dispose(); + var old = _cache2; + _cache2 
= new MemoryCache(GetType().Name); + old?.Dispose(); } protected override void Dispose(bool disposing) { - if (disposing) _cache.Dispose(); + if (disposing) _cache2.Dispose(); base.Dispose(disposing); } - protected override long Dbsize(int database) => _cache.GetCount(); - protected override RedisValue Get(int database, RedisKey key) - => RedisValue.Unbox(_cache[key]); - protected override void Set(int database, RedisKey key, RedisValue value) - => _cache[key] = value.Box(); - protected override bool Del(int database, RedisKey key) - => _cache.Remove(key) != null; + protected override long Dbsize(int database) => _cache2.GetCount(); + + private readonly struct ExpiringValue(object value, DateTime absoluteExpiration) + { + public readonly object Value = value; + public readonly DateTime AbsoluteExpiration = absoluteExpiration; + } + + private enum ExpectedType + { + Any = 0, + Stack, + Set, + List, + } + private object Get(in RedisKey key, ExpectedType expectedType) + { + var val = _cache2[key]; + switch (val) + { + case null: + return null; + case ExpiringValue ev: + if (ev.AbsoluteExpiration <= Time()) + { + _cache2.Remove(key); + return null; + } + return Validate(ev.Value, expectedType); + default: + return Validate(val, expectedType); + } + static object Validate(object value, ExpectedType expectedType) + { + return value switch + { + null => value, + HashSet set when expectedType is ExpectedType.Set or ExpectedType.Any => value, + HashSet => Throw(), + Stack stack when expectedType is ExpectedType.List or ExpectedType.Any => value, + Stack => Throw(), + _ when expectedType is ExpectedType.Stack or ExpectedType.Any => value, + _ => Throw(), + }; + + static object Throw() => throw new WrongTypeException(); + } + } + protected override TimeSpan? 
Ttl(int database, in RedisKey key) + { + var val = _cache2[key]; + switch (val) + { + case null: + return null; + case ExpiringValue ev: + var delta = ev.AbsoluteExpiration - Time(); + if (delta <= TimeSpan.Zero) + { + _cache2.Remove(key); + return null; + } + return delta; + default: + return TimeSpan.MaxValue; + } + } + + protected override bool Expire(int database, in RedisKey key, TimeSpan timeout) + { + if (timeout <= TimeSpan.Zero) return Del(database, key); + var val = Get(key, ExpectedType.Any); + if (val is not null) + { + _cache2[key] = new ExpiringValue(val, Time() + timeout); + return true; + } + + return false; + } + + protected override RedisValue Get(int database, in RedisKey key) + { + var val = Get(key, ExpectedType.Stack); + return RedisValue.Unbox(val); + } + + protected override void Set(int database, in RedisKey key, in RedisValue value) + => _cache2[key] = value.Box(); + + protected override void SetEx(int database, in RedisKey key, TimeSpan expiration, in RedisValue value) + { + var now = Time(); + var absolute = now + expiration; + if (absolute <= now) _cache2.Remove(key); + else _cache2[key] = new ExpiringValue(value.Box(), absolute); + } + + protected override bool Del(int database, in RedisKey key) + => _cache2.Remove(key) != null; protected override void Flushdb(int database) => CreateNewCache(); - protected override bool Exists(int database, RedisKey key) - => _cache.Contains(key); + protected override bool Exists(int database, in RedisKey key) + { + var val = Get(key, ExpectedType.Any); + return val != null && !(val is ExpiringValue ev && ev.AbsoluteExpiration <= Time()); + } - protected override IEnumerable Keys(int database, RedisKey pattern) + protected override IEnumerable Keys(int database, in RedisKey pattern) => GetKeysCore(pattern); + private IEnumerable GetKeysCore(RedisKey pattern) { - string s = pattern; - foreach (var pair in _cache) + foreach (var pair in _cache2) { + if (pair.Value is ExpiringValue ev && 
ev.AbsoluteExpiration <= Time()) continue; if (IsMatch(pattern, pair.Key)) yield return pair.Key; } } - protected override bool Sadd(int database, RedisKey key, RedisValue value) + protected override bool Sadd(int database, in RedisKey key, in RedisValue value) => GetSet(key, true).Add(value); - protected override bool Sismember(int database, RedisKey key, RedisValue value) + protected override bool Sismember(int database, in RedisKey key, in RedisValue value) => GetSet(key, false)?.Contains(value) ?? false; - protected override bool Srem(int database, RedisKey key, RedisValue value) + protected override bool Srem(int database, in RedisKey key, in RedisValue value) { var set = GetSet(key, false); if (set != null && set.Remove(value)) { - if (set.Count == 0) _cache.Remove(key); + if (set.Count == 0) _cache2.Remove(key); return true; } return false; } - protected override long Scard(int database, RedisKey key) + protected override long Scard(int database, in RedisKey key) => GetSet(key, false)?.Count ?? 
0; private HashSet GetSet(RedisKey key, bool create) { - var set = (HashSet)_cache[key]; + var set = (HashSet)Get(key, ExpectedType.Set); if (set == null && create) { set = new HashSet(); - _cache[key] = set; + _cache2[key] = set; } return set; } - protected override RedisValue Spop(int database, RedisKey key) + protected override RedisValue Spop(int database, in RedisKey key) { var set = GetSet(key, false); if (set == null) return RedisValue.Null; var result = set.First(); set.Remove(result); - if (set.Count == 0) _cache.Remove(key); + if (set.Count == 0) _cache2.Remove(key); return result; } - protected override long Lpush(int database, RedisKey key, RedisValue value) + protected override long Lpush(int database, in RedisKey key, in RedisValue value) { var stack = GetStack(key, true); stack.Push(value); return stack.Count; } - protected override RedisValue Lpop(int database, RedisKey key) + protected override RedisValue Lpop(int database, in RedisKey key) { var stack = GetStack(key, false); if (stack == null) return RedisValue.Null; var val = stack.Pop(); - if(stack.Count == 0) _cache.Remove(key); + if (stack.Count == 0) _cache2.Remove(key); return val; } - protected override long Llen(int database, RedisKey key) + protected override long Llen(int database, in RedisKey key) => GetStack(key, false)?.Count ?? 
0; [MethodImpl(MethodImplOptions.NoInlining)] private static void ThrowArgumentOutOfRangeException() => throw new ArgumentOutOfRangeException(); - protected override void LRange(int database, RedisKey key, long start, Span arr) + protected override void LRange(int database, in RedisKey key, long start, Span arr) { var stack = GetStack(key, false); @@ -129,13 +227,13 @@ protected override void LRange(int database, RedisKey key, long start, Span GetStack(RedisKey key, bool create) + private Stack GetStack(in RedisKey key, bool create) { - var stack = (Stack)_cache[key]; + var stack = (Stack)Get(key, ExpectedType.Stack); if (stack == null && create) { stack = new Stack(); - _cache[key] = stack; + _cache2[key] = stack; } return stack; } diff --git a/toys/StackExchange.Redis.Server/RedisClient.Output.cs b/toys/StackExchange.Redis.Server/RedisClient.Output.cs new file mode 100644 index 000000000..e27d693be --- /dev/null +++ b/toys/StackExchange.Redis.Server/RedisClient.Output.cs @@ -0,0 +1,262 @@ +using System; +using System.Buffers; +using System.Diagnostics; +using System.IO.Pipelines; +using System.Text; +using System.Threading; +using System.Threading.Channels; +using System.Threading.Tasks; +using RESPite.Messages; + +namespace StackExchange.Redis.Server; + +public partial class RedisClient +{ + private static readonly UnboundedChannelOptions s_replyChannelOptions = new() + { + SingleReader = true, + SingleWriter = false, + AllowSynchronousContinuations = false, + }; + + private readonly struct VersionedResponse(TypedRedisValue value, RedisProtocol protocol) + { + public readonly TypedRedisValue Value = value; + public readonly RedisProtocol Protocol = protocol; + } + + private readonly Channel _replies = Channel.CreateUnbounded(s_replyChannelOptions); + + public void AddOutbound(in TypedRedisValue message) + { + if (message.IsNil) + { + message.Recycle(); + return; + } + + try + { + var versioned = new VersionedResponse(message, Protocol); + if 
(!_replies.Writer.TryWrite(versioned)) + { + // sorry, we're going to need it, but in reality: we're using + // unbounded channels, so this isn't an issue + _replies.Writer.WriteAsync(versioned).AsTask().Wait(); + } + } + catch + { + message.Recycle(); + } + } + + public ValueTask AddOutboundAsync(in TypedRedisValue message, CancellationToken cancellationToken = default) + { + if (message.IsNil) + { + message.Recycle(); + return default; + } + + try + { + var versioned = new VersionedResponse(message, Protocol); + var pending = _replies.Writer.WriteAsync(versioned, cancellationToken); + if (!pending.IsCompleted) return Awaited(message, pending); + pending.GetAwaiter().GetResult(); + // if we succeed, the writer owns it for recycling + } + catch + { + message.Recycle(); + } + return default; + + static async ValueTask Awaited(TypedRedisValue message, ValueTask pending) + { + try + { + await pending; + // if we succeed, the writer owns it for recycling + } + catch + { + message.Recycle(); + } + } + } + + public void Complete(Exception ex = null) => _replies.Writer.TryComplete(ex); + + public async Task WriteOutputAsync(PipeWriter writer, CancellationToken cancellationToken = default) + { + try + { + var reader = _replies.Reader; + do + { + int count = 0; + while (reader.TryRead(out var versioned)) + { + WriteResponse(writer, versioned.Value, versioned.Protocol); + versioned.Value.Recycle(); + count++; + } + + if (count != 0) + { +#if NET10_0_OR_GREATER + Node?.Server?.OnFlush(this, count, writer.CanGetUnflushedBytes ? 
writer.UnflushedBytes : -1); +#else + Node?.Server?.OnFlush(this, count, -1); +#endif + await writer.FlushAsync(cancellationToken).ConfigureAwait(false); + } + } + // await more data + while (await reader.WaitToReadAsync(cancellationToken).ConfigureAwait(false)); + await writer.CompleteAsync(); + } + catch (Exception ex) + { + await writer.CompleteAsync(ex); + } + + static void WriteResponse(IBufferWriter output, TypedRedisValue value, RedisProtocol protocol) + { + static void WritePrefix(IBufferWriter output, char prefix) + { + var span = output.GetSpan(1); + span[0] = (byte)prefix; + output.Advance(1); + } + + if (value.IsNil) return; // not actually a request (i.e. empty/whitespace request) + + var type = value.Type; + if (protocol is RedisProtocol.Resp2 & type is not RespPrefix.Null) + { + if (type is RespPrefix.VerbatimString) + { + var s = (string)value.AsRedisValue(); + if (s is { Length: >= 4 } && s[3] == ':') + value = TypedRedisValue.BulkString(s.Substring(4)); + } + type = ToResp2(type); + } + RetryResp2: + if (protocol is RedisProtocol.Resp3 && value.IsNullValueOrArray) + { + output.Write("_\r\n"u8); + } + else + { + char prefix; + switch (type) + { + case RespPrefix.Integer: + PhysicalConnection.WriteInteger(output, (long)value.AsRedisValue()); + break; + case RespPrefix.SimpleError: + prefix = '-'; + goto BasicMessage; + case RespPrefix.SimpleString: + prefix = '+'; + BasicMessage: + WritePrefix(output, prefix); + var val = (string)value.AsRedisValue() ?? 
""; + var expectedLength = Encoding.UTF8.GetByteCount(val); + PhysicalConnection.WriteRaw(output, val, expectedLength); + PhysicalConnection.WriteCrlf(output); + break; + case RespPrefix.BulkString: + PhysicalConnection.WriteBulkString(value.AsRedisValue(), output); + break; + case RespPrefix.Null: + case RespPrefix.Push when value.IsNullArray: + case RespPrefix.Map when value.IsNullArray: + case RespPrefix.Set when value.IsNullArray: + case RespPrefix.Attribute when value.IsNullArray: + output.Write("_\r\n"u8); + break; + case RespPrefix.Array when value.IsNullArray: + PhysicalConnection.WriteMultiBulkHeader(output, -1); + break; + case RespPrefix.Push: + case RespPrefix.Map: + case RespPrefix.Array: + case RespPrefix.Set: + case RespPrefix.Attribute: + var segment = value.Span; + PhysicalConnection.WriteMultiBulkHeader(output, segment.Length, ToResultType(type)); + foreach (var item in segment) + { + if (item.IsNil) throw new InvalidOperationException("Array element cannot be nil"); + WriteResponse(output, item, protocol); + } + break; + default: + // retry with RESP2 + var r2 = ToResp2(type); + if (r2 != type) + { + Debug.WriteLine($"{type} not handled in RESP3; using {r2} instead"); + goto RetryResp2; + } + + throw new InvalidOperationException( + "Unexpected result type: " + value.Type); + } + } + + static ResultType ToResultType(RespPrefix type) => + type switch + { + RespPrefix.None => ResultType.None, + RespPrefix.SimpleString => ResultType.SimpleString, + RespPrefix.SimpleError => ResultType.Error, + RespPrefix.Integer => ResultType.Integer, + RespPrefix.BulkString => ResultType.BulkString, + RespPrefix.Array => ResultType.Array, + RespPrefix.Null => ResultType.Null, + RespPrefix.Boolean => ResultType.Boolean, + RespPrefix.Double => ResultType.Double, + RespPrefix.BigInteger => ResultType.BigInteger, + RespPrefix.BulkError => ResultType.BlobError, + RespPrefix.VerbatimString => ResultType.VerbatimString, + RespPrefix.Map => ResultType.Map, + RespPrefix.Set 
=> ResultType.Set, + RespPrefix.Push => ResultType.Push, + RespPrefix.Attribute => ResultType.Attribute, + // StreamContinuation and StreamTerminator don't have direct ResultType equivalents + // These are protocol-level markers, not result types + _ => throw new ArgumentOutOfRangeException(nameof(type), type, "Unexpected RespPrefix value"), + }; + } + } + + public RespPrefix ApplyProtocol(RespPrefix type) => IsResp2 ? ToResp2(type) : type; + + private static RespPrefix ToResp2(RespPrefix type) + { + switch (type) + { + case RespPrefix.Boolean: + return RespPrefix.Integer; + case RespPrefix.Double: + case RespPrefix.BigInteger: + return RespPrefix.SimpleString; + case RespPrefix.BulkError: + return RespPrefix.SimpleError; + case RespPrefix.VerbatimString: + return RespPrefix.BulkString; + case RespPrefix.Map: + case RespPrefix.Set: + case RespPrefix.Push: + case RespPrefix.Attribute: + return RespPrefix.Array; + default: return type; + } + } +} diff --git a/toys/StackExchange.Redis.Server/RedisClient.cs b/toys/StackExchange.Redis.Server/RedisClient.cs index bfe27b042..1f09b3916 100644 --- a/toys/StackExchange.Redis.Server/RedisClient.cs +++ b/toys/StackExchange.Redis.Server/RedisClient.cs @@ -1,40 +1,122 @@ using System; +using System.Buffers; using System.Collections.Generic; using System.IO.Pipelines; +using System.Text; +using RESPite; +using RESPite.Messages; namespace StackExchange.Redis.Server { - public sealed class RedisClient : IDisposable + public partial class RedisClient(RedisServer.Node node) : IDisposable +#pragma warning disable SA1001 + #if NET + , ISpanFormattable +#else + , IFormattable + #endif +#pragma warning restore SA1001 { - internal int SkipReplies { get; set; } - internal bool ShouldSkipResponse() + private RespScanState _readState; + + public override string ToString() { - if (SkipReplies > 0) + if (Protocol is RedisProtocol.Resp2) { - SkipReplies--; + return IsSubscriber ? 
$"{node.Host}:{node.Port} #{Id}:sub" : $"{node.Host}:{node.Port} #{Id}"; + } + return $"{node.Host}:{node.Port} #{Id}:r3"; + } + + string IFormattable.ToString(string format, IFormatProvider formatProvider) => ToString(); +#if NET + public bool TryFormat(Span destination, out int charsWritten, ReadOnlySpan format, IFormatProvider provider) + { + charsWritten = 0; + if (!(TryWrite(ref destination, node.Host.AsSpan(), ref charsWritten) + && TryWrite(ref destination, ":".AsSpan(), ref charsWritten) + && TryWriteInt32(ref destination, node.Port, ref charsWritten) + && TryWrite(ref destination, " #".AsSpan(), ref charsWritten) + && TryWriteInt32(ref destination, Id, ref charsWritten))) + { + return false; + } + if (Protocol is RedisProtocol.Resp2) + { + if (IsSubscriber) + { + if (!TryWrite(ref destination, ":sub".AsSpan(), ref charsWritten)) return false; + } + } + else + { + if (!TryWrite(ref destination, ":r3".AsSpan(), ref charsWritten)) return false; + } + return true; + + static bool TryWrite(ref Span destination, ReadOnlySpan value, ref int charsWritten) + { + if (value.Length > destination.Length) + { + return false; + } + value.CopyTo(destination); + destination = destination.Slice(value.Length); + charsWritten += value.Length; + return true; + } + static bool TryWriteInt32(ref Span destination, int value, ref int charsWritten) + { + if (!value.TryFormat(destination, out var len)) + { + return false; + } + destination = destination.Slice(len); + charsWritten += len; return true; } - return false; } - private HashSet _subscripions; - public int SubscriptionCount => _subscripions?.Count ?? 
0; - internal int Subscribe(RedisChannel channel) +#endif + + public bool TryReadRequest(ReadOnlySequence data, out long consumed) { - if (_subscripions == null) _subscripions = new HashSet(); - _subscripions.Add(channel); - return _subscripions.Count; + // skip past data we've already read + data = data.Slice(_readState.TotalBytes); + var status = RespFrameScanner.Default.TryRead(ref _readState, data); + consumed = _readState.TotalBytes; + switch (status) + { + case OperationStatus.Done: + _readState = default; // reset ready for the next frame + return true; + case OperationStatus.NeedMoreData: + consumed = 0; + return false; + default: + throw new InvalidOperationException($"Unexpected status: {status}"); + } } - internal int Unsubscribe(RedisChannel channel) + + public RedisServer.Node Node => node; + internal int SkipReplies { get; set; } + internal bool ShouldSkipResponse() { - if (_subscripions == null) return 0; - _subscripions.Remove(channel); - return _subscripions.Count; + if (SkipReplies > 0) // skips N + { + SkipReplies--; + return true; + } + return SkipReplies < 0; // skips forever } + public int Database { get; set; } public string Name { get; set; } internal IDuplexPipe LinkedPipe { get; set; } public bool Closed { get; internal set; } public int Id { get; internal set; } + public bool IsAuthenticated { get; internal set; } + public RedisProtocol Protocol { get; internal set; } = RedisProtocol.Resp2; + public long ProtocolVersion => Protocol is RedisProtocol.Resp2 ? 
2 : 3; public void Dispose() { @@ -49,6 +131,215 @@ public void Dispose() try { pipe.Output.Complete(); } catch { } if (pipe is IDisposable d) try { d.Dispose(); } catch { } } + + _readState = default; + } + + private int _activeSlot = ServerSelectionStrategy.NoSlot; + internal void ResetAfterRequest() => _activeSlot = ServerSelectionStrategy.NoSlot; + public virtual void OnKey(in RedisKey key, KeyFlags flags) + { + if ((flags & KeyFlags.NoSlotCheck) == 0 & node.CheckCrossSlot) + { + var slot = RespServer.GetHashSlot(key); + if (_activeSlot is ServerSelectionStrategy.NoSlot) + { + _activeSlot = slot; + } + else if (_activeSlot != slot) + { + CrossSlotException.Throw(); + } + } + // ASKING here? + node.AssertKey(key); + + if ((flags & KeyFlags.ReadOnly) == 0) node.Touch(Database, key); + } + + public void Touch(int database, in RedisKey key) + { + TransactionState failureState = TransactionState.WatchDoomed; + switch (_transactionState) + { + case TransactionState.WatchHopeful: + if (_watching.Contains(new(database, key))) + { + _transactionState = failureState; + _watching.Clear(); + } + break; + case TransactionState.MultiHopeful: + failureState = TransactionState.MultiDoomedByTouch; + _transaction?.Clear(); + goto case TransactionState.WatchHopeful; + } + } + + public bool Watch(in RedisKey key) + { + switch (_transactionState) + { + case TransactionState.None: + _transactionState = TransactionState.WatchHopeful; + goto case TransactionState.WatchHopeful; + case TransactionState.WatchHopeful: + _watching.Add(new(Database, key)); + return true; + case TransactionState.WatchDoomed: + case TransactionState.MultiDoomedByTouch: + // no point tracking, just pretend + return true; + default: + // can't watch inside multi + return false; + } } + + public bool Unwatch() + { + switch (_transactionState) + { + case TransactionState.MultiHopeful: + case TransactionState.MultiDoomedByTouch: + case TransactionState.MultiAbortByError: + return false; + default: + 
_watching.Clear(); + _transactionState = TransactionState.None; + return true; + } + } + + private TransactionState _transactionState; + + private enum TransactionState + { + None, + WatchHopeful, + WatchDoomed, + MultiHopeful, + MultiDoomedByTouch, + MultiAbortByError, + } + + private readonly struct DatabaseKey(int db, in RedisKey key) : IEquatable + { + public readonly int Db = db; + public readonly RedisKey Key = key; + public override int GetHashCode() => unchecked((Db * 397) ^ Key.GetHashCode()); + public override bool Equals(object obj) => obj is DatabaseKey other && Equals(other); + public bool Equals(DatabaseKey other) => Db == other.Db && Key.Equals(other.Key); + } + private readonly HashSet _watching = []; + + public bool Multi() + { + switch (_transactionState) + { + case TransactionState.None: + case TransactionState.WatchHopeful: + _transactionState = TransactionState.MultiHopeful; + return true; + case TransactionState.WatchDoomed: + _transactionState = TransactionState.MultiDoomedByTouch; + return true; + default: + return false; + } + } + + public bool Discard() + { + switch (_transactionState) + { + case TransactionState.MultiHopeful: + case TransactionState.MultiDoomedByTouch: + _transactionState = TransactionState.None; + _watching.Clear(); + _transaction?.Clear(); + return true; + case TransactionState.MultiAbortByError: + return true; + default: + return false; + } + } + + public void ExecAbort() + { + switch (_transactionState) + { + case TransactionState.MultiHopeful: + case TransactionState.MultiDoomedByTouch: + _transactionState = TransactionState.MultiAbortByError; + _watching.Clear(); + _transaction?.Clear(); + break; + } + } + + public enum ExecResult + { + NotInTransaction, + WatchConflict, + AbortedByError, + CommandsReturned, + } + + public ExecResult FlushMulti(out byte[][] commands) + { + commands = []; + switch (_transactionState) + { + case TransactionState.MultiHopeful: + _transactionState = TransactionState.None; + 
_watching.Clear(); + commands = _transaction?.ToArray() ?? []; + _transaction?.Clear(); + return ExecResult.CommandsReturned; + case TransactionState.MultiDoomedByTouch: + _transactionState = TransactionState.None; + return ExecResult.WatchConflict; + case TransactionState.MultiAbortByError: + _transactionState = TransactionState.None; + return ExecResult.AbortedByError; + default: + return ExecResult.NotInTransaction; + } + } + + // completely unoptimized for now; this is fine + private List _transaction; // null until needed + + internal bool BufferMulti(in RedisRequest request, in AsciiHash command) + { + switch (_transactionState) + { + case TransactionState.MultiHopeful when !AllowInTransaction(command): + (_transaction ??= []).Add(request.Serialize()); + return true; + case TransactionState.MultiAbortByError when !AllowInTransaction(command): + case TransactionState.MultiDoomedByTouch when !AllowInTransaction(command): + // don't buffer anything, just pretend + return true; + default: + return false; + } + + static bool AllowInTransaction(in AsciiHash cmd) + => cmd.Equals(EXEC) || cmd.Equals(DISCARD) || cmd.Equals(MULTI) + || cmd.Equals(WATCH) || cmd.Equals(UNWATCH); + } + + private static readonly AsciiHash + EXEC = new("EXEC"u8), DISCARD = new("DISCARD"u8), MULTI = new("MULTI"u8), + WATCH = new("WATCH"u8), UNWATCH = new("UNWATCH"u8); + } + + internal sealed class CrossSlotException : Exception + { + private CrossSlotException() { } + public static void Throw() => throw new CrossSlotException(); } } diff --git a/toys/StackExchange.Redis.Server/RedisRequest.cs b/toys/StackExchange.Redis.Server/RedisRequest.cs index fda9474c9..269e31d9a 100644 --- a/toys/StackExchange.Redis.Server/RedisRequest.cs +++ b/toys/StackExchange.Redis.Server/RedisRequest.cs @@ -1,14 +1,23 @@ using System; +using System.Buffers; +using System.Diagnostics; +using RESPite; +using RESPite.Messages; namespace StackExchange.Redis.Server { public readonly ref struct RedisRequest - { // why 
ref? don't *really* need it, but: these things are "in flight" - // based on an open RawResult (which is just the detokenized ReadOnlySequence) - // so: using "ref" makes it clear that you can't expect to store these and have - // them keep working - private readonly RawResult _inner; + { + private readonly RespReader _rootReader; + private readonly RedisClient _client; + public RedisRequest WithClient(RedisClient client) => new(in this, client); + + private RedisRequest(scoped in RedisRequest original, RedisClient client) + { + this = original; + _client = client; + } public int Count { get; } public override string ToString() => Count == 0 ? "(n/a)" : GetString(0); @@ -21,43 +30,120 @@ public TypedRedisValue CommandNotFound() public TypedRedisValue UnknownSubcommandOrArgumentCount() => TypedRedisValue.Error($"ERR Unknown subcommand or wrong number of arguments for '{ToString()}'."); - public string GetString(int index) - => _inner[index].GetString(); + public string GetString(int index) => GetReader(index).ReadString(); - public bool IsString(int index, string value) // TODO: optimize - => string.Equals(value, _inner[index].GetString(), StringComparison.OrdinalIgnoreCase); + [Obsolete("Use IsString(int, ReadOnlySpan{byte}) instead.")] + public bool IsString(int index, string value) + => GetReader(index).Is(value); + + public bool IsString(int index, ReadOnlySpan value) + => GetReader(index).Is(value); public override int GetHashCode() => throw new NotSupportedException(); - internal RedisRequest(in RawResult result) + + /// + /// Get a reader initialized at the start of the payload. + /// + public RespReader GetRootReader() => _rootReader; + + /// + /// Get a reader initialized at the start of the payload. 
+ /// + public RespReader GetReader(int childIndex) { - _inner = result; - Count = result.ItemsCount; + if (childIndex < 0 || childIndex >= Count) Throw(); + var reader = GetRootReader(); + reader.MoveNextAggregate(); + for (int i = 0; i < childIndex; i++) + { + reader.MoveNextScalar(); + } + reader.MoveNextScalar(); + return reader; + + static void Throw() => throw new ArgumentOutOfRangeException(nameof(childIndex)); } - public RedisValue GetValue(int index) - => _inner[index].AsRedisValue(); + internal RedisRequest(scoped in RespReader reader, ref byte[] commandLease) + { + _rootReader = reader; + var local = reader; + if (local.TryMoveNext(checkError: false) & local.IsAggregate) + { + Count = local.AggregateLength(); + } - public int GetInt32(int index) - => (int)_inner[index].AsRedisValue(); + if (Count == 0) + { + Command = s_EmptyCommand; + KnownCommand = RedisCommand.UNKNOWN; + } + else + { + local.MoveNextScalar(); + unsafe + { + KnownCommand = local.TryParseScalar(&RedisCommandMetadata.TryParseCI, out RedisCommand cmd) + ? 
cmd : RedisCommand.UNKNOWN; + } + var len = local.ScalarLength(); + if (len > commandLease.Length) + { + ArrayPool.Shared.Return(commandLease); + commandLease = ArrayPool.Shared.Rent(len); + } + var readBytes = local.CopyTo(commandLease); + Debug.Assert(readBytes == len); + AsciiHash.ToUpper(commandLease.AsSpan(0, readBytes)); + // note we retain the lease array in the Command, this is intentional + Command = new(commandLease, 0, readBytes); + } + } - public long GetInt64(int index) => (long)_inner[index].AsRedisValue(); + internal RedisCommand KnownCommand { get; } - public RedisKey GetKey(int index) => _inner[index].AsRedisKey(); + internal static byte[] GetLease() => ArrayPool.Shared.Rent(16); + internal static void ReleaseLease(ref byte[] commandLease) + { + ArrayPool.Shared.Return(commandLease); + commandLease = []; + } - public RedisChannel GetChannel(int index, RedisChannel.PatternMode mode) - => _inner[index].AsRedisChannel(null, mode); + private static readonly AsciiHash s_EmptyCommand = new(Array.Empty()); - internal bool TryGetCommandBytes(int i, out CommandBytes command) - { - var payload = _inner[i].Payload; - if (payload.Length > CommandBytes.MaxLength) - { - command = default; - return false; - } + public readonly AsciiHash Command; + + public RedisValue GetValue(int index) => GetReader(index).ReadRedisValue(); + + public bool TryGetInt64(int index, out long value) => GetReader(index).TryReadInt64(out value); + + public bool TryGetInt32(int index, out int value) => GetReader(index).TryReadInt32(out value); - command = payload.IsEmpty ? 
default : new CommandBytes(payload); - return true; + public int GetInt32(int index) => GetReader(index).ReadInt32(); + + public long GetInt64(int index) => GetReader(index).ReadInt64(); + + public RedisKey GetKey(int index, KeyFlags flags = KeyFlags.None) + { + var key = GetReader(index).ReadRedisKey(); + _client?.OnKey(key, flags); + return key; } + + internal RedisChannel GetChannel(int index, RedisChannel.RedisChannelOptions options) + => GetReader(index).ReadRedisChannel(options); + + internal RedisRequest(ReadOnlySpan payload, ref byte[] commandLease) : this(new RespReader(payload), ref commandLease) { } + internal RedisRequest(in ReadOnlySequence payload, ref byte[] commandLease) : this(new RespReader(payload), ref commandLease) { } + + public byte[] Serialize() => _rootReader.Serialize(); + } + + [Flags] + public enum KeyFlags + { + None = 0, + ReadOnly = 1 << 0, + NoSlotCheck = 1 << 1, } } diff --git a/toys/StackExchange.Redis.Server/RedisServer.PubSub.cs b/toys/StackExchange.Redis.Server/RedisServer.PubSub.cs new file mode 100644 index 000000000..7778ed63b --- /dev/null +++ b/toys/StackExchange.Redis.Server/RedisServer.PubSub.cs @@ -0,0 +1,356 @@ +using System; +using System.Buffers; +using System.Collections.Generic; +using System.Text.RegularExpressions; +using System.Threading; +using RESPite.Messages; + +namespace StackExchange.Redis.Server; + +public partial class RedisServer +{ + protected virtual void OnOutOfBand(RedisClient client, TypedRedisValue message) + => client.AddOutbound(message); + + [RedisCommand(-2)] + protected virtual TypedRedisValue Subscribe(RedisClient client, in RedisRequest request) + => SubscribeImpl(client, request, RedisCommand.SUBSCRIBE); + + [RedisCommand(-1)] + protected virtual TypedRedisValue Unsubscribe(RedisClient client, in RedisRequest request) + => SubscribeImpl(client, request, RedisCommand.UNSUBSCRIBE); + + [RedisCommand(-2)] + protected virtual TypedRedisValue PSubscribe(RedisClient client, in RedisRequest 
request) + => SubscribeImpl(client, request, RedisCommand.PSUBSCRIBE); + + [RedisCommand(-1)] + protected virtual TypedRedisValue PUnsubscribe(RedisClient client, in RedisRequest request) + => SubscribeImpl(client, request, RedisCommand.PUNSUBSCRIBE); + + [RedisCommand(-2)] + protected virtual TypedRedisValue SSubscribe(RedisClient client, in RedisRequest request) + => SubscribeImpl(client, request, RedisCommand.SSUBSCRIBE); + + [RedisCommand(-1)] + protected virtual TypedRedisValue SUnsubscribe(RedisClient client, in RedisRequest request) + => SubscribeImpl(client, request, RedisCommand.SUNSUBSCRIBE); + + [RedisCommand(3)] + protected virtual TypedRedisValue Publish(RedisClient client, in RedisRequest request) + { + PublishPair pair = new( + request.GetChannel(1, RedisChannel.RedisChannelOptions.None), + request.GetValue(2)); + // note: docs say "the number of clients that the message was sent to.", but this is a lie; it + // is the number of *subscriptions* - if a client has two matching: delta is two + int count = ForAllClients(pair, static (client, pair) => client.Publish(pair.Channel, pair.Value)); + return TypedRedisValue.Integer(count); + } + + private readonly struct PublishPair(RedisChannel channel, RedisValue value, Node node = null) + { + public readonly RedisChannel Channel = channel; + public readonly RedisValue Value = value; + public readonly Node Node = node; + } + [RedisCommand(3)] + protected virtual TypedRedisValue SPublish(RedisClient client, in RedisRequest request) + { + var channel = request.GetChannel(1, RedisChannel.RedisChannelOptions.Sharded); + var node = client.Node; // filter to clients on the same node + var slot = ServerSelectionStrategy.GetClusterSlot((byte[])channel); + if (!node.HasSlot(slot)) KeyMovedException.Throw(slot); + + PublishPair pair = new(channel, request.GetValue(2)); + int count = ForAllClients(pair, static (client, pair) => + ReferenceEquals(client.Node, pair.Node) ? 
client.Publish(pair.Channel, pair.Value) : 0); + return TypedRedisValue.Integer(count); + } + + private TypedRedisValue SubscribeImpl(RedisClient client, in RedisRequest request, RedisCommand cmd) + { + bool add = cmd is RedisCommand.SUBSCRIBE or RedisCommand.PSUBSCRIBE or RedisCommand.SSUBSCRIBE; + var options = cmd switch + { + RedisCommand.SUBSCRIBE or RedisCommand.UNSUBSCRIBE => RedisChannel.RedisChannelOptions.None, + RedisCommand.PSUBSCRIBE or RedisCommand.PUNSUBSCRIBE => RedisChannel.RedisChannelOptions.Pattern, + RedisCommand.SSUBSCRIBE or RedisCommand.SUNSUBSCRIBE => RedisChannel.RedisChannelOptions.Sharded, + _ => throw new ArgumentOutOfRangeException(nameof(cmd)), + }; + + // buffer the slots while checking validity + var subCount = request.Count - 1; + if (subCount == 0 & !add) + { + client.UnsubscribeAll(cmd); + } + else + { + var lease = ArrayPool.Shared.Rent(subCount); + try + { + var channel = lease[0] = request.GetChannel(1, options); + int slot = channel.IsSharded + ? ServerSelectionStrategy.GetClusterSlot(channel) + : ServerSelectionStrategy.NoSlot; + if (!client.Node.HasSlot(slot)) KeyMovedException.Throw(slot); + for (int i = 2; i <= subCount; i++) + { + channel = lease[i - 1] = request.GetChannel(i, options); + if (slot != ServerSelectionStrategy.NoSlot && + slot != ServerSelectionStrategy.GetClusterSlot(channel)) + { + CrossSlotException.Throw(); + } + } + + for (int i = 0; i < subCount; i++) + { + if (add) client.Subscribe(lease[i]); + else client.Unsubscribe(lease[i]); + } + } + finally + { + ArrayPool.Shared.Return(lease); + } + } + + return TypedRedisValue.Nil; + } +} + +public partial class RedisClient +{ + private bool HasSubscriptions + { + get + { + var subs = _subscriptions; + if (subs is null) return false; + lock (subs) + { + return subs.Count != 0; + } + } + } + + private Dictionary SubscriptionsIfAny + { + get + { + var subs = _subscriptions; + if (subs is not null) + { + lock (subs) + { + if (subs.Count == 0) return null; + } + 
} + return subs; + } + } + private Dictionary Subscriptions + { + get + { + return _subscriptions ?? InitSubs(); + + Dictionary InitSubs() + { + var newSubs = new Dictionary(); + return Interlocked.CompareExchange(ref _subscriptions, newSubs, null) ?? newSubs; + } + } + } + + private int simpleCount, shardedCount, patternCount; + private Dictionary _subscriptions; + public int SubscriptionCount => simpleCount; + public int ShardedSubscriptionCount => shardedCount; + public int PatternSubscriptionCount => patternCount; + public bool IsSubscriber => (SubscriptionCount + ShardedSubscriptionCount + PatternSubscriptionCount) != 0; + + public int Publish(in RedisChannel channel, in RedisValue value) + { + var node = Node; + if (node is null) return 0; + int count = 0; + var subs = Subscriptions; + lock (subs) + { + // we can do simple and sharded equality lookups directly + if ((simpleCount + shardedCount) != 0 && subs.TryGetValue(channel, out _)) + { + var msg = TypedRedisValue.Rent(3, out var span, PushKind); + span[0] = TypedRedisValue.BulkString(channel.IsSharded ? "smessage" : "message"); + span[1] = TypedRedisValue.BulkString(channel); + span[2] = TypedRedisValue.BulkString(value); + node.OnOutOfBand(this, msg); + count++; + } + + if (patternCount != 0 && !channel.IsSharded) + { + // need to loop for patterns + var channelName = channel.ToString(); + foreach (var pair in subs) + { + if (pair.Key.IsPattern && pair.Value is { } glob && glob.IsMatch(channelName)) + { + var msg = TypedRedisValue.Rent(4, out var span, PushKind); + span[0] = TypedRedisValue.BulkString("pmessage"); + span[1] = TypedRedisValue.BulkString(pair.Key); + span[2] = TypedRedisValue.BulkString(channel); + span[3] = TypedRedisValue.BulkString(value); + node.OnOutOfBand(this, msg); + count++; + } + } + } + } + + return count; + } + + public bool IsResp2 => Protocol is RedisProtocol.Resp2; + + public RespPrefix PushKind => IsResp2 ? 
RespPrefix.Array : RespPrefix.Push; + + private void SendSubUnsubMessage(string kind, RedisChannel channel, int count) + { + if (Node is { } node) + { + var reply = TypedRedisValue.Rent(3, out var span, PushKind); + span[0] = TypedRedisValue.BulkString(kind); + span[1] = TypedRedisValue.BulkString((byte[])channel); + span[2] = TypedRedisValue.Integer(count); + // go via node to allow logging etc + node.OnOutOfBand(this, reply); + } + } + + private ref int GetCountField(RedisChannel channel) + => ref channel.IsSharded ? ref shardedCount + : ref channel.IsPattern ? ref patternCount + : ref simpleCount; + + internal void Subscribe(RedisChannel channel) + { + Regex glob = channel.IsPattern ? BuildGlob(channel) : null; + var subs = Subscriptions; + int count; + ref int field = ref GetCountField(channel); + lock (subs) + { + #if NET + count = subs.TryAdd(channel, glob) ? ++field : field; + #else + if (subs.ContainsKey(channel)) + { + count = field; + } + else + { + subs.Add(channel, glob); + count = ++field; + } + #endif + } + SendSubUnsubMessage( + channel.IsSharded ? "ssubscribe" + : channel.IsPattern ? "psubscribe" + : "subscribe", + channel, + count); + } + + private Regex BuildGlob(RedisChannel channel) + { + /* supported patterns: + h?llo subscribes to hello, hallo and hxllo + h*llo subscribes to hllo and heeeello + h[ae]llo subscribes to hello and hallo, but not hillo + */ + // firstly, escape *everything*, then we'll go back and fixup + var re = Regex.Escape(channel.ToString()); + re = re.Replace(@"\?", ".").Replace(@"\*", ".*") + .Replace(@"\[", "[").Replace(@"\]", "]"); // not perfect, but good enough for now + return new Regex(re, RegexOptions.CultureInvariant); + } + + internal void Unsubscribe(RedisChannel channel) + { + var subs = SubscriptionsIfAny; + if (subs is null) return; + int count; + ref int field = ref GetCountField(channel); + lock (subs) + { + count = subs.Remove(channel) ? --field : field; + } + SendSubUnsubMessage( + channel.IsSharded ? 
"sunsubscribe" + : channel.IsPattern ? "punsubscribe" + : "unsubscribe", + channel, + count); + } + + internal void UnsubscribeAll(RedisCommand cmd) + { + var subs = Subscriptions; + if (subs is null) return; + RedisChannel[] remove; + int count = 0; + string msg; + lock (subs) + { + remove = ArrayPool.Shared.Rent(count); + foreach (var pair in subs) + { + var key = pair.Key; + if (cmd switch + { + RedisCommand.UNSUBSCRIBE when !(pair.Key.IsPattern | pair.Key.IsSharded) => true, + RedisCommand.PUNSUBSCRIBE when pair.Key.IsPattern => true, + RedisCommand.SUNSUBSCRIBE when pair.Key.IsSharded => true, + _ => false, + }) + { + remove[count++] = key; + } + } + + foreach (var key in remove.AsSpan(0, count)) + { + _subscriptions.Remove(key); + } + + switch (cmd) + { + case RedisCommand.SUNSUBSCRIBE: + msg = "sunsubscribe"; + shardedCount = 0; + break; + case RedisCommand.PUNSUBSCRIBE: + msg = "punsubscribe"; + patternCount = 0; + break; + case RedisCommand.UNSUBSCRIBE: + msg = "unsubscribe"; + simpleCount = 0; + break; + default: + msg = ""; + break; + } + } + foreach (var key in remove.AsSpan(0, count)) + { + SendSubUnsubMessage(msg, key, 0); + } + ArrayPool.Shared.Return(remove); + } +} diff --git a/toys/StackExchange.Redis.Server/RedisServer.cs b/toys/StackExchange.Redis.Server/RedisServer.cs index 16d76ac42..aa26e34a6 100644 --- a/toys/StackExchange.Redis.Server/RedisServer.cs +++ b/toys/StackExchange.Redis.Server/RedisServer.cs @@ -1,20 +1,130 @@ using System; +using System.Collections.Concurrent; using System.Collections.Generic; using System.Diagnostics; using System.IO; +using System.Linq; +using System.Net; using System.Text; +using System.Threading; +using RESPite; +using RESPite.Messages; namespace StackExchange.Redis.Server { - public abstract class RedisServer : RespServer + public abstract partial class RedisServer : RespServer { - public static bool IsMatch(string pattern, string key) + // non-trivial wildcards not implemented yet! 
+ public static bool IsMatch(string pattern, string key) => + pattern == "*" || string.Equals(pattern, key, StringComparison.OrdinalIgnoreCase); + + private ConcurrentDictionary _nodes = new(); + + public bool TryGetNode(EndPoint endpoint, out Node node) => _nodes.TryGetValue(endpoint, out node); + + public EndPoint DefaultEndPoint + { + get + { + foreach (var pair in _nodes) + { + return pair.Key; + } + throw new InvalidOperationException("No endpoints"); + } + } + + public override Node DefaultNode + { + get + { + foreach (var pair in _nodes) + { + return pair.Value; + } + return null; + } + } + + public IEnumerable GetEndPoints() + { + foreach (var pair in _nodes) + { + yield return pair.Key; + } + } + + public bool Migrate(int hashSlot, EndPoint to) { - // non-trivial wildcards not implemented yet! - return pattern == "*" || string.Equals(pattern, key, StringComparison.OrdinalIgnoreCase); + if (ServerType != ServerType.Cluster) throw new InvalidOperationException($"Server mode is {ServerType}"); + if (!TryGetNode(to, out var target)) throw new KeyNotFoundException($"Target node not found: {Format.ToString(to)}"); + foreach (var pair in _nodes) + { + if (pair.Value.HasSlot(hashSlot)) + { + if (pair.Value == target) return false; // nothing to do + + if (!pair.Value.RemoveSlot(hashSlot)) + { + throw new KeyNotFoundException($"Unable to remove slot {hashSlot} from old owner"); + } + target.AddSlot(hashSlot); + return true; + } + } + throw new KeyNotFoundException($"Source node not found for slot {hashSlot}"); } - protected RedisServer(int databases = 16, TextWriter output = null) : base(output) + public bool Migrate(Span key, EndPoint to) => Migrate(ServerSelectionStrategy.GetClusterSlot(key), to); + public bool Migrate(in RedisKey key, EndPoint to) => Migrate(GetHashSlot(key), to); + + public EndPoint AddEmptyNode() { + EndPoint endpoint; + Node node; + do + { + endpoint = null; + int maxPort = 0; + foreach (var pair in _nodes) + { + endpoint ??= pair.Key; + 
switch (pair.Key) + { + case IPEndPoint ip: + if (ip.Port > maxPort) maxPort = ip.Port; + break; + case DnsEndPoint dns: + if (dns.Port > maxPort) maxPort = dns.Port; + break; + } + } + + switch (endpoint) + { + case null: + endpoint = new IPEndPoint(IPAddress.Loopback, 6379); + break; + case IPEndPoint ip: + endpoint = new IPEndPoint(ip.Address, maxPort + 1); + break; + case DnsEndPoint dns: + endpoint = new DnsEndPoint(dns.Host, maxPort + 1); + break; + } + + node = new(this, endpoint); + node.UpdateSlots([]); // explicit empty range (rather than implicit "all nodes") + } + // defensive loop for concurrency + while (!_nodes.TryAdd(endpoint, node)); + return endpoint; + } + + protected RedisServer(EndPoint endpoint = null, int databases = 16, TextWriter output = null) : base(output) + { + endpoint ??= new IPEndPoint(IPAddress.Loopback, 6379); + _nodes.TryAdd(endpoint, new Node(this, endpoint)); + RedisVersion = s_DefaultServerVersion; if (databases < 1) throw new ArgumentOutOfRangeException(nameof(databases)); Databases = databases; var config = ServerConfiguration; @@ -43,8 +153,123 @@ protected override void AppendStats(StringBuilder sb) } public int Databases { get; } + public string Password { get; set; } = ""; + + public override TypedRedisValue Execute(RedisClient client, in RedisRequest request) + { + var pw = Password; + if (pw.Length != 0 & !client.IsAuthenticated) + { + if (!IsAuthCommand(request.KnownCommand)) + return TypedRedisValue.Error("NOAUTH Authentication required."); + } + else if (client.Protocol is RedisProtocol.Resp2 && client.IsSubscriber && + !IsPubSubCommand(request.KnownCommand)) + { + return TypedRedisValue.Error( + $"ERR only [P|S][UN]SUBSCRIBE / PING / QUIT allowed in this context (got: '{request.Command}')"); + } + return base.Execute(client, request); + + static bool IsAuthCommand(RedisCommand cmd) => cmd is RedisCommand.AUTH or RedisCommand.HELLO; + static bool IsPubSubCommand(RedisCommand cmd) + => cmd is RedisCommand.SUBSCRIBE or 
RedisCommand.UNSUBSCRIBE + or RedisCommand.SSUBSCRIBE or RedisCommand.SUNSUBSCRIBE + or RedisCommand.PSUBSCRIBE or RedisCommand.PUNSUBSCRIBE + or RedisCommand.PING or RedisCommand.QUIT; + } + + [RedisCommand(2)] + protected virtual TypedRedisValue Auth(RedisClient client, in RedisRequest request) + { + if (request.GetString(1) == Password) + { + client.IsAuthenticated = true; + return TypedRedisValue.OK; + } + return TypedRedisValue.Error("ERR invalid password"); + } + + [RedisCommand(-1)] + protected virtual TypedRedisValue Hello(RedisClient client, in RedisRequest request) + { + var protocol = client.Protocol; + bool isAuthed = client.IsAuthenticated; + string name = client.Name; + if (request.Count >= 2) + { + if (!request.TryGetInt32(1, out var protover)) return TypedRedisValue.Error("ERR Protocol version is not an integer or out of range"); + switch (protover) + { + case 2: + protocol = RedisProtocol.Resp2; + break; + case 3: // this client does not currently support RESP3 + protocol = RedisProtocol.Resp3; + break; + default: + return TypedRedisValue.Error("NOPROTO unsupported protocol version"); + } + static TypedRedisValue ArgFail(in RespReader reader) => TypedRedisValue.Error($"ERR Syntax error in HELLO option '{reader.ReadString()}'\""); + + for (int i = 2; i < request.Count; i++) + { + int remaining = request.Count - (i + 1); + var fieldReader = request.GetReader(i); + HelloSubFields field; + unsafe + { + if (!fieldReader.TryParseScalar(&HelloSubFieldsMetadata.TryParseCI, out field)) + { + return ArgFail(fieldReader); + } + } + + switch (field) + { + case HelloSubFields.Auth: + if (remaining < 2) return ArgFail(fieldReader); + // ignore username for now + var pw = request.GetString(i + 2); + if (pw != Password) return TypedRedisValue.Error("WRONGPASS invalid username-password pair or user is disabled."); + isAuthed = true; + i += 2; + break; + case HelloSubFields.SetName: + if (remaining < 1) return ArgFail(fieldReader); + name = request.GetString(++i); + 
break; + default: + return ArgFail(fieldReader); + } + } + } + + // all good, update client + client.Protocol = protocol; + client.IsAuthenticated = isAuthed; + client.Name = name; + + var reply = TypedRedisValue.Rent(14, out var span, RespPrefix.Map); + span[0] = TypedRedisValue.BulkString("server"); + span[1] = TypedRedisValue.BulkString("redis"); + span[2] = TypedRedisValue.BulkString("version"); + span[3] = TypedRedisValue.BulkString(VersionString); + span[4] = TypedRedisValue.BulkString("proto"); + span[5] = TypedRedisValue.Integer(client.ProtocolVersion); + span[6] = TypedRedisValue.BulkString("id"); + span[7] = TypedRedisValue.Integer(client.Id); + span[8] = TypedRedisValue.BulkString("mode"); + span[9] = TypedRedisValue.BulkString(ModeString); + span[10] = TypedRedisValue.BulkString("role"); + span[11] = TypedRedisValue.BulkString("master"); + span[12] = TypedRedisValue.BulkString("modules"); + span[13] = TypedRedisValue.EmptyArray(RespPrefix.Array); + return reply; + } + [RedisCommand(-3)] - protected virtual TypedRedisValue Sadd(RedisClient client, RedisRequest request) + protected virtual TypedRedisValue Sadd(RedisClient client, in RedisRequest request) { int added = 0; var key = request.GetKey(1); @@ -55,10 +280,10 @@ protected virtual TypedRedisValue Sadd(RedisClient client, RedisRequest request) } return TypedRedisValue.Integer(added); } - protected virtual bool Sadd(int database, RedisKey key, RedisValue value) => throw new NotSupportedException(); + protected virtual bool Sadd(int database, in RedisKey key, in RedisValue value) => throw new NotSupportedException(); [RedisCommand(-3)] - protected virtual TypedRedisValue Srem(RedisClient client, RedisRequest request) + protected virtual TypedRedisValue Srem(RedisClient client, in RedisRequest request) { int removed = 0; var key = request.GetKey(1); @@ -69,53 +294,496 @@ protected virtual TypedRedisValue Srem(RedisClient client, RedisRequest request) } return TypedRedisValue.Integer(removed); } - 
protected virtual bool Srem(int database, RedisKey key, RedisValue value) => throw new NotSupportedException(); + protected virtual bool Srem(int database, in RedisKey key, in RedisValue value) => throw new NotSupportedException(); [RedisCommand(2)] - protected virtual TypedRedisValue Spop(RedisClient client, RedisRequest request) + protected virtual TypedRedisValue Spop(RedisClient client, in RedisRequest request) => TypedRedisValue.BulkString(Spop(client.Database, request.GetKey(1))); - protected virtual RedisValue Spop(int database, RedisKey key) => throw new NotSupportedException(); + protected virtual RedisValue Spop(int database, in RedisKey key) => throw new NotSupportedException(); [RedisCommand(2)] - protected virtual TypedRedisValue Scard(RedisClient client, RedisRequest request) - => TypedRedisValue.Integer(Scard(client.Database, request.GetKey(1))); + protected virtual TypedRedisValue Scard(RedisClient client, in RedisRequest request) + => TypedRedisValue.Integer(Scard(client.Database, request.GetKey(1, KeyFlags.ReadOnly))); - protected virtual long Scard(int database, RedisKey key) => throw new NotSupportedException(); + protected virtual long Scard(int database, in RedisKey key) => throw new NotSupportedException(); [RedisCommand(3)] - protected virtual TypedRedisValue Sismember(RedisClient client, RedisRequest request) - => Sismember(client.Database, request.GetKey(1), request.GetValue(2)) ? TypedRedisValue.One : TypedRedisValue.Zero; + protected virtual TypedRedisValue Sismember(RedisClient client, in RedisRequest request) + => Sismember(client.Database, request.GetKey(1, KeyFlags.ReadOnly), request.GetValue(2)) ? 
TypedRedisValue.One : TypedRedisValue.Zero; + + protected virtual bool Sismember(int database, in RedisKey key, in RedisValue value) => throw new NotSupportedException(); - protected virtual bool Sismember(int database, RedisKey key, RedisValue value) => throw new NotSupportedException(); + [RedisCommand(3)] + protected virtual TypedRedisValue Rename(RedisClient client, in RedisRequest request) + { + RedisKey oldKey = request.GetKey(1), newKey = request.GetKey(2); + return oldKey == newKey || Rename(client.Database, oldKey, newKey) ? TypedRedisValue.OK : TypedRedisValue.Error("ERR no such key"); + } - [RedisCommand(3, "client", "setname", LockFree = true)] - protected virtual TypedRedisValue ClientSetname(RedisClient client, RedisRequest request) + protected virtual bool Rename(int database, in RedisKey oldKey, in RedisKey newKey) + { + // can implement with Exists/Del/Set + if (!Exists(database, oldKey)) return false; + Del(database, newKey); + Set(database, newKey, Get(database, oldKey)); + Del(database, oldKey); + return true; + } + + [RedisCommand(4)] + protected virtual TypedRedisValue SetEx(RedisClient client, in RedisRequest request) + { + RedisKey key = request.GetKey(1); + int seconds = request.GetInt32(2); + var value = request.GetValue(3); + SetEx(client.Database, key, TimeSpan.FromSeconds(seconds), value); + return TypedRedisValue.OK; + } + + [RedisCommand(-2)] + protected virtual TypedRedisValue Touch(RedisClient client, in RedisRequest request) + { + for (int i = 1; i < request.Count; i++) + { + Touch(client.Database, request.GetKey(i)); + } + + return TypedRedisValue.OK; + } + + [RedisCommand(-2)] + protected virtual TypedRedisValue Watch(RedisClient client, in RedisRequest request) + { + for (int i = 1; i < request.Count; i++) + { + var key = request.GetKey(i, KeyFlags.ReadOnly); + if (!client.Watch(key)) + return TypedRedisValue.Error("WATCH inside MULTI is not allowed"); + } + return TypedRedisValue.OK; + } + + [RedisCommand(1)] + protected 
virtual TypedRedisValue Unwatch(RedisClient client, in RedisRequest request) + { + return client.Unwatch() ? TypedRedisValue.OK : TypedRedisValue.Error("UNWATCH inside MULTI is not allowed"); + } + + [RedisCommand(1)] + protected virtual TypedRedisValue Multi(RedisClient client, in RedisRequest request) + { + return client.Multi() ? TypedRedisValue.OK : TypedRedisValue.Error("MULTI calls can not be nested"); + } + + [RedisCommand(1)] + protected virtual TypedRedisValue Discard(RedisClient client, in RedisRequest request) + { + return client.Discard() ? TypedRedisValue.OK : TypedRedisValue.Error("DISCARD without MULTI"); + } + + [RedisCommand(1)] + protected virtual TypedRedisValue Exec(RedisClient client, in RedisRequest request) + { + var exec = client.FlushMulti(out var commands); + switch (exec) + { + case RedisClient.ExecResult.NotInTransaction: + return TypedRedisValue.Error("EXEC without MULTI"); + case RedisClient.ExecResult.WatchConflict: + return TypedRedisValue.NullArray(RespPrefix.Array); + case RedisClient.ExecResult.AbortedByError: + return TypedRedisValue.Error("EXECABORT Transaction discarded because of previous errors."); + } + Debug.Assert(exec is RedisClient.ExecResult.CommandsReturned); + + var results = TypedRedisValue.Rent(commands.Length, out var span, RespPrefix.Array); + int index = 0; + var lease = RedisRequest.GetLease(); + try + { + foreach (var cmd in commands) + { + RedisRequest inner = new(cmd, ref lease); + inner = inner.WithClient(client); + span[index++] = Execute(client, inner); + } + } + finally + { + RedisRequest.ReleaseLease(ref lease); + } + return results; + } + + protected virtual void SetEx(int database, in RedisKey key, TimeSpan timeout, in RedisValue value) + { + Set(database, key, value); + Expire(database, key, timeout); + } + + [RedisCommand(3, nameof(RedisCommand.CLIENT), "setname", LockFree = true)] + protected virtual TypedRedisValue ClientSetname(RedisClient client, in RedisRequest request) { client.Name = 
request.GetString(2); return TypedRedisValue.OK; } - [RedisCommand(2, "client", "getname", LockFree = true)] - protected virtual TypedRedisValue ClientGetname(RedisClient client, RedisRequest request) + [RedisCommand(2, nameof(RedisCommand.CLIENT), "getname", LockFree = true)] + protected virtual TypedRedisValue ClientGetname(RedisClient client, in RedisRequest request) => TypedRedisValue.BulkString(client.Name); - [RedisCommand(3, "client", "reply", LockFree = true)] - protected virtual TypedRedisValue ClientReply(RedisClient client, RedisRequest request) + [RedisCommand(3, nameof(RedisCommand.CLIENT), "reply", LockFree = true)] + protected virtual TypedRedisValue ClientReply(RedisClient client, in RedisRequest request) { - if (request.IsString(2, "on")) client.SkipReplies = -1; // reply to nothing - else if (request.IsString(2, "off")) client.SkipReplies = 0; // reply to everything - else if (request.IsString(2, "skip")) client.SkipReplies = 2; // this one, and the next one + if (request.IsString(2, "on"u8)) client.SkipReplies = -1; // reply to nothing + else if (request.IsString(2, "off"u8)) client.SkipReplies = 0; // reply to everything + else if (request.IsString(2, "skip"u8)) client.SkipReplies = 2; // this one, and the next one else return TypedRedisValue.Error("ERR syntax error"); return TypedRedisValue.OK; } - [RedisCommand(-1)] - protected virtual TypedRedisValue Cluster(RedisClient client, RedisRequest request) - => request.CommandNotFound(); + [RedisCommand(2, nameof(RedisCommand.CLIENT), "id", LockFree = true)] + protected virtual TypedRedisValue ClientId(RedisClient client, in RedisRequest request) + => TypedRedisValue.Integer(client.Id); + + [RedisCommand(4, nameof(RedisCommand.CLIENT), "setinfo", LockFree = true)] + protected virtual TypedRedisValue ClientSetInfo(RedisClient client, in RedisRequest request) + => TypedRedisValue.OK; // only exists to keep logs clean + + private bool IsClusterEnabled(out TypedRedisValue fault) + { + if (ServerType == 
ServerType.Cluster) + { + fault = default; + return true; + } + fault = TypedRedisValue.Error("ERR This instance has cluster support disabled"); + return false; + } + + [RedisCommand(2, nameof(RedisCommand.CLUSTER), subcommand: "nodes", LockFree = true)] + protected virtual TypedRedisValue ClusterNodes(RedisClient client, in RedisRequest request) + { + if (!IsClusterEnabled(out TypedRedisValue fault)) return fault; + + var sb = new StringBuilder(); + foreach (var pair in _nodes.OrderBy(x => x.Key, EndPointComparer.Instance)) + { + var node = pair.Value; + sb.Append(node.Id).Append(" ").Append(node.Host).Append(":").Append(node.Port).Append("@1").Append(node.Port).Append(" "); + if (node == client.Node) + { + sb.Append("myself,"); + } + sb.Append("master - 0 0 1 connected"); + foreach (var range in node.Slots) + { + sb.Append(" ").Append(range.ToString()); + } + sb.AppendLine(); + } + return TypedRedisValue.BulkString(sb.ToString()); + } + + [RedisCommand(2, nameof(RedisCommand.CLUSTER), subcommand: "slots", LockFree = true)] + protected virtual TypedRedisValue ClusterSlots(RedisClient client, in RedisRequest request) + { + if (!IsClusterEnabled(out TypedRedisValue fault)) return fault; + + int count = 0, index = 0; + foreach (var pair in _nodes) + { + count += pair.Value.Slots.Length; + } + var slots = TypedRedisValue.Rent(count, out var slotsSpan, RespPrefix.Array); + foreach (var pair in _nodes.OrderBy(x => x.Key, EndPointComparer.Instance)) + { + string host = GetHost(pair.Key, out int port); + foreach (var range in pair.Value.Slots) + { + if (index >= count) break; // someone changed things while we were working + slotsSpan[index++] = TypedRedisValue.Rent(3, out var slotSpan, RespPrefix.Array); + slotSpan[0] = TypedRedisValue.Integer(range.From); + slotSpan[1] = TypedRedisValue.Integer(range.To); + slotSpan[2] = TypedRedisValue.Rent(4, out var nodeSpan, RespPrefix.Array); + nodeSpan[0] = TypedRedisValue.BulkString(host); + nodeSpan[1] = 
TypedRedisValue.Integer(port); + nodeSpan[2] = TypedRedisValue.BulkString(pair.Value.Id); + nodeSpan[3] = TypedRedisValue.EmptyArray(RespPrefix.Array); + } + } + return slots; + } + + private sealed class EndPointComparer : IComparer + { + private EndPointComparer() { } + public static readonly EndPointComparer Instance = new(); + + public int Compare(EndPoint x, EndPoint y) + { + if (x is null) return y is null ? 0 : -1; + if (y is null) return 1; + if (x is IPEndPoint ipX && y is IPEndPoint ipY) + { + // ignore the address, go by port alone + return ipX.Port.CompareTo(ipY.Port); + } + if (x is DnsEndPoint dnsX && y is DnsEndPoint dnsY) + { + var delta = dnsX.Host.CompareTo(dnsY.Host, StringComparison.Ordinal); + if (delta != 0) return delta; + return dnsX.Port.CompareTo(dnsY.Port); + } + + return 0; // whatever + } + } + + public static string GetHost(EndPoint endpoint, out int port) + { + if (endpoint is IPEndPoint ip) + { + port = ip.Port; + return ip.Address.ToString(); + } + if (endpoint is DnsEndPoint dns) + { + port = dns.Port; + return dns.Host; + } + throw new NotSupportedException("Unknown endpoint type: " + endpoint.GetType().Name); + } + + public sealed class Node + { + public override string ToString() + { + var sb = new StringBuilder(); + sb.Append(Host).Append(":").Append(Port).Append(" ("); + var slots = _slots; + if (slots is null) + { + sb.Append("all keys"); + } + else + { + bool first = true; + foreach (var slot in Slots) + { + if (!first) sb.Append(","); + sb.Append(slot); + first = false; + } + + if (first) sb.Append("empty"); + } + sb.Append(")"); + return sb.ToString(); + } + + public string Host { get; } + + public int Port { get; } + public string Id { get; } = NewId(); + + private SlotRange[] _slots; + + private readonly RedisServer _server; + public RedisServer Server => _server; + public Node(RedisServer server, EndPoint endpoint) + { + Host = GetHost(endpoint, out var port); + Port = port; + _server = server; + } + + public void 
UpdateSlots(SlotRange[] slots) => _slots = slots; + public ReadOnlySpan Slots => _slots ?? SlotRange.SharedAllSlots; + public bool CheckCrossSlot => _server.CheckCrossSlot; + + public bool HasSlot(int hashSlot) + { + if (hashSlot == ServerSelectionStrategy.NoSlot) return true; + var slots = _slots; + if (slots is null) return true; // all nodes + foreach (var slot in slots) + { + if (slot.Includes(hashSlot)) return true; + } + return false; + } + + public bool HasSlot(in RedisKey key) + { + var slots = _slots; + if (slots is null) return true; // all nodes + var hashSlot = GetHashSlot(key); + foreach (var slot in slots) + { + if (slot.Includes(hashSlot)) return true; + } + return false; + } + + public bool HasSlot(ReadOnlySpan key) + { + var slots = _slots; + if (slots is null) return true; // all nodes + var hashSlot = ServerSelectionStrategy.GetClusterSlot(key); + foreach (var slot in slots) + { + if (slot.Includes(hashSlot)) return true; + } + return false; + } + + private static string NewId() + { + Span data = stackalloc char[40]; +#if NET + var rand = Random.Shared; +#else + var rand = new Random(); +#endif + ReadOnlySpan alphabet = "0123456789abcdef"; + for (int i = 0; i < data.Length; i++) + { + data[i] = alphabet[rand.Next(alphabet.Length)]; + } + return data.ToString(); + } + + public void AddSlot(int hashSlot) + { + SlotRange[] oldSlots, newSlots; + do + { + oldSlots = _slots; + newSlots = oldSlots; + if (oldSlots is null) + { + newSlots = [new SlotRange(hashSlot, hashSlot)]; + } + else + { + bool found = false; + int index = 0; + foreach (var slot in oldSlots) + { + if (slot.Includes(hashSlot)) return; // already covered + if (slot.To == hashSlot - 1) + { + // extend the range + newSlots = new SlotRange[oldSlots.Length]; + oldSlots.AsSpan().CopyTo(newSlots); + newSlots[index] = new SlotRange(slot.From, hashSlot); + found = true; + break; + } + + index++; + } + + if (!found) + { + newSlots = [..oldSlots, new SlotRange(hashSlot, hashSlot)]; + 
Array.Sort(newSlots); + } + } + } + while (Interlocked.CompareExchange(ref _slots, newSlots, oldSlots) != oldSlots); + } + + public bool RemoveSlot(int hashSlot) + { + SlotRange[] oldSlotsRaw, newSlots; + do + { + oldSlotsRaw = _slots; + newSlots = oldSlotsRaw; + // avoid the implicit null "all slots" usage + var oldSlots = oldSlotsRaw ?? SlotRange.SharedAllSlots; + bool found = false; + int index = 0; + foreach (var s in oldSlots) + { + if (s.Includes(hashSlot)) + { + found = true; + var oldSpan = oldSlots.AsSpan(); + if (s.IsSingleSlot) + { + // remove it + newSlots = new SlotRange[oldSlots.Length - 1]; + if (index > 0) oldSpan.Slice(0, index).CopyTo(newSlots); + if (index < oldSlots.Length - 1) oldSpan.Slice(index + 1).CopyTo(newSlots.AsSpan(index)); + } + else if (s.From == hashSlot) + { + // truncate the start + newSlots = new SlotRange[oldSlots.Length]; + oldSpan.CopyTo(newSlots); + newSlots[index] = new SlotRange(s.From + 1, s.To); + } + else if (s.To == hashSlot) + { + // truncate the end + newSlots = new SlotRange[oldSlots.Length]; + oldSpan.CopyTo(newSlots); + newSlots[index] = new SlotRange(s.From, s.To - 1); + } + else + { + // split it + newSlots = new SlotRange[oldSlots.Length + 1]; + if (index > 0) oldSpan.Slice(0, index).CopyTo(newSlots); + newSlots[index] = new SlotRange(s.From, hashSlot - 1); + newSlots[index + 1] = new SlotRange(hashSlot + 1, s.To); + if (index < oldSlots.Length - 1) oldSpan.Slice(index + 1).CopyTo(newSlots.AsSpan(index + 2)); + } + break; + } + index++; + } + + if (!found) return false; + } + while (Interlocked.CompareExchange(ref _slots, newSlots, oldSlotsRaw) != oldSlotsRaw); + + return true; + } + + public void AssertKey(in RedisKey key) + { + var slots = _slots; + if (slots is not null) + { + var hashSlot = GetHashSlot(key); + if (!HasSlot(hashSlot)) KeyMovedException.Throw(hashSlot); + } + } + + public void Touch(int db, in RedisKey key) => _server.Touch(db, key); + + public void OnOutOfBand(RedisClient client, 
TypedRedisValue message) + => _server.OnOutOfBand(client, message); + } + + public virtual bool CheckCrossSlot => ServerType == ServerType.Cluster; + + protected override Node GetNode(int hashSlot) + { + foreach (var pair in _nodes) + { + if (pair.Value.HasSlot(hashSlot)) return pair.Value; + } + return base.GetNode(hashSlot); + } [RedisCommand(-3)] - protected virtual TypedRedisValue Lpush(RedisClient client, RedisRequest request) + protected virtual TypedRedisValue Lpush(RedisClient client, in RedisRequest request) { var key = request.GetKey(1); long length = -1; @@ -127,7 +795,7 @@ protected virtual TypedRedisValue Lpush(RedisClient client, RedisRequest request } [RedisCommand(-3)] - protected virtual TypedRedisValue Rpush(RedisClient client, RedisRequest request) + protected virtual TypedRedisValue Rpush(RedisClient client, in RedisRequest request) { var key = request.GetKey(1); long length = -1; @@ -139,36 +807,36 @@ protected virtual TypedRedisValue Rpush(RedisClient client, RedisRequest request } [RedisCommand(2)] - protected virtual TypedRedisValue Lpop(RedisClient client, RedisRequest request) + protected virtual TypedRedisValue Lpop(RedisClient client, in RedisRequest request) => TypedRedisValue.BulkString(Lpop(client.Database, request.GetKey(1))); [RedisCommand(2)] - protected virtual TypedRedisValue Rpop(RedisClient client, RedisRequest request) + protected virtual TypedRedisValue Rpop(RedisClient client, in RedisRequest request) => TypedRedisValue.BulkString(Rpop(client.Database, request.GetKey(1))); [RedisCommand(2)] - protected virtual TypedRedisValue Llen(RedisClient client, RedisRequest request) - => TypedRedisValue.Integer(Llen(client.Database, request.GetKey(1))); + protected virtual TypedRedisValue Llen(RedisClient client, in RedisRequest request) + => TypedRedisValue.Integer(Llen(client.Database, request.GetKey(1, KeyFlags.ReadOnly))); - protected virtual long Lpush(int database, RedisKey key, RedisValue value) => throw new 
NotSupportedException(); - protected virtual long Rpush(int database, RedisKey key, RedisValue value) => throw new NotSupportedException(); - protected virtual long Llen(int database, RedisKey key) => throw new NotSupportedException(); - protected virtual RedisValue Rpop(int database, RedisKey key) => throw new NotSupportedException(); - protected virtual RedisValue Lpop(int database, RedisKey key) => throw new NotSupportedException(); + protected virtual long Lpush(int database, in RedisKey key, in RedisValue value) => throw new NotSupportedException(); + protected virtual long Rpush(int database, in RedisKey key, in RedisValue value) => throw new NotSupportedException(); + protected virtual long Llen(int database, in RedisKey key) => throw new NotSupportedException(); + protected virtual RedisValue Rpop(int database, in RedisKey key) => throw new NotSupportedException(); + protected virtual RedisValue Lpop(int database, in RedisKey key) => throw new NotSupportedException(); [RedisCommand(4)] - protected virtual TypedRedisValue LRange(RedisClient client, RedisRequest request) + protected virtual TypedRedisValue LRange(RedisClient client, in RedisRequest request) { - var key = request.GetKey(1); + var key = request.GetKey(1, KeyFlags.ReadOnly); long start = request.GetInt64(2), stop = request.GetInt64(3); var len = Llen(client.Database, key); - if (len == 0) return TypedRedisValue.EmptyArray; + if (len == 0) return TypedRedisValue.EmptyArray(RespPrefix.Array); if (start < 0) start = len + start; if (stop < 0) stop = len + stop; - if (stop < 0 || start >= len || stop < start) return TypedRedisValue.EmptyArray; + if (stop < 0 || start >= len || stop < start) return TypedRedisValue.EmptyArray(RespPrefix.Array); if (start < 0) start = 0; else if (start >= len) start = len - 1; @@ -176,11 +844,11 @@ protected virtual TypedRedisValue LRange(RedisClient client, RedisRequest reques if (stop < 0) stop = 0; else if (stop >= len) stop = len - 1; - var arr = 
TypedRedisValue.Rent(checked((int)((stop - start) + 1)), out var span); + var arr = TypedRedisValue.Rent(checked((int)((stop - start) + 1)), out var span, RespPrefix.Array); LRange(client.Database, key, start, span); return arr; } - protected virtual void LRange(int database, RedisKey key, long start, Span arr) => throw new NotSupportedException(); + protected virtual void LRange(int database, in RedisKey key, long start, Span arr) => throw new NotSupportedException(); protected virtual void OnUpdateServerConfiguration() { } protected RedisConfig ServerConfiguration { get; } = RedisConfig.Create(); @@ -213,17 +881,17 @@ internal int CountMatch(string pattern) return count; } } - [RedisCommand(3, "config", "get", LockFree = true)] - protected virtual TypedRedisValue Config(RedisClient client, RedisRequest request) + [RedisCommand(3, nameof(RedisCommand.CONFIG), "get", LockFree = true)] + protected virtual TypedRedisValue Config(RedisClient client, in RedisRequest request) { var pattern = request.GetString(2); OnUpdateServerConfiguration(); var config = ServerConfiguration; var matches = config.CountMatch(pattern); - if (matches == 0) return TypedRedisValue.EmptyArray; + if (matches == 0) return TypedRedisValue.EmptyArray(RespPrefix.Map); - var arr = TypedRedisValue.Rent(2 * matches, out var span); + var arr = TypedRedisValue.Rent(2 * matches, out var span, RespPrefix.Map); int index = 0; foreach (var pair in config.Wrapped) { @@ -242,23 +910,23 @@ protected virtual TypedRedisValue Config(RedisClient client, RedisRequest reques } [RedisCommand(2, LockFree = true)] - protected virtual TypedRedisValue Echo(RedisClient client, RedisRequest request) + protected virtual TypedRedisValue Echo(RedisClient client, in RedisRequest request) => TypedRedisValue.BulkString(request.GetValue(1)); [RedisCommand(2)] - protected virtual TypedRedisValue Exists(RedisClient client, RedisRequest request) + protected virtual TypedRedisValue Exists(RedisClient client, in RedisRequest 
request) { int count = 0; var db = client.Database; for (int i = 1; i < request.Count; i++) { - if (Exists(db, request.GetKey(i))) + if (Exists(db, request.GetKey(i, KeyFlags.ReadOnly))) count++; } return TypedRedisValue.Integer(count); } - protected virtual bool Exists(int database, RedisKey key) + protected virtual bool Exists(int database, in RedisKey key) { try { @@ -268,32 +936,32 @@ protected virtual bool Exists(int database, RedisKey key) } [RedisCommand(2)] - protected virtual TypedRedisValue Get(RedisClient client, RedisRequest request) - => TypedRedisValue.BulkString(Get(client.Database, request.GetKey(1))); + protected virtual TypedRedisValue Get(RedisClient client, in RedisRequest request) + => TypedRedisValue.BulkString(Get(client.Database, request.GetKey(1, KeyFlags.ReadOnly))); - protected virtual RedisValue Get(int database, RedisKey key) => throw new NotSupportedException(); + protected virtual RedisValue Get(int database, in RedisKey key) => throw new NotSupportedException(); [RedisCommand(3)] - protected virtual TypedRedisValue Set(RedisClient client, RedisRequest request) + protected virtual TypedRedisValue Set(RedisClient client, in RedisRequest request) { Set(client.Database, request.GetKey(1), request.GetValue(2)); return TypedRedisValue.OK; } - protected virtual void Set(int database, RedisKey key, RedisValue value) => throw new NotSupportedException(); + protected virtual void Set(int database, in RedisKey key, in RedisValue value) => throw new NotSupportedException(); [RedisCommand(1)] - protected new virtual TypedRedisValue Shutdown(RedisClient client, RedisRequest request) + protected new virtual TypedRedisValue Shutdown(RedisClient client, in RedisRequest request) { DoShutdown(ShutdownReason.ClientInitiated); return TypedRedisValue.OK; } [RedisCommand(2)] - protected virtual TypedRedisValue Strlen(RedisClient client, RedisRequest request) - => TypedRedisValue.Integer(Strlen(client.Database, request.GetKey(1))); + protected virtual 
TypedRedisValue Strlen(RedisClient client, in RedisRequest request) + => TypedRedisValue.Integer(Strlen(client.Database, request.GetKey(1, KeyFlags.ReadOnly))); - protected virtual long Strlen(int database, RedisKey key) => Get(database, key).Length(); + protected virtual long Strlen(int database, in RedisKey key) => Get(database, key).Length(); [RedisCommand(-2)] - protected virtual TypedRedisValue Del(RedisClient client, RedisRequest request) + protected virtual TypedRedisValue Del(RedisClient client, in RedisRequest request) { int count = 0; for (int i = 1; i < request.Count; i++) @@ -303,16 +971,65 @@ protected virtual TypedRedisValue Del(RedisClient client, RedisRequest request) } return TypedRedisValue.Integer(count); } - protected virtual bool Del(int database, RedisKey key) => throw new NotSupportedException(); + protected virtual bool Del(int database, in RedisKey key) => throw new NotSupportedException(); + + [RedisCommand(2)] + protected virtual TypedRedisValue GetDel(RedisClient client, in RedisRequest request) + { + var key = request.GetKey(1); + var value = Get(client.Database, key); + if (!value.IsNull) Del(client.Database, key); + return TypedRedisValue.BulkString(value); + } [RedisCommand(1)] - protected virtual TypedRedisValue Dbsize(RedisClient client, RedisRequest request) + protected virtual TypedRedisValue Dbsize(RedisClient client, in RedisRequest request) => TypedRedisValue.Integer(Dbsize(client.Database)); protected virtual long Dbsize(int database) => throw new NotSupportedException(); + [RedisCommand(3)] + protected virtual TypedRedisValue Expire(RedisClient client, in RedisRequest request) + { + var key = request.GetKey(1); + var seconds = request.GetInt32(2); + return TypedRedisValue.Integer(Expire(client.Database, key, TimeSpan.FromSeconds(seconds)) ? 
1 : 0); + } + + [RedisCommand(3)] + protected virtual TypedRedisValue PExpire(RedisClient client, in RedisRequest request) + { + var key = request.GetKey(1); + var millis = request.GetInt64(2); + return TypedRedisValue.Integer(Expire(client.Database, key, TimeSpan.FromMilliseconds(millis)) ? 1 : 0); + } + + [RedisCommand(2)] + protected virtual TypedRedisValue Ttl(RedisClient client, in RedisRequest request) + { + var key = request.GetKey(1, KeyFlags.ReadOnly); + var ttl = Ttl(client.Database, key); + if (ttl == null || ttl <= TimeSpan.Zero) return TypedRedisValue.Integer(-2); + if (ttl == TimeSpan.MaxValue) return TypedRedisValue.Integer(-1); + return TypedRedisValue.Integer((int)ttl.Value.TotalSeconds); + } + + protected virtual TimeSpan? Ttl(int database, in RedisKey key) => throw new NotSupportedException(); + + [RedisCommand(2)] + protected virtual TypedRedisValue Pttl(RedisClient client, in RedisRequest request) + { + var key = request.GetKey(1, KeyFlags.ReadOnly); + var ttl = Ttl(client.Database, key); + if (ttl == null || ttl <= TimeSpan.Zero) return TypedRedisValue.Integer(-2); + if (ttl == TimeSpan.MaxValue) return TypedRedisValue.Integer(-1); + return TypedRedisValue.Integer((long)ttl.Value.TotalMilliseconds); + } + + protected virtual bool Expire(int database, in RedisKey key, TimeSpan timeout) => throw new NotSupportedException(); + [RedisCommand(1)] - protected virtual TypedRedisValue Flushall(RedisClient client, RedisRequest request) + protected virtual TypedRedisValue Flushall(RedisClient client, in RedisRequest request) { var count = Databases; for (int i = 0; i < count; i++) @@ -323,7 +1040,7 @@ protected virtual TypedRedisValue Flushall(RedisClient client, RedisRequest requ } [RedisCommand(1)] - protected virtual TypedRedisValue Flushdb(RedisClient client, RedisRequest request) + protected virtual TypedRedisValue Flushdb(RedisClient client, in RedisRequest request) { Flushdb(client.Database); return TypedRedisValue.OK; @@ -331,7 +1048,7 @@ 
protected virtual TypedRedisValue Flushdb(RedisClient client, RedisRequest reque protected virtual void Flushdb(int database) => throw new NotSupportedException(); [RedisCommand(-1, LockFree = true, MaxArgs = 2)] - protected virtual TypedRedisValue Info(RedisClient client, RedisRequest request) + protected virtual TypedRedisValue Info(RedisClient client, in RedisRequest request) { var info = Info(request.Count == 1 ? null : request.GetString(1)); return TypedRedisValue.BulkString(info); @@ -347,24 +1064,60 @@ bool IsMatch(string section) => string.IsNullOrWhiteSpace(selected) if (IsMatch("Persistence")) Info(sb, "Persistence"); if (IsMatch("Stats")) Info(sb, "Stats"); if (IsMatch("Replication")) Info(sb, "Replication"); + if (IsMatch("Cluster")) Info(sb, "Cluster"); if (IsMatch("Keyspace")) Info(sb, "Keyspace"); return sb.ToString(); } [RedisCommand(2)] - protected virtual TypedRedisValue Keys(RedisClient client, RedisRequest request) + protected virtual TypedRedisValue Keys(RedisClient client, in RedisRequest request) { List found = null; - foreach (var key in Keys(client.Database, request.GetKey(1))) + bool checkSlot = ServerType is ServerType.Cluster; + var node = client.Node ?? 
DefaultNode; + foreach (var key in Keys(client.Database, request.GetKey(1, flags: KeyFlags.NoSlotCheck | KeyFlags.ReadOnly))) { + if (checkSlot && !node.HasSlot(key)) continue; if (found == null) found = new List(); found.Add(TypedRedisValue.BulkString(key.AsRedisValue())); } - if (found == null) return TypedRedisValue.EmptyArray; - return TypedRedisValue.MultiBulk(found); + if (found == null) return TypedRedisValue.EmptyArray(RespPrefix.Array); + return TypedRedisValue.MultiBulk(found, RespPrefix.Array); + } + protected virtual IEnumerable Keys(int database, in RedisKey pattern) => throw new NotSupportedException(); + + private static readonly Version s_DefaultServerVersion = new(1, 0, 0); + + private string _versionString; + private string VersionString => _versionString; + private static string FormatVersion(Version v) + { + var sb = new StringBuilder().Append(v.Major).Append('.').Append(v.Minor); + if (v.Revision >= 0) sb.Append('.').Append(v.Revision); + if (v.Build >= 0) sb.Append('.').Append(v.Build); + return sb.ToString(); + } + + public Version RedisVersion + { + get; + set + { + if (field == value) return; + field = value; + _versionString = FormatVersion(value); + } } - protected virtual IEnumerable Keys(int database, RedisKey pattern) => throw new NotSupportedException(); + public DateTime StartTime { get; set; } = DateTime.UtcNow; + public ServerType ServerType { get; set; } = ServerType.Standalone; + + private string ModeString => ServerType switch + { + ServerType.Cluster => "cluster", + ServerType.Sentinel => "sentinel", + _ => "standalone", + }; protected virtual void Info(StringBuilder sb, string section) { StringBuilder AddHeader() @@ -376,16 +1129,20 @@ StringBuilder AddHeader() switch (section) { case "Server": - AddHeader().AppendLine("redis_version:1.0") - .AppendLine("redis_mode:standalone") + var v = RedisVersion; + AddHeader().Append("redis_version:").AppendLine(VersionString) + .Append("redis_mode:").Append(ModeString).AppendLine() 
.Append("os:").Append(Environment.OSVersion).AppendLine() .Append("arch_bits:x").Append(IntPtr.Size * 8).AppendLine(); using (var process = Process.GetCurrentProcess()) { - sb.Append("process:").Append(process.Id).AppendLine(); + sb.Append("process_id:").Append(process.Id).AppendLine(); } - //var port = TcpPort(); - //if (port >= 0) sb.Append("tcp_port:").Append(port).AppendLine(); + var time = DateTime.UtcNow - StartTime; + sb.Append("uptime_in_seconds:").Append((int)time.TotalSeconds).AppendLine(); + sb.Append("uptime_in_days:").Append((int)time.TotalDays).AppendLine(); + // var port = TcpPort(); + // if (port >= 0) sb.Append("tcp_port:").Append(port).AppendLine(); break; case "Clients": AddHeader().Append("connected_clients:").Append(ClientCount).AppendLine(); @@ -402,30 +1159,34 @@ StringBuilder AddHeader() case "Replication": AddHeader().AppendLine("role:master"); break; + case "Cluster": + AddHeader().Append("cluster_enabled:").Append(ServerType is ServerType.Cluster ? 1 : 0).AppendLine(); + break; case "Keyspace": break; } } - [RedisCommand(2, "memory", "purge")] - protected virtual TypedRedisValue MemoryPurge(RedisClient client, RedisRequest request) + + [RedisCommand(2, nameof(RedisCommand.MEMORY), "purge")] + protected virtual TypedRedisValue MemoryPurge(RedisClient client, in RedisRequest request) { GC.Collect(GC.MaxGeneration, GCCollectionMode.Forced); return TypedRedisValue.OK; } [RedisCommand(-2)] - protected virtual TypedRedisValue Mget(RedisClient client, RedisRequest request) + protected virtual TypedRedisValue Mget(RedisClient client, in RedisRequest request) { int argCount = request.Count; - var arr = TypedRedisValue.Rent(argCount - 1, out var span); + var arr = TypedRedisValue.Rent(argCount - 1, out var span, RespPrefix.Map); var db = client.Database; for (int i = 1; i < argCount; i++) { - span[i - 1] = TypedRedisValue.BulkString(Get(db, request.GetKey(i))); + span[i - 1] = TypedRedisValue.BulkString(Get(db, request.GetKey(i, 
KeyFlags.ReadOnly))); } return arr; } [RedisCommand(-3)] - protected virtual TypedRedisValue Mset(RedisClient client, RedisRequest request) + protected virtual TypedRedisValue Mset(RedisClient client, in RedisRequest request) { int argCount = request.Count; var db = client.Database; @@ -435,88 +1196,59 @@ protected virtual TypedRedisValue Mset(RedisClient client, RedisRequest request) } return TypedRedisValue.OK; } + [RedisCommand(-1, LockFree = true, MaxArgs = 2)] - protected virtual TypedRedisValue Ping(RedisClient client, RedisRequest request) - => TypedRedisValue.SimpleString(request.Count == 1 ? "PONG" : request.GetString(1)); + protected virtual TypedRedisValue Ping(RedisClient client, in RedisRequest request) + { + if (client.IsResp2 & client.IsSubscriber) + { + var reply = TypedRedisValue.Rent(2, out var span, RespPrefix.Array); + span[0] = TypedRedisValue.BulkString("pong"); + RedisValue value = request.Count == 1 ? RedisValue.Null : request.GetValue(1); + span[1] = TypedRedisValue.BulkString(value); + return reply; + } + return TypedRedisValue.SimpleString(request.Count == 1 ? 
"PONG" : request.GetString(1)); + } [RedisCommand(1, LockFree = true)] - protected virtual TypedRedisValue Quit(RedisClient client, RedisRequest request) + protected virtual TypedRedisValue Quit(RedisClient client, in RedisRequest request) { + client.Complete(); RemoveClient(client); return TypedRedisValue.OK; } [RedisCommand(1, LockFree = true)] - protected virtual TypedRedisValue Role(RedisClient client, RedisRequest request) + protected virtual TypedRedisValue Role(RedisClient client, in RedisRequest request) { - var arr = TypedRedisValue.Rent(3, out var span); + var arr = TypedRedisValue.Rent(3, out var span, RespPrefix.Array); span[0] = TypedRedisValue.BulkString("master"); span[1] = TypedRedisValue.Integer(0); - span[2] = TypedRedisValue.EmptyArray; + span[2] = TypedRedisValue.EmptyArray(RespPrefix.Array); return arr; } [RedisCommand(2, LockFree = true)] - protected virtual TypedRedisValue Select(RedisClient client, RedisRequest request) + protected virtual TypedRedisValue Select(RedisClient client, in RedisRequest request) { var raw = request.GetValue(1); - if (!raw.IsInteger) return TypedRedisValue.Error("ERR invalid DB index"); - int db = (int)raw; + if (!raw.TryParse(out int db)) return TypedRedisValue.Error("ERR invalid DB index"); if (db < 0 || db >= Databases) return TypedRedisValue.Error("ERR DB index is out of range"); client.Database = db; return TypedRedisValue.OK; } - [RedisCommand(-2)] - protected virtual TypedRedisValue Subscribe(RedisClient client, RedisRequest request) - => SubscribeImpl(client, request); - [RedisCommand(-2)] - protected virtual TypedRedisValue Unsubscribe(RedisClient client, RedisRequest request) - => SubscribeImpl(client, request); - - private TypedRedisValue SubscribeImpl(RedisClient client, RedisRequest request) - { - var reply = TypedRedisValue.Rent(3 * (request.Count - 1), out var span); - int index = 0; - request.TryGetCommandBytes(0, out var cmd); - var cmdString = TypedRedisValue.BulkString(cmd.ToArray()); - var mode 
= cmd[0] == (byte)'p' ? RedisChannel.PatternMode.Pattern : RedisChannel.PatternMode.Literal; - for (int i = 1; i < request.Count; i++) - { - var channel = request.GetChannel(i, mode); - int count; - if (s_Subscribe.Equals(cmd)) - { - count = client.Subscribe(channel); - } - else if (s_Unsubscribe.Equals(cmd)) - { - count = client.Unsubscribe(channel); - } - else - { - reply.Recycle(index); - return TypedRedisValue.Nil; - } - span[index++] = cmdString; - span[index++] = TypedRedisValue.BulkString((byte[])channel); - span[index++] = TypedRedisValue.Integer(count); - } - return reply; - } - private static readonly CommandBytes - s_Subscribe = new CommandBytes("subscribe"), - s_Unsubscribe = new CommandBytes("unsubscribe"); private static readonly DateTime UnixEpoch = new DateTime(1970, 1, 1, 0, 0, 0, DateTimeKind.Utc); [RedisCommand(1, LockFree = true)] - protected virtual TypedRedisValue Time(RedisClient client, RedisRequest request) + protected virtual TypedRedisValue Time(RedisClient client, in RedisRequest request) { var delta = Time() - UnixEpoch; var ticks = delta.Ticks; var seconds = ticks / TimeSpan.TicksPerSecond; var micros = (ticks % TimeSpan.TicksPerSecond) / (TimeSpan.TicksPerMillisecond / 1000); - var reply = TypedRedisValue.Rent(2, out var span); + var reply = TypedRedisValue.Rent(2, out var span, RespPrefix.Array); span[0] = TypedRedisValue.BulkString(seconds); span[1] = TypedRedisValue.BulkString(micros); return reply; @@ -524,25 +1256,49 @@ protected virtual TypedRedisValue Time(RedisClient client, RedisRequest request) protected virtual DateTime Time() => DateTime.UtcNow; [RedisCommand(-2)] - protected virtual TypedRedisValue Unlink(RedisClient client, RedisRequest request) + protected virtual TypedRedisValue Unlink(RedisClient client, in RedisRequest request) => Del(client, request); [RedisCommand(2)] - protected virtual TypedRedisValue Incr(RedisClient client, RedisRequest request) + protected virtual TypedRedisValue Incr(RedisClient client, in 
RedisRequest request) => TypedRedisValue.Integer(IncrBy(client.Database, request.GetKey(1), 1)); [RedisCommand(2)] - protected virtual TypedRedisValue Decr(RedisClient client, RedisRequest request) + protected virtual TypedRedisValue Decr(RedisClient client, in RedisRequest request) => TypedRedisValue.Integer(IncrBy(client.Database, request.GetKey(1), -1)); [RedisCommand(3)] - protected virtual TypedRedisValue IncrBy(RedisClient client, RedisRequest request) + protected virtual TypedRedisValue DecrBy(RedisClient client, in RedisRequest request) + => TypedRedisValue.Integer(IncrBy(client.Database, request.GetKey(1), -request.GetInt64(2))); + + [RedisCommand(3)] + protected virtual TypedRedisValue IncrBy(RedisClient client, in RedisRequest request) => TypedRedisValue.Integer(IncrBy(client.Database, request.GetKey(1), request.GetInt64(2))); - protected virtual long IncrBy(int database, RedisKey key, long delta) + protected virtual long IncrBy(int database, in RedisKey key, long delta) { var value = ((long)Get(database, key)) + delta; Set(database, key, value); return value; } + + public virtual void OnFlush(RedisClient client, int messages, long bytes) + { + } + } + + internal static partial class HelloSubFieldsMetadata + { + [AsciiHash(CaseSensitive = false)] + public static partial bool TryParseCI(ReadOnlySpan command, out HelloSubFields value); + } + + internal enum HelloSubFields + { + [AsciiHash("")] + None = 0, + [AsciiHash("AUTH")] + Auth, + [AsciiHash("SETNAME")] + SetName, } } diff --git a/toys/StackExchange.Redis.Server/RespReaderExtensions.cs b/toys/StackExchange.Redis.Server/RespReaderExtensions.cs new file mode 100644 index 000000000..8ee5f921c --- /dev/null +++ b/toys/StackExchange.Redis.Server/RespReaderExtensions.cs @@ -0,0 +1,213 @@ +#nullable enable +extern alias seredis; +using System; +using System.Diagnostics; +using System.Threading.Tasks; +using RESPite.Messages; + +namespace StackExchange.Redis; // this really belongs in SE.Redis, will be moved 
in v3 + +internal static class RespReaderExtensions +{ + extension(in RespReader reader) + { + public RedisValue ReadRedisValue() + { + reader.DemandScalar(); + if (reader.IsNull) return RedisValue.Null; + + return reader.Prefix switch + { + RespPrefix.Boolean => reader.ReadBoolean(), + RespPrefix.Integer => reader.ReadInt64(), + _ when reader.TryReadInt64(out var i64) => i64, + _ when reader.TryReadDouble(out var fp64) => fp64, + _ => reader.ReadByteArray(), + }; + } + + public string DebugReadTruncatedString(int maxChars) + { + if (!reader.IsScalar) return ""; + try + { + var s = reader.ReadString() ?? ""; + return s.Length <= maxChars ? s : s.Substring(0, maxChars) + "..."; + } + catch + { + return ""; + } + } + + public RedisKey ReadRedisKey() => (RedisKey)reader.ReadByteArray(); + + public RedisChannel ReadRedisChannel(RedisChannel.RedisChannelOptions options) + => new(reader.ReadByteArray(), options); + + private bool TryGetFirst(out string first) + { + if (reader.IsNonNullAggregate && !reader.AggregateIsEmpty()) + { + var clone = reader.Clone(); + if (clone.TryMoveNext()) + { + unsafe + { + if (clone.IsScalar && + clone.TryParseScalar(&PhysicalConnection.PushKindMetadata.TryParse, out PhysicalConnection.PushKind kind)) + { + first = kind.ToString(); + return true; + } + } + + first = clone.GetOverview(); + return true; + } + } + first = ""; + return false; + } + + public string GetOverview() + { + // return reader.BufferUtf8(); // <== for when you really can't grok what is happening + if (reader.Prefix is RespPrefix.None) + { + var copy = reader; + copy.MovePastBof(); + return copy.Prefix is RespPrefix.None ? 
"(empty)" : copy.GetOverview(); + } + if (reader.IsNull) return "(null)"; + + return reader.Prefix switch + { + RespPrefix.SimpleString or RespPrefix.Integer or RespPrefix.SimpleError or RespPrefix.Double => $"{reader.Prefix}: {reader.ReadString()}", + RespPrefix.Push when reader.TryGetFirst(out var first) => $"{reader.Prefix} ({first}): {reader.AggregateLength()} items", + _ when reader.IsScalar => $"{reader.Prefix}: {reader.ScalarLength()} bytes, '{reader.DebugReadTruncatedString(16)}'", + _ when reader.IsAggregate => $"{reader.Prefix}: {reader.AggregateLength()} items", + _ => $"(unknown: {reader.Prefix})", + }; + } + + public RespPrefix GetFirstPrefix() + { + var prefix = reader.Prefix; + if (prefix is RespPrefix.None) + { + var mutable = reader; + mutable.MovePastBof(); + prefix = mutable.Prefix; + } + return prefix; + } + + /* + public bool AggregateHasAtLeast(int count) + { + reader.DemandAggregate(); + if (reader.IsNull) return false; + if (reader.IsStreaming) return CheckStreamingAggregateAtLeast(in reader, count); + return reader.AggregateLength() >= count; + + static bool CheckStreamingAggregateAtLeast(in RespReader reader, int count) + { + var iter = reader.AggregateChildren(); + object? attributes = null; + while (count > 0 && iter.MoveNextRaw(null!, ref attributes)) + { + count--; + } + + return count == 0; + } + } + */ + } + + extension(ref RespReader reader) + { + public bool SafeTryMoveNext() => reader.TryMoveNext(checkError: false) & !reader.IsError; + + public void MovePastBof() + { + // if we're at BOF, read the first element, ignoring errors + if (reader.Prefix is RespPrefix.None) reader.SafeTryMoveNext(); + } + + public RedisValue[]? ReadPastRedisValues() + => reader.ReadPastArray(static (ref r) => r.ReadRedisValue(), scalar: true); + + public seredis::StackExchange.Redis.Lease? 
AsLease() + { + if (!reader.IsScalar) throw new InvalidCastException("Cannot convert to Lease: " + reader.Prefix); + if (reader.IsNull) return null; + + var length = reader.ScalarLength(); + if (length == 0) return seredis::StackExchange.Redis.Lease.Empty; + + var lease = seredis::StackExchange.Redis.Lease.Create(length, clear: false); + if (reader.TryGetSpan(out var span)) + { + span.CopyTo(lease.Span); + } + else + { + var buffer = reader.Buffer(lease.Span); + Debug.Assert(buffer.Length == length, "buffer length mismatch"); + } + return lease; + } + } + + public static RespPrefix GetRespPrefix(ReadOnlySpan frame) + { + var reader = new RespReader(frame); + reader.SafeTryMoveNext(); + return reader.Prefix; + } + + extension(RespPrefix prefix) + { + public ResultType ToResultType() => prefix switch + { + RespPrefix.Array => ResultType.Array, + RespPrefix.Attribute => ResultType.Attribute, + RespPrefix.BigInteger => ResultType.BigInteger, + RespPrefix.Boolean => ResultType.Boolean, + RespPrefix.BulkError => ResultType.BlobError, + RespPrefix.BulkString => ResultType.BulkString, + RespPrefix.SimpleString => ResultType.SimpleString, + RespPrefix.Map => ResultType.Map, + RespPrefix.Set => ResultType.Set, + RespPrefix.Double => ResultType.Double, + RespPrefix.Integer => ResultType.Integer, + RespPrefix.SimpleError => ResultType.Error, + RespPrefix.Null => ResultType.Null, + RespPrefix.VerbatimString => ResultType.VerbatimString, + RespPrefix.Push=> ResultType.Push, + _ => throw new ArgumentOutOfRangeException(nameof(prefix), prefix, null), + }; + } + + extension(T?[] array) where T : class + { + internal bool AnyNull() + { + foreach (var el in array) + { + if (el is null) return true; + } + + return false; + } + } + +#if !NET + extension(Task task) + { + public bool IsCompletedSuccessfully => task.Status is TaskStatus.RanToCompletion; + } +#endif +} diff --git a/toys/StackExchange.Redis.Server/RespServer.cs b/toys/StackExchange.Redis.Server/RespServer.cs index 
3edd3f9f3..5826d97f1 100644 --- a/toys/StackExchange.Redis.Server/RespServer.cs +++ b/toys/StackExchange.Redis.Server/RespServer.cs @@ -1,7 +1,8 @@ - -using System; +using System; using System.Buffers; +using System.Collections.Concurrent; using System.Collections.Generic; +using System.Diagnostics; using System.IO; using System.IO.Pipelines; using System.Linq; @@ -11,6 +12,9 @@ using System.Threading.Tasks; using Pipelines.Sockets.Unofficial; using Pipelines.Sockets.Unofficial.Arenas; +using RESPite; +using RESPite.Buffers; +using RESPite.Messages; namespace StackExchange.Redis.Server { @@ -21,7 +25,7 @@ public enum ShutdownReason ServerDisposed, ClientInitiated, } - private readonly List _clients = new List(); + private readonly TextWriter _output; protected RespServer(TextWriter output = null) @@ -30,23 +34,36 @@ protected RespServer(TextWriter output = null) _commands = BuildCommands(this); } - private static Dictionary BuildCommands(RespServer server) + public HashSet GetCommands() { - RedisCommandAttribute CheckSignatureAndGetAttribute(MethodInfo method) + var set = new HashSet(StringComparer.OrdinalIgnoreCase); + foreach (var kvp in _commands) + { + set.Add(kvp.Key.ToString()); + } + return set; + } + + private static Dictionary BuildCommands(RespServer server) + { + static RedisCommandAttribute CheckSignatureAndGetAttribute(MethodInfo method) { if (method.ReturnType != typeof(TypedRedisValue)) return null; var p = method.GetParameters(); - if (p.Length != 2 || p[0].ParameterType != typeof(RedisClient) || p[1].ParameterType != typeof(RedisRequest)) + if (p.Length != 2 || p[0].ParameterType != typeof(RedisClient) || p[1].ParameterType != typeof(RedisRequest).MakeByRefType()) return null; return (RedisCommandAttribute)Attribute.GetCustomAttribute(method, typeof(RedisCommandAttribute)); } - var grouped = from method in server.GetType().GetMethods(BindingFlags.Instance | BindingFlags.Public | BindingFlags.NonPublic) - let attrib = 
CheckSignatureAndGetAttribute(method) - where attrib != null - select new RespCommand(attrib, method, server) into cmd - group cmd by cmd.Command; - var result = new Dictionary(); + var grouped = ( + from method in server.GetType() + .GetMethods(BindingFlags.Instance | BindingFlags.Public | BindingFlags.NonPublic) + let attrib = CheckSignatureAndGetAttribute(method) + where attrib != null + select new RespCommand(attrib, method, server)) + .GroupBy(x => new AsciiHash(x.Command.ToUpperInvariant()), AsciiHash.CaseSensitiveEqualityComparer); + + var result = new Dictionary(AsciiHash.CaseSensitiveEqualityComparer); foreach (var grp in grouped) { RespCommand parent; @@ -59,7 +76,9 @@ RedisCommandAttribute CheckSignatureAndGetAttribute(MethodInfo method) { parent = grp.Single(); } - result.Add(new CommandBytes(grp.Key), parent); + + Debug.WriteLine($"Registering: {grp.Key}"); + result.Add(grp.Key, parent); } return result; } @@ -70,19 +89,20 @@ public string GetStats() AppendStats(sb); return sb.ToString(); } - protected virtual void AppendStats(StringBuilder sb) - { + + protected virtual void AppendStats(StringBuilder sb) => sb.Append("Current clients:\t").Append(ClientCount).AppendLine() - .Append("Total clients:\t").Append(TotalClientCount).AppendLine() - .Append("Total operations:\t").Append(TotalCommandsProcesed).AppendLine() - .Append("Error replies:\t").Append(TotalErrorCount).AppendLine(); - } + .Append("Total clients:\t").Append(TotalClientCount).AppendLine() + .Append("Total operations:\t").Append(TotalCommandsProcesed).AppendLine() + .Append("Error replies:\t").Append(TotalErrorCount).AppendLine(); [AttributeUsage(AttributeTargets.Method, AllowMultiple = false, Inherited = true)] protected sealed class RedisCommandAttribute : Attribute { - public RedisCommandAttribute(int arity, - string command = null, string subcommand = null) + public RedisCommandAttribute( + int arity, + string command = null, + string subcommand = null) { Command = command; SubCommand = 
subcommand; @@ -95,22 +115,24 @@ public RedisCommandAttribute(int arity, public int Arity { get; } public bool LockFree { get; set; } } - private readonly Dictionary _commands; + private readonly Dictionary _commands; private readonly struct RespCommand { public RespCommand(RedisCommandAttribute attrib, MethodInfo method, RespServer server) { _operation = (RespOperation)Delegate.CreateDelegate(typeof(RespOperation), server, method); - Command = (string.IsNullOrWhiteSpace(attrib.Command) ? method.Name : attrib.Command).Trim().ToLowerInvariant(); - CommandBytes = new CommandBytes(Command); + + var command = attrib.Command; + if (string.IsNullOrEmpty(command)) command = method.Name; + + Command = command; SubCommand = attrib.SubCommand?.Trim()?.ToLowerInvariant(); Arity = attrib.Arity; MaxArgs = attrib.MaxArgs; LockFree = attrib.LockFree; _subcommands = null; } - private CommandBytes CommandBytes { get; } public string Command { get; } public string SubCommand { get; } public bool IsSubCommand => !string.IsNullOrEmpty(SubCommand); @@ -130,7 +152,6 @@ private RespCommand(in RespCommand parent, RespCommand[] subs) if (subs == null || subs.Length == 0) throw new InvalidOperationException("Cannot add empty sub-commands"); Command = parent.Command; - CommandBytes = parent.CommandBytes; SubCommand = parent.SubCommand; Arity = parent.Arity; MaxArgs = parent.MaxArgs; @@ -139,6 +160,7 @@ private RespCommand(in RespCommand parent, RespCommand[] subs) _subcommands = subs; } public bool IsUnknown => _operation == null; + public RespCommand Resolve(in RedisRequest request) { if (request.Count >= 2) @@ -186,37 +208,55 @@ internal int NetArity() } } - private delegate TypedRedisValue RespOperation(RedisClient client, RedisRequest request); + private delegate TypedRedisValue RespOperation(RedisClient client, in RedisRequest request); // for extensibility, so that a subclass can get their own client type // to be used via ListenForConnections - public virtual RedisClient 
CreateClient() => new RedisClient(); + public virtual RedisClient CreateClient(RedisServer.Node node) => new(node); + + public virtual void OnClientConnected(RedisClient client, object state) { } - public int ClientCount + public int ClientCount => _clientLookup.Count; + public int TotalClientCount => _totalClientCount; + private int _nextId, _totalClientCount; + + public RedisClient AddClient(RedisServer.Node node, object state) { - get { lock (_clients) { return _clients.Count; } } + var client = CreateClient(node); + client.Id = Interlocked.Increment(ref _nextId); + Interlocked.Increment(ref _totalClientCount); + ThrowIfShutdown(); + _clientLookup[client.Id] = client; + OnClientConnected(client, state); + return client; } - public int TotalClientCount { get; private set; } - private int _nextId; - public RedisClient AddClient() + + protected int ForAllClients(TState state, Func func) { - var client = CreateClient(); - lock (_clients) + int count = 0; + foreach (var client in _clientLookup.Values) { - ThrowIfShutdown(); - client.Id = ++_nextId; - _clients.Add(client); - TotalClientCount++; + count += func(client, state); } - return client; + return count; } + + public bool TryGetClient(int id, out RedisClient client) => _clientLookup.TryGetValue(id, out client); + + private readonly ConcurrentDictionary _clientLookup = new(); + public bool RemoveClient(RedisClient client) { if (client == null) return false; - lock (_clients) + client.Closed = true; + return _clientLookup.TryRemove(client.Id, out _); + } + + protected virtual void Touch(int database, in RedisKey key) + { + foreach (var client in _clientLookup.Values) { - client.Closed = true; - return _clients.Remove(client); + client.Touch(database, key); } } @@ -231,45 +271,69 @@ protected void DoShutdown(ShutdownReason reason) if (_isShutdown) return; Log("Server shutting down..."); _isShutdown = true; - lock (_clients) - { - foreach (var client in _clients) client.Dispose(); - _clients.Clear(); - } + foreach 
(var client in _clientLookup.Values) client.Dispose(); + _clientLookup.Clear(); _shutdown.TrySetResult(reason); } public Task Shutdown => _shutdown.Task; public void Dispose() => Dispose(true); protected virtual void Dispose(bool disposing) { - _arena.Dispose(); DoShutdown(ShutdownReason.ServerDisposed); } - public async Task RunClientAsync(IDuplexPipe pipe) + private readonly Arena _arena = new(); + + public virtual RedisServer.Node DefaultNode => null; + + public async Task RunClientAsync(IDuplexPipe pipe, RedisServer.Node node = null, object state = null) { Exception fault = null; RedisClient client = null; + byte[] commandLease = RedisRequest.GetLease(); + ReadOnlySequence buffer = default; + bool wasReading = false; try { - client = AddClient(); + node ??= DefaultNode; + client = AddClient(node, state); + OnClientConnected(client, state); + Task output = client.WriteOutputAsync(pipe.Output); while (!client.Closed) { var readResult = await pipe.Input.ReadAsync().ConfigureAwait(false); - var buffer = readResult.Buffer; + buffer = readResult.Buffer; - bool makingProgress = false; - while (!client.Closed && await TryProcessRequestAsync(ref buffer, client, pipe.Output).ConfigureAwait(false)) + wasReading = true; + while (!client.Closed && client.TryReadRequest(buffer, out long consumed)) { - makingProgress = true; - } - pipe.Input.AdvanceTo(buffer.Start, buffer.End); + wasReading = false; + // process a completed request + RedisRequest request = new(buffer.Slice(0, consumed), ref commandLease); + request = request.WithClient(client); + var response = Execute(client, request); + + if (client.ShouldSkipResponse() || response.IsNil) // elective or no-result + { + response.Recycle(); + } + else + { + await client.AddOutboundAsync(response); + } + client.ResetAfterRequest(); - if (!makingProgress && readResult.IsCompleted) - { // nothing to do, and nothing more will be arriving - break; + // advance the buffer to account for the message we just read + buffer = 
buffer.Slice(consumed); + wasReading = true; } + wasReading = false; + + pipe.Input.AdvanceTo(buffer.Start, buffer.End); + if (readResult.IsCompleted) break; // EOF } + client.Complete(); + await output; } catch (ConnectionResetException) { } catch (ObjectDisposedException) { } @@ -278,11 +342,14 @@ public async Task RunClientAsync(IDuplexPipe pipe) if (ex.GetType().Name != nameof(ConnectionResetException)) { // aspnet core has one too; swallow it by pattern - fault = ex; throw; + fault = ex; + throw; } } finally { + RedisRequest.ReleaseLease(ref commandLease); + client?.Complete(fault); RemoveClient(client); try { pipe.Input.Complete(fault); } catch { } try { pipe.Output.Complete(fault); } catch { } @@ -290,10 +357,40 @@ public async Task RunClientAsync(IDuplexPipe pipe) if (fault != null && !_isShutdown) { Log("Connection faulted (" + fault.GetType().Name + "): " + fault.Message); + if (wasReading) + { + Log("Read fault, buffer: " + GetUtf8String(buffer)); + } } } } - public void Log(string message) + + internal static string GetUtf8String(in ReadOnlySequence buffer) + { + if (buffer.IsEmpty) return "(empty)"; + char[] lease = null; + var maxLen = Encoding.UTF8.GetMaxCharCount(checked((int)buffer.Length)); + Span target = maxLen <= 128 ? 
stackalloc char[128] : (lease = ArrayPool.Shared.Rent(maxLen)); + int charCount = 0; + if (buffer.IsSingleSegment) + { + charCount = Encoding.UTF8.GetChars(buffer.First.Span, target); + } + else + { + foreach (var segment in buffer) + { + charCount += Encoding.UTF8.GetChars(segment.Span, target.Slice(charCount)); + } + } + const string CR = "\u240D", LF = "\u240A", CRLF = CR + LF; + string s = target.Slice(0, charCount).ToString() + .Replace("\r\n", CRLF).Replace("\r", CR).Replace("\n", LF); + if (lease is not null) ArrayPool.Shared.Return(lease); + return s; + } + + public virtual void Log(string message) { var output = _output; if (output != null) @@ -305,129 +402,48 @@ public void Log(string message) } } - public static async ValueTask WriteResponseAsync(RedisClient client, PipeWriter output, TypedRedisValue value) - { - void WritePrefix(PipeWriter ooutput, char pprefix) - { - var span = ooutput.GetSpan(1); - span[0] = (byte)pprefix; - ooutput.Advance(1); - } + protected object ServerSyncLock => this; - if (value.IsNil) return; // not actually a request (i.e. 
empty/whitespace request) - if (client != null && client.ShouldSkipResponse()) return; // intentionally skipping the result - char prefix; - switch (value.Type) - { - case ResultType.Integer: - PhysicalConnection.WriteInteger(output, (long)value.AsRedisValue()); - break; - case ResultType.Error: - prefix = '-'; - goto BasicMessage; - case ResultType.SimpleString: - prefix = '+'; - BasicMessage: - WritePrefix(output, prefix); - var val = (string)value.AsRedisValue(); - var expectedLength = Encoding.UTF8.GetByteCount(val); - PhysicalConnection.WriteRaw(output, val, expectedLength); - PhysicalConnection.WriteCrlf(output); - break; - case ResultType.BulkString: - PhysicalConnection.WriteBulkString(value.AsRedisValue(), output); - break; - case ResultType.MultiBulk: - if (value.IsNullArray) - { - PhysicalConnection.WriteMultiBulkHeader(output, -1); - } - else - { - var segment = value.Segment; - PhysicalConnection.WriteMultiBulkHeader(output, segment.Count); - var arr = segment.Array; - int offset = segment.Offset; - for (int i = 0; i < segment.Count; i++) - { - var item = arr[offset++]; - if (item.IsNil) - throw new InvalidOperationException("Array element cannot be nil, index " + i); + private long _totalCommandsProcesed, _totalErrorCount; + public long TotalCommandsProcesed => _totalCommandsProcesed; + public long TotalErrorCount => _totalErrorCount; - // note: don't pass client down; this would impact SkipReplies - await WriteResponseAsync(null, output, item); - } - } - break; - default: - throw new InvalidOperationException( - "Unexpected result type: " + value.Type); - } - await output.FlushAsync().ConfigureAwait(false); + public virtual void ResetCounters() + { + _totalCommandsProcesed = _totalErrorCount = _totalClientCount = 0; } - private static bool TryParseRequest(Arena arena, ref ReadOnlySequence buffer, out RedisRequest request) + public virtual TypedRedisValue OnUnknownCommand(in RedisClient client, in RedisRequest request, ReadOnlySpan command) { - var 
reader = new BufferReader(buffer); - var raw = PhysicalConnection.TryParseResult(arena, in buffer, ref reader, false, null, true); - if (raw.HasValue) - { - buffer = reader.SliceFromCurrent(); - request = new RedisRequest(raw); - return true; - } - request = default; - - return false; + return request.CommandNotFound(); } - private readonly Arena _arena = new Arena(); - - public ValueTask TryProcessRequestAsync(ref ReadOnlySequence buffer, RedisClient client, PipeWriter output) + public virtual TypedRedisValue Execute(RedisClient client, in RedisRequest request) { - async ValueTask Awaited(ValueTask wwrite, TypedRedisValue rresponse) + if (request.Count == 0 || request.Command.Length == 0) // not a request { - await wwrite; - rresponse.Recycle(); - return true; - } - if (!buffer.IsEmpty && TryParseRequest(_arena, ref buffer, out var request)) - { - TypedRedisValue response; - try { response = Execute(client, request); } - finally { _arena.Reset(); } - - var write = WriteResponseAsync(client, output, response); - if (!write.IsCompletedSuccessfully) return Awaited(write, response); - response.Recycle(); - return new ValueTask(true); + client.ExecAbort(); + return request.CommandNotFound(); } - return new ValueTask(false); - } - - protected object ServerSyncLock => this; - - private long _totalCommandsProcesed, _totalErrorCount; - public long TotalCommandsProcesed => _totalCommandsProcesed; - public long TotalErrorCount => _totalErrorCount; - public TypedRedisValue Execute(RedisClient client, RedisRequest request) - { - if (request.Count == 0) return default;// not a request - - if (!request.TryGetCommandBytes(0, out var cmdBytes)) return request.CommandNotFound(); - if (cmdBytes.Length == 0) return default; // not a request Interlocked.Increment(ref _totalCommandsProcesed); try { TypedRedisValue result; - if (_commands.TryGetValue(cmdBytes, out var cmd)) + if (_commands.TryGetValue(request.Command, out var cmd)) { if (cmd.HasSubCommands) { cmd = cmd.Resolve(request); 
- if (cmd.IsUnknown) return request.UnknownSubcommandOrArgumentCount(); + if (cmd.IsUnknown) + { + client.ExecAbort(); + return request.UnknownSubcommandOrArgumentCount(); + } } + + if (client.BufferMulti(request, request.Command)) return TypedRedisValue.SimpleString("QUEUED"); + if (cmd.LockFree) { result = cmd.Execute(client, request); @@ -442,16 +458,25 @@ public TypedRedisValue Execute(RedisClient client, RedisRequest request) } else { - result = TypedRedisValue.Nil; + client.ExecAbort(); + result = OnUnknownCommand(client, request, request.Command.Span); } - if (result.IsNil) + if (result.IsError) Interlocked.Increment(ref _totalErrorCount); + return result; + } + catch (KeyMovedException moved) + { + if (GetNode(moved.HashSlot) is { } node) { - Log($"missing command: '{request.GetString(0)}'"); - return request.CommandNotFound(); + OnMoved(client, moved.HashSlot, node); + return TypedRedisValue.Error($"MOVED {moved.HashSlot} {node.Host}:{node.Port}"); } - if (result.Type == ResultType.Error) Interlocked.Increment(ref _totalErrorCount); - return result; + return TypedRedisValue.Error($"ERR key has been migrated from slot {moved.HashSlot}, but the new owner is unknown"); + } + catch (CrossSlotException) + { + return TypedRedisValue.Error("CROSSSLOT Keys in request don't hash to the same slot"); } catch (NotSupportedException) { @@ -463,6 +488,10 @@ public TypedRedisValue Execute(RedisClient client, RedisRequest request) Log($"missing command: '{request.GetString(0)}'"); return request.CommandNotFound(); } + catch (WrongTypeException) + { + return TypedRedisValue.Error("WRONGTYPE Operation against a key holding the wrong kind of value"); + } catch (InvalidCastException) { return TypedRedisValue.Error("WRONGTYPE Operation against a key holding the wrong kind of value"); @@ -474,41 +503,64 @@ public TypedRedisValue Execute(RedisClient client, RedisRequest request) } } + protected virtual void OnMoved(RedisClient client, int hashSlot, RedisServer.Node node) + { + } 
+ + protected virtual RedisServer.Node GetNode(int hashSlot) => null; + + public sealed class KeyMovedException : Exception + { + private KeyMovedException(int hashSlot) => HashSlot = hashSlot; + public int HashSlot { get; } + public static void Throw(int hashSlot) => throw new KeyMovedException(hashSlot); + public static void Throw(in RedisKey key) => throw new KeyMovedException(GetHashSlot(key)); + } + + public sealed class WrongTypeException : Exception + { + } + + protected internal static int GetHashSlot(in RedisKey key) => s_ClusterSelectionStrategy.HashSlot(key); + private static readonly ServerSelectionStrategy s_ClusterSelectionStrategy = new(null) { ServerType = ServerType.Cluster }; + + /* internal static string ToLower(in RawResult value) { var val = value.GetString(); if (string.IsNullOrWhiteSpace(val)) return val; return val.ToLowerInvariant(); } + */ [RedisCommand(1, LockFree = true)] - protected virtual TypedRedisValue Command(RedisClient client, RedisRequest request) + protected virtual TypedRedisValue Command(RedisClient client, in RedisRequest request) { - var results = TypedRedisValue.Rent(_commands.Count, out var span); + var results = TypedRedisValue.Rent(_commands.Count, out var span, RespPrefix.Array); int index = 0; foreach (var pair in _commands) span[index++] = CommandInfo(pair.Value); return results; } - [RedisCommand(-2, "command", "info", LockFree = true)] - protected virtual TypedRedisValue CommandInfo(RedisClient client, RedisRequest request) + [RedisCommand(-2, nameof(RedisCommand.COMMAND), "info", LockFree = true)] + protected virtual TypedRedisValue CommandInfo(RedisClient client, in RedisRequest request) { - var results = TypedRedisValue.Rent(request.Count - 2, out var span); + var results = TypedRedisValue.Rent(request.Count - 2, out var span, RespPrefix.Array); for (int i = 2; i < request.Count; i++) { - span[i - 2] = request.TryGetCommandBytes(i, out var cmdBytes) - &&_commands.TryGetValue(cmdBytes, out var cmdInfo) - ? 
CommandInfo(cmdInfo) : TypedRedisValue.NullArray; + span[i - 2] = _commands.TryGetValue(request.Command, out var cmdInfo) + ? CommandInfo(cmdInfo) : TypedRedisValue.NullArray(RespPrefix.Array); } return results; } - private TypedRedisValue CommandInfo(RespCommand command) + + private TypedRedisValue CommandInfo(in RespCommand command) { - var arr = TypedRedisValue.Rent(6, out var span); + var arr = TypedRedisValue.Rent(6, out var span, RespPrefix.Array); span[0] = TypedRedisValue.BulkString(command.Command); span[1] = TypedRedisValue.Integer(command.NetArity()); - span[2] = TypedRedisValue.EmptyArray; + span[2] = TypedRedisValue.EmptyArray(RespPrefix.Array); span[3] = TypedRedisValue.Zero; span[4] = TypedRedisValue.Zero; span[5] = TypedRedisValue.Zero; diff --git a/toys/StackExchange.Redis.Server/StackExchange.Redis.Server.csproj b/toys/StackExchange.Redis.Server/StackExchange.Redis.Server.csproj index b1c16a9c1..68d690497 100644 --- a/toys/StackExchange.Redis.Server/StackExchange.Redis.Server.csproj +++ b/toys/StackExchange.Redis.Server/StackExchange.Redis.Server.csproj @@ -1,7 +1,7 @@  - netstandard2.0 + netstandard2.0;net10.0 Basic redis server based on StackExchange.Redis StackExchange.Redis StackExchange.Redis.Server @@ -9,11 +9,11 @@ Server;Async;Redis;Cache;PubSub;Messaging Library true - true $(NoWarn);CS1591 - - + + + diff --git a/toys/StackExchange.Redis.Server/TypedRedisValue.cs b/toys/StackExchange.Redis.Server/TypedRedisValue.cs index e6d27110c..0493399ac 100644 --- a/toys/StackExchange.Redis.Server/TypedRedisValue.cs +++ b/toys/StackExchange.Redis.Server/TypedRedisValue.cs @@ -1,6 +1,7 @@ using System; using System.Buffers; using System.Collections.Generic; +using RESPite.Messages; namespace StackExchange.Redis { @@ -11,47 +12,49 @@ public readonly struct TypedRedisValue { // note: if this ever becomes exposed on the public API, it should be made so that it clears; // can't trust external callers to clear the space, and using recycle without that is 
dangerous - internal static TypedRedisValue Rent(int count, out Span span) + internal static TypedRedisValue Rent(int count, out Span span, RespPrefix type) { if (count == 0) { span = default; - return EmptyArray; + return EmptyArray(type); } - var arr = ArrayPool.Shared.Rent(count); + + var arr = ArrayPool.Shared.Rent(count); // new TypedRedisValue[count]; span = new Span(arr, 0, count); - return new TypedRedisValue(arr, count); + return new TypedRedisValue(arr, count, type); } /// /// An invalid empty value that has no type. /// public static TypedRedisValue Nil => default; + /// /// Returns whether this value is an invalid empty value. /// - public bool IsNil => Type == ResultType.None; + public bool IsNil => Type == RespPrefix.None; /// /// Returns whether this value represents a null array. /// - public bool IsNullArray => Type == ResultType.MultiBulk && _value.DirectObject == null; + public bool IsNullArray => IsAggregate && _value.IsNull; private readonly RedisValue _value; /// /// The type of value being represented. /// - public ResultType Type { get; } + public RespPrefix Type { get; } /// /// Initialize a TypedRedisValue from a value and optionally a type. /// /// The value to initialize. /// The type of . - private TypedRedisValue(RedisValue value, ResultType? type = null) + private TypedRedisValue(RedisValue value, RespPrefix? type = null) { - Type = type ?? (value.IsInteger ? ResultType.Integer : ResultType.BulkString); + Type = type ?? (value.IsInteger ? RespPrefix.Integer : RespPrefix.BulkString); _value = value; } @@ -60,65 +63,61 @@ private TypedRedisValue(RedisValue value, ResultType? type = null) /// /// The error message. public static TypedRedisValue Error(string value) - => new TypedRedisValue(value, ResultType.Error); + => new TypedRedisValue(value, RespPrefix.SimpleError); /// /// Initialize a TypedRedisValue that represents a simple string. /// /// The string value. 
public static TypedRedisValue SimpleString(string value) - => new TypedRedisValue(value, ResultType.SimpleString); + => new TypedRedisValue(value, RespPrefix.SimpleString); /// - /// The simple string OK + /// The simple string OK. /// public static TypedRedisValue OK { get; } = SimpleString("OK"); + internal static TypedRedisValue Zero { get; } = Integer(0); internal static TypedRedisValue One { get; } = Integer(1); - internal static TypedRedisValue NullArray { get; } = new TypedRedisValue((TypedRedisValue[])null, 0); - internal static TypedRedisValue EmptyArray { get; } = new TypedRedisValue(Array.Empty(), 0); + internal static TypedRedisValue NullArray(RespPrefix type) => new TypedRedisValue((TypedRedisValue[])null, 0, type); + internal static TypedRedisValue EmptyArray(RespPrefix type) => new TypedRedisValue([], 0, type); /// - /// Gets the array elements as a span + /// Gets the array elements as a span. /// public ReadOnlySpan Span { get { - if (Type != ResultType.MultiBulk) return default; - var arr = (TypedRedisValue[])_value.DirectObject; - if (arr == null) return default; - var length = (int)_value.DirectOverlappedBits64; - return new ReadOnlySpan(arr, 0, length); - } - } - public ArraySegment Segment - { - get - { - if (Type != ResultType.MultiBulk) return default; - var arr = (TypedRedisValue[])_value.DirectObject; - if (arr == null) return default; - var length = (int)_value.DirectOverlappedBits64; - return new ArraySegment(arr, 0, length); + if (_value.TryGetForeign(out var arr, out int index, out var length)) + { + return arr.AsSpan(index, length); + } + + return default; } } + public bool IsAggregate => Type is RespPrefix.Array or RespPrefix.Set or RespPrefix.Map or RespPrefix.Push or RespPrefix.Attribute; + + public bool IsNullValueOrArray => IsAggregate ? IsNullArray : _value.IsNull; + public bool IsError => Type is RespPrefix.SimpleError or RespPrefix.BulkError; + /// /// Initialize a that represents an integer. 
/// /// The value to initialize from. public static TypedRedisValue Integer(long value) - => new TypedRedisValue(value, ResultType.Integer); + => new TypedRedisValue(value, RespPrefix.Integer); /// /// Initialize a from a . /// /// The items to intialize a value from. - public static TypedRedisValue MultiBulk(ReadOnlySpan items) + public static TypedRedisValue MultiBulk(ReadOnlySpan items, RespPrefix type) { - if (items.IsEmpty) return EmptyArray; - var result = Rent(items.Length, out var span); + if (items.IsEmpty) return EmptyArray(type); + var result = Rent(items.Length, out var span, type); items.CopyTo(span); return result; } @@ -127,14 +126,19 @@ public static TypedRedisValue MultiBulk(ReadOnlySpan items) /// Initialize a from a collection. /// /// The items to intialize a value from. - public static TypedRedisValue MultiBulk(ICollection items) + public static TypedRedisValue MultiBulk(ICollection items, RespPrefix type) { - if (items == null) return NullArray; + if (items == null) return NullArray(type); int count = items.Count; - if (count == 0) return EmptyArray; - var arr = ArrayPool.Shared.Rent(count); - items.CopyTo(arr, 0); - return new TypedRedisValue(arr, count); + if (count == 0) return EmptyArray(type); + var result = Rent(count, out var span, type); + int i = 0; + foreach (var item in items) + { + span[i++] = item; + } + + return result; } /// @@ -142,57 +146,68 @@ public static TypedRedisValue MultiBulk(ICollection items) /// /// The value to initialize from. public static TypedRedisValue BulkString(RedisValue value) - => new TypedRedisValue(value, ResultType.BulkString); + => new TypedRedisValue(value, RespPrefix.BulkString); + + /// + /// Initialize a that represents a bulk string. + /// + /// The value to initialize from. 
+ public static TypedRedisValue BulkString(in RedisChannel value) + => new TypedRedisValue((byte[])value, RespPrefix.BulkString); - private TypedRedisValue(TypedRedisValue[] oversizedItems, int count) + private TypedRedisValue(TypedRedisValue[] oversizedItems, int count, RespPrefix type) { if (oversizedItems == null) { if (count != 0) throw new ArgumentOutOfRangeException(nameof(count)); + oversizedItems = []; } else { if (count < 0 || count > oversizedItems.Length) throw new ArgumentOutOfRangeException(nameof(count)); - if (count == 0) oversizedItems = Array.Empty(); + if (count == 0) oversizedItems = []; } - _value = new RedisValue(oversizedItems, count); - Type = ResultType.MultiBulk; + + _value = RedisValue.CreateForeign(oversizedItems, 0, count); + Type = type; } internal void Recycle(int limit = -1) { - if (_value.DirectObject is TypedRedisValue[] arr) + if (_value.TryGetForeign(out var arr, out var index, out var length)) { - if (limit < 0) limit = (int)_value.DirectOverlappedBits64; - for (int i = 0; i < limit; i++) + if (limit < 0) limit = length; + var span = arr.AsSpan(index, limit); + foreach (ref readonly TypedRedisValue el in span) { - arr[i].Recycle(); + el.Recycle(); } - ArrayPool.Shared.Return(arr, clearArray: false); + span.Clear(); + ArrayPool.Shared.Return(arr, clearArray: false); // we did it ourselves } } /// /// Get the underlying assuming that it is a valid type with a meaningful value. /// - internal RedisValue AsRedisValue() => Type == ResultType.MultiBulk ? default :_value; + public RedisValue AsRedisValue() => IsAggregate ? default : _value; /// /// Obtain the value as a string. 
/// public override string ToString() { + if (IsAggregate) return $"{Type}:[{Span.Length}]"; + switch (Type) { - case ResultType.BulkString: - case ResultType.SimpleString: - case ResultType.Integer: - case ResultType.Error: + case RespPrefix.BulkString: + case RespPrefix.SimpleString: + case RespPrefix.Integer: + case RespPrefix.SimpleError: return $"{Type}:{_value}"; - case ResultType.MultiBulk: - return $"{Type}:[{Span.Length}]"; default: - return Type.ToString(); + return IsAggregate ? $"{Type}:[{Span.Length}]" : Type.ToString(); } } diff --git a/toys/TestConsole/TestConsole.csproj b/toys/TestConsole/TestConsole.csproj index 76826ee4e..674ec5cfe 100644 --- a/toys/TestConsole/TestConsole.csproj +++ b/toys/TestConsole/TestConsole.csproj @@ -2,7 +2,7 @@ Exe - netcoreapp3.1;net472 + net8.0;net472 SEV2 true diff --git a/toys/TestConsoleBaseline/TestConsoleBaseline.csproj b/toys/TestConsoleBaseline/TestConsoleBaseline.csproj index 66fa8a8be..10d5ab321 100644 --- a/toys/TestConsoleBaseline/TestConsoleBaseline.csproj +++ b/toys/TestConsoleBaseline/TestConsoleBaseline.csproj @@ -2,7 +2,7 @@ Exe - netcoreapp2.1;net461;net462;net47;net472 + net8.0;net461;net462;net47;net472 @@ -14,6 +14,6 @@ - + diff --git a/version.json b/version.json index 4d664c308..c2ded472b 100644 --- a/version.json +++ b/version.json @@ -1,6 +1,6 @@ { - "version": "2.2", - "versionHeightOffset": -1, + "version": "2.12", + "versionHeightOffset": 0, "assemblyVersion": "2.0", "publicReleaseRefSpec": [ "^refs/heads/main$", @@ -15,4 +15,4 @@ "setVersionVariables": true } } -} \ No newline at end of file +}